/* sunqe.c: Sun QuadEthernet 10baseT SBUS card driver.
 *
 * Copyright (C) David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pgtable.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME	"sunqe"
#define DRV_VERSION	"4.1"
#define DRV_RELDATE	"August 27, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

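/* Reset the whole QEC by hitting the global reset bit and spinning until
 * it clears.  Returns 0 on success, -1 if the controller never comes out
 * of reset.
 */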
static inline int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

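/* Reset the MACE (the per-channel ethernet chip), then the QE channel
 * itself.  Returns 0 on success, -1 if either reset bit fails to clear.
 */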
static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	/* Now reset the QE channel. */
	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

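/* Reset the ring indexes and hand every receive descriptor a fresh
 * buffer address owned by the hardware.  Transmit descriptors are left
 * clear; they are filled in at transmit time.
 */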
static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}

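/* Bring a QE channel up: reset it, program the QEC and MACE registers,
 * set the station address, initialize the rings and reload the multicast
 * filter.  Returns 0 on success or -EAGAIN if the channel would not
 * reset.
 */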
static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	const unsigned char *e = &qep->dev->dev_addr[0];
	__u32 qblk_dvma = (__u32)qep->qblock_dvma;
	u32 tmp;
	int i;

	/* Stop it first. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's: rx interrupts on, tx interrupts
	 * masked until the ring fills up (lazy tx reclaim).
	 */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* Clear the inter-packet gap register. */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dance around a bit with the MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC signals packet reception itself once the dma into local
	 * memory completes, so mask the MACE's own receive and collision
	 * interrupts.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Select the twisted pair (10baseT) port. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear the logical address (multicast) filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Give the link a moment to come up, then poll link status unless
	 * link testing is disabled.
	 */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (--tries) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries == 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload the multicast filter. */
	qe_set_multicast(qep->dev);

	return 0;
}
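/* Certain error conditions wedge the AMD MACE, so when one of those is
 * seen the channel is reinitialized.  Returns non-zero if the channel
 * was reset, so the caller can skip further processing of it.
 */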
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		dev->stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		dev->stats.tx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		dev->stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		dev->stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}
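/* Per-QE receive handler.  Packets land in the fixed per-channel DMA
 * buffers; each good frame is copied into a freshly allocated skb and
 * handed to the stack, then the descriptor is given back to the chip.
 */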
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
	int elem = qep->rx_new;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			skb = netdev_alloc_skb(dev, len + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, this_qbuf,
							len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
}

static void qe_tx_reclaim(struct sunqe *qep);

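/* Interrupts for all four QE channels are funneled through the QEC
 * master controller, so walk each channel's nibble of the global status
 * word and service whichever channels are signaling.
 */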
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
	struct sunqec *qecp = dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
	next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qe_stop(qep);
	return 0;
}

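/* Reclaim TX'd frames from the ring.  This must always run under
 * qep->lock.
 */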
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

static void qe_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct sunqe *qep = netdev_priv(dev);
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim; if that frees up some tx entries,
	 * we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}

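/* Get a packet queued to go onto the wire. */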
static netdev_tx_t qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race: mark the descriptor as being updated before
	 * filling it in.
	 */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.  The tx
		 * irq handler will wake the queue and return us to the
		 * lazy tx reclaim scheme once space frees up.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

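/* Program the MACE address filter according to the interface flags and
 * multicast list: an all-ones filter for allmulti or large lists,
 * promiscuous mode via mconfig, or a hashed logical address filter.
 */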
static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 new_mconfig = qep->mconfig;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}

		/* Program the logical address filter one byte at a time. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Write any mconfig change (e.g. toggling promiscuous mode) back
	 * to the MACE.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}
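/* ethtool support: report driver identity and the current link state. */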
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	const struct linux_prom_registers *regs;
	struct sunqe *qep = netdev_priv(dev);
	struct platform_device *op;

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));

	op = qep->op;
	regs = of_get_property(op->dev.of_node, "reg", NULL);
	if (regs)
		snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
			 regs->which_io);
}

static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	void __iomem *mregs = qep->mregs;
	u8 phyconfig;

	spin_lock_irq(&qep->lock);
	phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return phyconfig & MREGS_PHYCONFIG_LSTAT;
}

static const struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo	= qe_get_drvinfo,
	.get_link	= qe_get_link,
};

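/* One-time global setup of a QEC controller: burst size, packet size,
 * and the carve-up of local memory among the four channels.
 */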
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Global packet size register. */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, one piece per QE channel. */
	sbus_writel((resource_size(&op->resource[1]) >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 4) / 2.
	 */
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}

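/* Determine the SBUS burst sizes the QEC may use, honoring both the
 * device node and its parent; fall back to bursts below 32 bytes when the
 * properties are missing or unusable.
 */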
static u8 qec_get_burst(struct device_node *dp)
{
	u8 bsizes, bsizes_more;

	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	return bsizes;
}

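/* Find the parent QEC for a QE channel, allocating and initializing it
 * (register mapping, reset, burst sizes, irq) the first time any of its
 * channels probes.
 */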
static struct sunqec *get_qec(struct platform_device *child)
{
	struct platform_device *op = to_platform_device(child->dev.parent);
	struct sunqec *qecp;

	qecp = platform_get_drvdata(op);
	if (!qecp) {
		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
		if (qecp) {
			u32 ctrl;

			qecp->op = op;
			qecp->gregs = of_ioremap(&op->resource[0], 0,
						 GLOB_REG_SIZE,
						 "QEC Global Registers");
			if (!qecp->gregs)
				goto fail;

			/* Make sure the QEC is in MACE mode. */
			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
			ctrl &= 0xf0000000;
			if (ctrl != GLOB_CTRL_MMODE) {
				printk(KERN_ERR "qec: Not in MACE mode!\n");
				goto fail;
			}

			if (qec_global_reset(qecp->gregs))
				goto fail;

			qecp->qec_bursts = qec_get_burst(op->dev.of_node);

			qec_init_once(qecp, op);

			if (request_irq(op->archdata.irqs[0], qec_interrupt,
					IRQF_SHARED, "qec", (void *) qecp)) {
				printk(KERN_ERR "qec: Can't register irq.\n");
				goto fail;
			}

			platform_set_drvdata(op, qecp);

			qecp->next_module = root_qec_dev;
			root_qec_dev = qecp;
		}
	}

	return qecp;

fail:
	if (qecp->gregs)
		of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
	kfree(qecp);
	return NULL;
}

static const struct net_device_ops qec_ops = {
	.ndo_open		= qe_open,
	.ndo_stop		= qe_close,
	.ndo_start_xmit		= qe_start_xmit,
	.ndo_set_rx_mode	= qe_set_multicast,
	.ndo_tx_timeout		= qe_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int qec_ether_init(struct platform_device *op)
{
	static unsigned version_printed;
	struct net_device *dev;
	struct sunqec *qecp;
	struct sunqe *qe;
	int i, res;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(struct sunqe));
	if (!dev)
		return -ENOMEM;

	eth_hw_addr_set(dev, idprom->id_ethaddr);

	qe = netdev_priv(dev);

	res = -ENODEV;

	i = of_getintprop_default(op->dev.of_node, "channel#", -1);
	if (i == -1)
		goto fail;
	qe->channel = i;
	spin_lock_init(&qe->lock);

	qecp = get_qec(op);
	if (!qecp)
		goto fail;

	qecp->qes[qe->channel] = qe;
	qe->dev = dev;
	qe->parent = qecp;
	qe->op = op;

	res = -ENOMEM;
	qe->qcregs = of_ioremap(&op->resource[0], 0,
				CREG_REG_SIZE, "QEC Channel Registers");
	if (!qe->qcregs) {
		printk(KERN_ERR "qe: Cannot map channel registers.\n");
		goto fail;
	}

	qe->mregs = of_ioremap(&op->resource[1], 0,
			       MREGS_REG_SIZE, "QE MACE Registers");
	if (!qe->mregs) {
		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
		goto fail;
	}

	qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
					  &qe->qblock_dvma, GFP_ATOMIC);
	qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
					 &qe->buffers_dvma, GFP_ATOMIC);
	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
	    qe->buffers == NULL || qe->buffers_dvma == 0)
		goto fail;

	/* Stop this QE. */
	qe_stop(qe);

	SET_NETDEV_DEV(dev, &op->dev);

	dev->watchdog_timeo = 5*HZ;
	dev->irq = op->archdata.irqs[0];
	dev->dma = 0;
	dev->ethtool_ops = &qe_ethtool_ops;
	dev->netdev_ops = &qec_ops;

	res = register_netdev(dev);
	if (res)
		goto fail;

	platform_set_drvdata(op, qe);

	printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
	       dev->dev_addr);
	return 0;

fail:
	if (qe->qcregs)
		of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
	if (qe->mregs)
		of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
	if (qe->qe_block)
		dma_free_coherent(&op->dev, PAGE_SIZE,
				  qe->qe_block, qe->qblock_dvma);
	if (qe->buffers)
		dma_free_coherent(&op->dev,
				  sizeof(struct sunqe_buffers),
				  qe->buffers,
				  qe->buffers_dvma);

	free_netdev(dev);

	return res;
}

static int qec_sbus_probe(struct platform_device *op)
{
	return qec_ether_init(op);
}

static int qec_sbus_remove(struct platform_device *op)
{
	struct sunqe *qp = platform_get_drvdata(op);
	struct net_device *net_dev = qp->dev;

	unregister_netdev(net_dev);

	of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
	of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
	dma_free_coherent(&op->dev, PAGE_SIZE,
			  qp->qe_block, qp->qblock_dvma);
	dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
			  qp->buffers, qp->buffers_dvma);

	free_netdev(net_dev);

	return 0;
}

static const struct of_device_id qec_sbus_match[] = {
	{
		.name = "qe",
	},
	{},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct platform_driver qec_sbus_driver = {
	.driver = {
		.name = "qec",
		.of_match_table = qec_sbus_match,
	},
	.probe		= qec_sbus_probe,
	.remove		= qec_sbus_remove,
};

static int __init qec_init(void)
{
	return platform_driver_register(&qec_sbus_driver);
}

static void __exit qec_exit(void)
{
	platform_driver_unregister(&qec_sbus_driver);

	while (root_qec_dev) {
		struct sunqec *next = root_qec_dev->next_module;
		struct platform_device *op = root_qec_dev->op;

		free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
		of_iounmap(&op->resource[0], root_qec_dev->gregs,
			   GLOB_REG_SIZE);
		kfree(root_qec_dev);

		root_qec_dev = next;
	}
}

module_init(qec_init);
module_exit(qec_exit);