/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"

static char *sgiseeqstr = "SGI Seeq8003";
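/*
 * Ring geometry: both rings hold a power-of-two number of descriptors so
 * the NEXT_*()/PREV_*() macros below can wrap an index with a simple mask.
 */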
#define SEEQ_RX_BUFFERS 16
#define SEEQ_TX_BUFFERS 16

#define PKT_BUF_SZ 1584

#define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
			    sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
			    sp->tx_old - sp->tx_new - 1)

#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \
			    (dma_addr_t)((unsigned long)(v) - \
					 (unsigned long)((sp)->rx_desc)))
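/*
 * Received frames shorter than rx_copybreak are copied into a freshly
 * allocated skb instead of handing the full-sized receive buffer up the
 * stack (see sgiseeq_rx()).
 */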
static int rx_copybreak = 100;

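/*
 * Pad each descriptor out to 128 bytes (HPC3 DMA descriptor + padding +
 * owning skb pointer) so that a descriptor can be synced individually in
 * dma_sync_desc_cpu()/dma_sync_desc_dev() without disturbing its neighbours.
 */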
#define PAD_SIZE (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))

struct sgiseeq_rx_desc {
	volatile struct hpc_dma_desc rdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
	volatile struct hpc_dma_desc tdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};
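/*
 * All descriptors live in a single block that is allocated with
 * dma_alloc_noncoherent() in sgiseeq_probe() and handed to the HPC3.
 */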
struct sgiseeq_init_block {
	struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
	struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
	struct sgiseeq_init_block *srings;
	dma_addr_t srings_dma;

	/* Pointers into the descriptor block above. */
	struct sgiseeq_rx_desc *rx_desc;
	struct sgiseeq_tx_desc *tx_desc;

	char *name;
	struct hpc3_ethregs *hregs;
	struct sgiseeq_regs *sregs;

	/* Ring indices: *_new is the next entry to use, tx_old the oldest
	 * entry not yet reclaimed. */
	unsigned int rx_new, tx_new;
	unsigned int rx_old, tx_old;

	int is_edlc;
	unsigned char control;
	unsigned char mode;

	spinlock_t tx_lock;
};

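/*
 * Descriptors sit in noncoherent DMA memory: sync a descriptor to the CPU
 * before reading it and back to the device after writing it.  rx and tx
 * descriptors have the same size, so the rx size is used for both.
 */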
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr),
			sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
			sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}

static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
	udelay(20);
	hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
				       struct sgiseeq_regs *sregs)
{
	hregs->rx_ctrl = hregs->tx_ctrl = 0;
	hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
		       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs,
			   struct sgiseeq_regs *sregs)
{
	sregs->rstat = sp->mode | RSTAT_GO_BITS;
	hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int i;

	sregs->tstat = SEEQ_TCMD_RB0;
	for (i = 0; i < 6; i++)
		sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	eth_hw_addr_set(dev, sa->sa_data);

	spin_lock_irq(&sp->tx_lock);
	__sgiseeq_set_mac_address(dev);
	spin_unlock_irq(&sp->tx_lock);

	return 0;
}

#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))

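/*
 * (Re)build both rings: mark every tx descriptor as free and make sure
 * every rx descriptor owns a DMA-mapped skb; the last rx descriptor is
 * tagged with HPCDMA_EOR so the chain ends there.
 */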
static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].skb) {
			dma_addr_t dma_addr;
			struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

			if (skb == NULL)
				return -ENOMEM;
			skb_reserve(skb, 2);
			dma_addr = dma_map_single(dev->dev.parent,
						  skb->data - 2,
						  PKT_BUF_SZ, DMA_FROM_DEVICE);
			sp->rx_desc[i].skb = skb;
			sp->rx_desc[i].rdma.pbuf = dma_addr;
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
	}
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
	return 0;
}

static void seeq_purge_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	/* Clear out the tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if (sp->tx_desc[i].skb) {
			dev_kfree_skb(sp->tx_desc[i].skb);
			sp->tx_desc[i].skb = NULL;
		}
	}

	/* And the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (sp->rx_desc[i].skb) {
			dev_kfree_skb(sp->rx_desc[i].skb);
			sp->rx_desc[i].skb = NULL;
		}
	}
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
	static int once;
	struct sgiseeq_rx_desc *r = gpriv->rx_desc;
	struct sgiseeq_tx_desc *t = gpriv->tx_desc;
	struct hpc3_ethregs *hregs = gpriv->hregs;
	int i;

	if (once)
		return;
	once++;
	printk("RING DUMP:\n");
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
	}
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
	}
	printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
	       gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
	printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
	       hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
	printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
	       hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
		     struct sgiseeq_regs *sregs)
{
	struct hpc3_ethregs *hregs = sp->hregs;
	int err;

	reset_hpc3_and_seeq(hregs, sregs);
	err = seeq_init_ring(dev);
	if (err)
		return err;

	/* Setup to field the proper interrupt types. */
	if (sp->is_edlc) {
		sregs->tstat = TSTAT_INIT_EDLC;
		sregs->rw.wregs.control = sp->control;
		sregs->rw.wregs.frame_gap = 0;
	} else {
		sregs->tstat = TSTAT_INIT_SEEQ;
	}

	hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
	hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

	seeq_go(sp, hregs, sregs);
	return 0;
}

static void record_rx_errors(struct net_device *dev, unsigned char status)
{
	if (status & SEEQ_RSTAT_OVERF ||
	    status & SEEQ_RSTAT_SFRAME)
		dev->stats.rx_over_errors++;
	if (status & SEEQ_RSTAT_CERROR)
		dev->stats.rx_crc_errors++;
	if (status & SEEQ_RSTAT_DERROR)
		dev->stats.rx_frame_errors++;
	if (status & SEEQ_RSTAT_REOF)
		dev->stats.rx_errors++;
}

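/*
 * If the HPC3 rx channel has gone inactive (ran out of descriptors or was
 * stopped), point it at the next free descriptor and restart it.
 */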
static inline void rx_maybe_restart(struct sgiseeq_private *sp,
				    struct hpc3_ethregs *hregs,
				    struct sgiseeq_regs *sregs)
{
	if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
		hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
		seeq_go(sp, hregs, sregs);
	}
}

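/*
 * Service every descriptor the HPC3 has handed back: pass good frames up
 * the stack (copying small ones, see rx_copybreak), rearm each descriptor
 * with a mapped skb, and move the HPCDMA_EOR marker to the new ring end.
 */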
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK; we don't want to receive our own
			 * transmissions, so skip frames whose source address
			 * matches ours. */
			if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
				if (len > rx_copybreak) {
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets. */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	dma_sync_desc_dev(dev, rd);

	/* Move the end-of-ring marker from the old ring end to the new one. */
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
					     struct sgiseeq_regs *sregs)
{
	if (sp->is_edlc) {
		sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
		sregs->rw.wregs.control = sp->control;
	}
}

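/*
 * Restart tx DMA after it has stopped: skip every descriptor that was
 * handed to the device (XIU) and already transmitted (ETXD); if one is
 * still pending, point the HPC3 at it and re-arm the channel.
 */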
static inline void kick_tx(struct net_device *dev,
			   struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs)
{
	struct sgiseeq_tx_desc *td;
	int i = sp->tx_old;

	td = &sp->tx_desc[i];
	dma_sync_desc_cpu(dev, td);
	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
	      (HPCDMA_XIU | HPCDMA_ETXD)) {
		i = NEXT_TX(i);
		td = &sp->tx_desc[i];
		dma_sync_desc_cpu(dev, td);
	}
	if (td->tdma.cntinfo & HPCDMA_XIU) {
		dma_sync_desc_dev(dev, td);
		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
		hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
	}
}

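/*
 * Reclaim finished tx descriptors: record any error status, free the skbs
 * of completed entries and, if the channel stalled on an unfinished
 * descriptor, restart DMA from it.
 */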
static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_tx_desc *td;
	unsigned long status = hregs->tx_ctrl;
	int j;

	tx_maybe_reset_collisions(sp, sregs);

	if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
		/* The transmitter stopped without completing a frame. */
		if (status & SEEQ_TSTAT_R16)
			dev->stats.tx_aborted_errors++;
		if (status & SEEQ_TSTAT_UFLOW)
			dev->stats.tx_fifo_errors++;
		if (status & SEEQ_TSTAT_LCLS)
			dev->stats.collisions++;
	}

	/* Sweep the completed descriptors. */
	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
		td = &sp->tx_desc[j];

		dma_sync_desc_cpu(dev, td);
		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
			break;
		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
			dma_sync_desc_dev(dev, td);
			if (!(status & HPC3_ETXCTRL_ACTIVE)) {
				hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
				hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
			}
			break;
		}
		dev->stats.tx_packets++;
		sp->tx_old = NEXT_TX(sp->tx_old);
		td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
		td->tdma.cntinfo |= HPCDMA_EOX;
		if (td->skb) {
			dev_kfree_skb_any(td->skb);
			td->skb = NULL;
		}
		dma_sync_desc_dev(dev, td);
	}
}

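/*
 * One interrupt serves both directions: ack the HPC3 IRQ, service the rx
 * ring, reap finished tx descriptors and wake the queue if space freed up.
 */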
static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	struct sgiseeq_regs *sregs = sp->sregs;

	spin_lock(&sp->tx_lock);

	/* Ack the IRQ. */
	hregs->reset = HPC3_ERST_CLRIRQ;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if (sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
	}
	spin_unlock(&sp->tx_lock);

	return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;
	int err;

	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
		return -EAGAIN;
	}

	err = init_seeq(dev, sp, sregs);
	if (err)
		goto out_free_irq;

	netif_start_queue(dev);

	return 0;

out_free_irq:
	free_irq(irq, dev);

	return err;
}

static int sgiseeq_close(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;

	netif_stop_queue(dev);

	/* Shutdown the Seeq. */
	reset_hpc3_and_seeq(sp->hregs, sregs);
	free_irq(irq, dev);
	seeq_purge_ring(dev);

	return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int err;

	err = init_seeq(dev, sp, sregs);
	if (err)
		return err;

	netif_trans_update(dev);
	netif_wake_queue(dev);

	return 0;
}

static netdev_tx_t
sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Frames shorter than ETH_ZLEN are padded before being handed to
	 * the hardware. */
	len = skb->len;
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN)) {
			spin_unlock_irqrestore(&sp->tx_lock, flags);
			return NETDEV_TX_OK;
		}
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];
	dma_sync_desc_cpu(dev, td);

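	/*
	 * Adding a descriptor to a live chain is racy: fully initialise the
	 * new entry first, and only then clear HPCDMA_EOX in the previous
	 * tail so the HPC3 never sees a half-built descriptor.  If the HPC3
	 * already ran off the end of the chain, kick_tx()/sgiseeq_tx()
	 * restart it.
	 */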
	td->skb = skb;
	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
				       len, DMA_TO_DEVICE);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
			   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	dma_sync_desc_dev(dev, td);
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		dma_sync_desc_cpu(dev, backend);
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
		dma_sync_desc_dev(dev, backend);
	}
	sp->tx_new = NEXT_TX(sp->tx_new);

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(dev, sp, hregs);

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return NETDEV_TX_OK;
}

static void timeout(struct net_device *dev, unsigned int txqueue)
{
	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
	sgiseeq_reset(dev);

	netif_trans_update(dev);
	netif_wake_queue(dev);
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	unsigned char oldmode = sp->mode;

	if (dev->flags & IFF_PROMISC)
		sp->mode = SEEQ_RCMD_RANY;
	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
		sp->mode = SEEQ_RCMD_RBMCAST;
	else
		sp->mode = SEEQ_RCMD_RBCAST;

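	/*
	 * Multicast reception is all-or-nothing here: any multicast address
	 * enables SEEQ_RCMD_RBMCAST, and applying a new receive mode means
	 * reinitialising the whole chip.
	 */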
	if (oldmode != sp->mode)
		sgiseeq_reset(dev);
}

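/*
 * Chain the descriptors of a ring into a circle via their pnext fields so
 * the HPC3 can walk them without CPU help.
 */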
static inline void setup_tx_ring(struct net_device *dev,
				 struct sgiseeq_tx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].tdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static inline void setup_rx_ring(struct net_device *dev,
				 struct sgiseeq_rx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].rdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].rdma.pbuf = 0;
	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static const struct net_device_ops sgiseeq_netdev_ops = {
	.ndo_open		= sgiseeq_open,
	.ndo_stop		= sgiseeq_close,
	.ndo_start_xmit		= sgiseeq_start_xmit,
	.ndo_tx_timeout		= timeout,
	.ndo_set_rx_mode	= sgiseeq_set_multicast,
	.ndo_set_mac_address	= sgiseeq_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
};

static int sgiseeq_probe(struct platform_device *pdev)
{
	struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
	struct hpc3_regs *hpcregs = pd->hpc;
	struct sgiseeq_init_block *sr;
	unsigned int irq = pd->irq;
	struct sgiseeq_private *sp;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct sgiseeq_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	sp = netdev_priv(dev);

	/* Allocate the descriptor rings in noncoherent DMA memory. */
	sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
				   &sp->srings_dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!sr) {
		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	sp->srings = sr;
	sp->rx_desc = sp->srings->rxvector;
	sp->tx_desc = sp->srings->txvector;
	spin_lock_init(&sp->tx_lock);

	/* Link the descriptor chains before the hardware ever sees them. */
	setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

	eth_hw_addr_set(dev, pd->mac);

#ifdef DEBUG
	gpriv = sp;
	gdev = dev;
#endif
	sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
	sp->hregs = &hpcregs->ethregs;
	sp->name = sgiseeqstr;
	sp->mode = SEEQ_RCMD_RBCAST;

	/* Setup PIO and DMA transfer timing. */
	sp->hregs->pconfig = 0x161;
	sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
			     HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

	/* Reset the chip. */
	hpc3_eth_reset(sp->hregs);

	/* A zeroed transmit collision counter identifies the EDLC variant
	 * of the chip, which has the extra control registers used in
	 * init_seeq(). */
	sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
	if (sp->is_edlc)
		sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			      SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			      SEEQ_CTRL_ENCARR;

	dev->netdev_ops = &sgiseeq_netdev_ops;
	dev->watchdog_timeo = (200 * HZ) / 1000;
	dev->irq = irq;

	if (register_netdev(dev)) {
		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
		       "aborting.\n");
		err = -ENODEV;
		goto err_out_free_attrs;
	}

	printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

	return 0;

err_out_free_attrs:
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma, DMA_BIDIRECTIONAL);
err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static int sgiseeq_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sgiseeq_private *sp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma, DMA_BIDIRECTIONAL);
	free_netdev(dev);

	return 0;
}

static struct platform_driver sgiseeq_driver = {
	.probe	= sgiseeq_probe,
	.remove	= sgiseeq_remove,
	.driver = {
		.name	= "sgiseeq",
	}
};

module_platform_driver(sgiseeq_driver);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");