/*
	drivers/net/ethernet/dec/tulip/interrupt.c

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License (GPL), incorporated herein by
	reference.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/

#include <linux/pci.h>
#include "tulip.h"
#include <linux/etherdevice.h>
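
/* Tuning knobs, filled in by the driver core at init time: packets
 * shorter than tulip_rx_copybreak are copied into a freshly allocated
 * skb rather than unmapping the ring buffer, and
 * tulip_max_interrupt_work bounds the events handled per interrupt. */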
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

static unsigned int mit_table[MIT_SIZE+1] =
{
	/* CSR11 21143 hardware Mitigation Control Interrupt.
	   We use only RX mitigation; other techniques are used
	   for TX intr. mitigation.

	   31    Cycle Size (timer control)
	   30:27 TX timer in 16 * Cycle size
	   26:24 TX No pkts before Int.
	   23:20 RX timer in Cycle size
	   19:17 RX No pkts before Int.
	   16    Continuous Mode (CM)
	*/
	0x0,        /* IM disabled */
	0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
	0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
	0x80270000, /* RX time = 2, RX pkts = 3, CM = 1 */
	0x80370000, /* RX time = 3, RX pkts = 3, CM = 1 */
	0x80490000, /* RX time = 4, RX pkts = 4, CM = 1 */
	0x80590000, /* RX time = 5, RX pkts = 4, CM = 1 */
	0x80690000, /* RX time = 6, RX pkts = 4, CM = 1 */
	0x807B0000, /* RX time = 7, RX pkts = 5, CM = 1 */
	0x808B0000, /* RX time = 8, RX pkts = 5, CM = 1 */
	0x809D0000, /* RX time = 9, RX pkts = 6, CM = 1 */
	0x80AD0000, /* RX time = 10, RX pkts = 6, CM = 1 */
	0x80BD0000, /* RX time = 11, RX pkts = 6, CM = 1 */
	0x80CF0000, /* RX time = 12, RX pkts = 7, CM = 1 */
	0x80DF0000, /* RX time = 13, RX pkts = 7, CM = 1 */
	0x80F10000  /* RX time = 15, RX pkts = 0, CM = 1 */
};
#endif
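
/* Refill empty slots of the Rx ring between dirty_rx and cur_rx:
 * allocate and DMA-map a fresh skb for each slot whose buffer was
 * handed up the stack, then return the descriptor to the chip by
 * setting DescOwned.  Returns the number of buffers refilled. */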
int tulip_refill_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	int refilled = 0;

	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
		entry = tp->dirty_rx % RX_RING_SIZE;
		if (tp->rx_buffers[entry].skb == NULL) {
			struct sk_buff *skb;
			dma_addr_t mapping;

			skb = tp->rx_buffers[entry].skb =
				netdev_alloc_skb(dev, PKT_BUF_SZ);
			if (skb == NULL)
				break;

			mapping = dma_map_single(&tp->pdev->dev, skb->data,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);
			if (dma_mapping_error(&tp->pdev->dev, mapping)) {
				dev_kfree_skb(skb);
				tp->rx_buffers[entry].skb = NULL;
				break;
			}

			tp->rx_buffers[entry].mapping = mapping;

			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
			refilled++;
		}
		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
	}
	if (tp->chip_id == LC82C168) {
		if (((ioread32(tp->base_addr + CSR5) >> 17) & 0x07) == 4) {
			/* Rx stopped due to out of buffers,
			 * restart it
			 */
			iowrite32(0x01, tp->base_addr + CSR2);
		}
	}
	return refilled;
}

#ifdef CONFIG_TULIP_NAPI
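
/* The Rx ring could not be refilled for lack of memory; this timer
 * reschedules NAPI polling so the refill can be retried later. */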
void oom_timer(struct timer_list *t)
{
	struct tulip_private *tp = from_timer(tp, t, oom_timer);

	napi_schedule(&tp->napi);
}
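
/* NAPI poll callback: drain completed Rx descriptors up to the given
 * budget, refilling the ring as we go, and re-enable Rx interrupts
 * only once the ring has been processed and successfully refilled. */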
int tulip_poll(struct napi_struct *napi, int budget)
{
	struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
	struct net_device *dev = tp->dev;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int work_done = 0;
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
	int received = 0;
#endif

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS */

	if (budget >= RX_RING_SIZE)
		budget--;
#endif

	if (tulip_debug > 4)
		netdev_dbg(dev, " In tulip_poll(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);

	do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
			break;
		}
		/* Acknowledge current RX interrupt sources. */
		iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);

		/* If we own the next entry, it is a new packet. Send it up. */
		while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);
			short pkt_len;

			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
					   entry, status);

			if (++work_done >= budget)
				goto not_done;

			/*
			 * Omit the four octet CRC from the length.
			 * (May not be considered valid until we have
			 * checked the status for the RxLengthOver2047 bits)
			 */
			pkt_len = ((status >> 16) & 0x7ff) - 4;

			/*
			 * Maximum pkt_len is 1518 (1514 + vlan header)
			 * Anything higher than this is always invalid
			 * regardless of RxLengthOver2047 bits
			 */
			if ((status & (RxLengthOver2047 |
				       RxDescCRCError |
				       RxDescCollisionSeen |
				       RxDescRunt |
				       RxDescDescErr |
				       RxWholePkt)) != RxWholePkt ||
			    pkt_len > 1518) {
				if ((status & (RxLengthOver2047 |
					       RxWholePkt)) != RxWholePkt) {
					/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							netdev_warn(dev,
								    "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
								    status);
						dev->stats.rx_length_errors++;
					}
				} else {
					/* There was a fatal error. */
					if (tulip_debug > 2)
						netdev_dbg(dev, "Receive error, Rx status %08x\n",
							   status);
					dev->stats.rx_errors++;
					if (pkt_len > 1518 ||
					    (status & RxDescRunt))
						dev->stats.rx_length_errors++;

					if (status & 0x0004)
						dev->stats.rx_frame_errors++;
					if (status & 0x0002)
						dev->stats.rx_crc_errors++;
					if (status & 0x0001)
						dev->stats.rx_fifo_errors++;
				}
			} else {
				struct sk_buff *skb;

				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak &&
				    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					dma_sync_single_for_cpu(&tp->pdev->dev,
								tp->rx_buffers[entry].mapping,
								pkt_len,
								DMA_FROM_DEVICE);
#if ! defined(__alpha__)
					skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
								pkt_len);
					skb_put(skb, pkt_len);
#else
					skb_put_data(skb,
						     tp->rx_buffers[entry].skb->data,
						     pkt_len);
#endif
					dma_sync_single_for_device(&tp->pdev->dev,
								   tp->rx_buffers[entry].mapping,
								   pkt_len,
								   DMA_FROM_DEVICE);
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						dev_err(&dev->dev,
							"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
							le32_to_cpu(tp->rx_ring[entry].buffer1),
							(unsigned long long)tp->rx_buffers[entry].mapping,
							skb->head, temp);
					}
#endif

					dma_unmap_single(&tp->pdev->dev,
							 tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ,
							 DMA_FROM_DEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
			received++;
#endif

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If new packet arrives
		 * and an irq arrives (tx or just due to occasionally unset
		 * mask), it will be acked by irq handler, but new thread
		 * is not scheduled. It is major hole in design.
		 * No idea how to fix this if "playing with fire" will fail
		 * tomorrow (night 011029). If it will not fail, we won
		 * finally: amount of IO did not increase at all. */
	} while ((ioread32(tp->base_addr + CSR5) & RxIntr));

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We can have IM enabled
	   continuously but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   This would turn on IM for devices that are not contributing
	   to backlog congestion with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note. We only use min and max (0, 15) settings from mit_table */

	if (tp->flags & HAS_INTR_MITIGATION) {
		if (received > 1) {
			if (!tp->mit_on) {
				tp->mit_on = 1;
				iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
			}
		} else {
			if (tp->mit_on) {
				tp->mit_on = 0;
				iowrite32(0, tp->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	/* Remove us from polling list and enable RX intr. */
	napi_complete_done(napi, work_done);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with dis/enabling irqs in other poll threads
	 * 3. if an irq raised after beginning loop, it will be immediately
	 *    triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return work_done;

 not_done:
	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		goto oom;

	return work_done;

 oom:	/* Executed with RX ints disabled */

	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of bug.
	 * Timer can be pending now but fired and completed
	 * before we did napi_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	napi_complete_done(napi, work_done);

	return work_done;
}

#else /* !CONFIG_TULIP_NAPI */
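
/* Non-NAPI receive path, called from the interrupt handler: drain
 * every completed Rx descriptor, handing packets up via netif_rx().
 * Returns the number of packets received. */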
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
			   entry, tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while (!(tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
		short pkt_len;

		if (tulip_debug > 5)
			netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
				   entry, status);
		if (--rx_work_limit < 0)
			break;

		/*
		 * Omit the four octet CRC from the length.
		 * (May not be considered valid until we have
		 * checked the status for the RxLengthOver2047 bits)
		 */
		pkt_len = ((status >> 16) & 0x7ff) - 4;

		/*
		 * Maximum pkt_len is 1518 (1514 + vlan header)
		 * Anything higher than this is always invalid
		 * regardless of RxLengthOver2047 bits
		 */
		if ((status & (RxLengthOver2047 |
			       RxDescCRCError |
			       RxDescCollisionSeen |
			       RxDescRunt |
			       RxDescDescErr |
			       RxWholePkt)) != RxWholePkt ||
		    pkt_len > 1518) {
			if ((status & (RxLengthOver2047 |
				       RxWholePkt)) != RxWholePkt) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						netdev_warn(dev,
							    "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
							    status);
					dev->stats.rx_length_errors++;
				}
			} else {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				dev->stats.rx_errors++;
				if (pkt_len > 1518 ||
				    (status & RxDescRunt))
					dev->stats.rx_length_errors++;
				if (status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (status & 0x0002)
					dev->stats.rx_crc_errors++;
				if (status & 0x0001)
					dev->stats.rx_fifo_errors++;
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&tp->pdev->dev,
							tp->rx_buffers[entry].mapping,
							pkt_len,
							DMA_FROM_DEVICE);
#if ! defined(__alpha__)
				skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
							pkt_len);
				skb_put(skb, pkt_len);
#else
				skb_put_data(skb,
					     tp->rx_buffers[entry].skb->data,
					     pkt_len);
#endif
				dma_sync_single_for_device(&tp->pdev->dev,
							   tp->rx_buffers[entry].mapping,
							   pkt_len,
							   DMA_FROM_DEVICE);
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					dev_err(&dev->dev,
						"Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
						le32_to_cpu(tp->rx_ring[entry].buffer1),
						(unsigned long long)tp->rx_buffers[entry].mapping,
						skb->head, temp);
				}
#endif

				dma_unmap_single(&tp->pdev->dev,
						 tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, DMA_FROM_DEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
#endif /* CONFIG_TULIP_NAPI */
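
/* Some boards route a PHY interrupt through the chip (the hppa case
 * below): detect a CSR12 change, ack it, and re-check duplex. */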
static inline unsigned int phy_interrupt(struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = netdev_priv(dev);
	int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;

	if (csr12 != tp->csr12_shadow) {
		/* ack interrupt */
		iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
		tp->csr12_shadow = csr12;

		spin_lock(&tp->lock);
		tulip_check_duplex(dev);
		spin_unlock(&tp->lock);

		iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);

		return 1;
	}
#endif

	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr5;
	int missed;
	int rx = 0;
	int tx = 0;
	int oi = 0;
	int maxrx = RX_RING_SIZE;
	int maxtx = TX_RING_SIZE;
	int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
	int rxd = 0;
#else
	int entry;
#endif
	unsigned int work_count = tulip_max_interrupt_work;
	unsigned int handled = 0;

	/* Let's see whether the interrupt really is for us */
	csr5 = ioread32(ioaddr + CSR5);

	if (tp->flags & HAS_PHY_IRQ)
		handled = phy_interrupt(dev);

	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
		return IRQ_RETVAL(handled);

	tp->nir++;

	do {
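		/* Service events until everything pending has been acked or
		 * the per-interrupt work limit is reached. */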

#ifdef CONFIG_TULIP_NAPI

		if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
			rxd++;
			/* Mask RX intrs and add the device to poll list. */
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
			napi_schedule(&tp->napi);

			if (!(csr5 & ~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
				break;
		}

		/* Acknowledge the interrupt sources we handle here ASAP --
		   the poll function does Rx and RxNoBuf acking. */

		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);

		if (csr5 & (RxIntr | RxNoBuf)) {
			rx += tulip_rx(dev);
			tulip_refill_rx(dev);
		}

#endif /* CONFIG_TULIP_NAPI */

		if (tulip_debug > 4)
			netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n",
				   csr5, ioread32(ioaddr + CSR5));

		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
			unsigned int dirty_tx;

			spin_lock(&tp->lock);

			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = le32_to_cpu(tp->tx_ring[entry].status);

				if (status < 0)
					break;	/* It still has not been Txed */

				/* Check for Rx filter setup frames. */
				if (tp->tx_buffers[entry].skb == NULL) {
					/* test because dummy frames not mapped */
					if (tp->tx_buffers[entry].mapping)
						dma_unmap_single(&tp->pdev->dev,
								 tp->tx_buffers[entry].mapping,
								 sizeof(tp->setup_frame),
								 DMA_TO_DEVICE);
					continue;
				}

				if (status & 0x8000) {
					/* There was a major error, log it. */
#ifndef final_version
					if (tulip_debug > 1)
						netdev_dbg(dev, "Transmit error, Tx status %08x\n",
							   status);
#endif
					dev->stats.tx_errors++;
					if (status & 0x4104)
						dev->stats.tx_aborted_errors++;
					if (status & 0x0C00)
						dev->stats.tx_carrier_errors++;
					if (status & 0x0200)
						dev->stats.tx_window_errors++;
					if (status & 0x0002)
						dev->stats.tx_fifo_errors++;
					if ((status & 0x0080) && tp->full_duplex == 0)
						dev->stats.tx_heartbeat_errors++;
				} else {
					dev->stats.tx_bytes +=
						tp->tx_buffers[entry].skb->len;
					dev->stats.collisions += (status >> 3) & 15;
					dev->stats.tx_packets++;
				}

				dma_unmap_single(&tp->pdev->dev,
						 tp->tx_buffers[entry].mapping,
						 tp->tx_buffers[entry].skb->len,
						 DMA_TO_DEVICE);

				/* Free the original skb. */
				dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tx++;
			}

#ifndef final_version
			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
				dev_err(&dev->dev,
					"Out-of-sync dirty pointer, %d vs. %d\n",
					dirty_tx, tp->cur_tx);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
				netif_wake_queue(dev);

			tp->dirty_tx = dirty_tx;
			if (csr5 & TxDied) {
				if (tulip_debug > 2)
					dev_warn(&dev->dev,
						 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
						 csr5, ioread32(ioaddr + CSR6),
						 tp->csr6);
				tulip_restart_rxtx(tp);
			}
			spin_unlock(&tp->lock);
		}

		/* Log errors. */
		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
			if (csr5 == 0xffffffff)
				break;
			if (csr5 & TxJabber)
				dev->stats.tx_errors++;
			if (csr5 & TxFIFOUnderflow) {
				if ((tp->csr6 & 0xC000) != 0xC000)
					tp->csr6 += 0x4000;	/* Bump up the Tx threshold */
				else
					tp->csr6 |= 0x00200000;	/* Store-n-forward. */
				/* Restart the transmit process. */
				tulip_restart_rxtx(tp);
				iowrite32(0, ioaddr + CSR1);
			}
			if (csr5 & (RxDied | RxNoBuf)) {
				if (tp->flags & COMET_MAC_ADDR) {
					iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
					iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
				}
			}
			if (csr5 & RxDied) {	/* Missed a Rx frame. */
				dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
				dev->stats.rx_errors++;
				tulip_start_rxtx(tp);
			}
			/*
			 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
			 * call is ever done under the spinlock
			 */
			if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
				if (tp->link_change)
					(tp->link_change)(dev, csr5);
			}
			if (csr5 & SystemError) {
				int error = (csr5 >> 23) & 7;
				/* oops, we hit a PCI error.  The code produced corresponds
				 * to the reason:
				 *  0 - parity error
				 *  1 - master abort
				 *  2 - target abort
				 * Note that on parity error, we should do a software reset
				 * of the chip to get it back into a sane state (according
				 * to the 21142/3 docs that is).
				 *   -- rmk
				 */
				dev_err(&dev->dev,
					"(%lu) System Error occurred (%d)\n",
					tp->nir, error);
			}
			/* Clear all error sources, included undocumented ones! */
			iowrite32(0x0800f7ba, ioaddr + CSR5);
			oi++;
		}
		if (csr5 & TimerInt) {

			if (tulip_debug > 2)
				dev_err(&dev->dev,
					"Re-enabling interrupts, %08x\n",
					csr5);
			iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
			tp->ttimer = 0;
			oi++;
		}
		if (tx > maxtx || rx > maxrx || oi > maxoi) {
			if (tulip_debug > 1)
				dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
					 csr5, tp->nir, tx, rx, oi);

			/* Acknowledge all interrupt sources. */
			iowrite32(0x8001ffff, ioaddr + CSR5);
			if (tp->flags & HAS_INTR_MITIGATION) {
				/* Josip Loncaric at ICASE did extensive experimentation
				   to develop a good interrupt mitigation setting. */
				iowrite32(0x8b240000, ioaddr + CSR11);
			} else if (tp->chip_id == LC82C168) {
				/* the pnic's phy interrupt bit is squelched at startup. */
				iowrite32(0x00, ioaddr + CSR7);
				mod_timer(&tp->timer, RUN_AT(HZ/50));
			} else {
				/* Mask all interrupting sources, set timer to
				   re-enable. */
				iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
				iowrite32(0x0012, ioaddr + CSR11);
			}
			break;
		}

		work_count--;
		if (work_count == 0)
			break;

		csr5 = ioread32(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
		if (rxd)
			csr5 &= ~RxPollInt;
	} while ((csr5 & (TxNoBuf |
			  TxDied |
			  TxIntr |
			  TimerInt |
			  /* Abnormal intr. */
			  RxDied |
			  TxFIFOUnderflow |
			  TxJabber |
			  TPLnkFail |
			  SystemError)) != 0);
#else
	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

	tulip_refill_rx(dev);

	/* check if the card is in suspend mode */
	entry = tp->dirty_rx % RX_RING_SIZE;
	if (tp->rx_buffers[entry].skb == NULL) {
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
				 tp->nir, tp->cur_rx, tp->ttimer, rx);
		if (tp->chip_id == LC82C168) {
			iowrite32(0x00, ioaddr + CSR7);
			mod_timer(&tp->timer, RUN_AT(HZ/50));
		} else {
			if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
				if (tulip_debug > 1)
					dev_warn(&dev->dev,
						 "in rx suspend mode: (%lu) set timer\n",
						 tp->nir);
				iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
					  ioaddr + CSR7);
				iowrite32(TimerInt, ioaddr + CSR5);
				iowrite32(12, ioaddr + CSR11);
				tp->ttimer = 1;
			}
		}
	}
#endif /* CONFIG_TULIP_NAPI */

	if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
		dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
	}

	if (tulip_debug > 4)
		netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
			   ioread32(ioaddr + CSR5));

	return IRQ_HANDLED;
}