/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
 *
 * Copyright (C) Sun Microsystems Inc.
 * Author: Adrian Sun (asun@darksunrising.com)
 */

0055 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0056
0057 #include <linux/module.h>
0058 #include <linux/kernel.h>
0059 #include <linux/types.h>
0060 #include <linux/compiler.h>
0061 #include <linux/slab.h>
0062 #include <linux/delay.h>
0063 #include <linux/init.h>
0064 #include <linux/interrupt.h>
0065 #include <linux/vmalloc.h>
0066 #include <linux/ioport.h>
0067 #include <linux/pci.h>
0068 #include <linux/mm.h>
0069 #include <linux/highmem.h>
0070 #include <linux/list.h>
0071 #include <linux/dma-mapping.h>
0072
0073 #include <linux/netdevice.h>
0074 #include <linux/etherdevice.h>
0075 #include <linux/skbuff.h>
0076 #include <linux/ethtool.h>
0077 #include <linux/crc32.h>
0078 #include <linux/random.h>
0079 #include <linux/mii.h>
0080 #include <linux/ip.h>
0081 #include <linux/tcp.h>
0082 #include <linux/mutex.h>
0083 #include <linux/firmware.h>
0084
0085 #include <net/checksum.h>
0086
0087 #include <linux/atomic.h>
0088 #include <asm/io.h>
0089 #include <asm/byteorder.h>
0090 #include <linux/uaccess.h>
0091 #include <linux/jiffies.h>
0092
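/* local convenience wrappers: kmap/kunmap an RX page, count online CPUs
 * for header-parser setup, and hand completed RX skbs to the stack via
 * netif_rx()
 */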
0093 #define cas_page_map(x) kmap_atomic((x))
0094 #define cas_page_unmap(x) kunmap_atomic((x))
0095 #define CAS_NCPUS num_online_cpus()
0096
0097 #define cas_skb_release(x) netif_rx(x)
0098
0099
0100 #define USE_HP_WORKAROUND
0101 #define HP_WORKAROUND_DEFAULT
0102 #define CAS_HP_ALT_FIRMWARE cas_prog_null
0103
0104 #include "cassini.h"
0105
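/* compile-time options: TX completion writeback, CSMA/CD, RX interrupt
 * blanking, optional extra PCI interrupt lines, and RX buffer handling
 * policy (page order, batching, copy thresholds)
 */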
0106 #define USE_TX_COMPWB
0107 #define USE_CSMA_CD_PROTO
0108 #define USE_RX_BLANK
0109 #undef USE_ENTROPY_DEV
0110
0111
0112
0113
0114 #undef USE_PCI_INTB
0115 #undef USE_PCI_INTC
0116 #undef USE_PCI_INTD
0117 #undef USE_QOS
0118
0119 #undef USE_VPD_DEBUG
0120
0121
0122 #define USE_PAGE_ORDER
0123 #define RX_DONT_BATCH 0
0124 #define RX_COPY_ALWAYS 0
0125 #define RX_COPY_MIN 64
0126 #undef RX_COUNT_BUFFERS
0127
0128 #define DRV_MODULE_NAME "cassini"
0129 #define DRV_MODULE_VERSION "1.6"
0130 #define DRV_MODULE_RELDATE "21 May 2008"
0131
0132 #define CAS_DEF_MSG_ENABLE \
0133 (NETIF_MSG_DRV | \
0134 NETIF_MSG_PROBE | \
0135 NETIF_MSG_LINK | \
0136 NETIF_MSG_TIMER | \
0137 NETIF_MSG_IFDOWN | \
0138 NETIF_MSG_IFUP | \
0139 NETIF_MSG_RX_ERR | \
0140 NETIF_MSG_TX_ERR)

/* timeouts, in jiffies: TX watchdog interval and the slow/fast link
 * timer intervals
 */
0145 #define CAS_TX_TIMEOUT (HZ)
0146 #define CAS_LINK_TIMEOUT (22*HZ/10)
0147 #define CAS_LINK_FAST_TIMEOUT (1)

/* maximum number of polling iterations when waiting for PHY or chip
 * state machines to settle
 */
0152 #define STOP_TRIES_PHY 1000
0153 #define STOP_TRIES 5000

/* minimum frame sizes and MTU limits; the maximum MTU is bounded by
 * two RX pages minus overhead ((page_size << 1) - 0x50), capped at 9000
 */
0159 #define CAS_MIN_FRAME 97
0160 #define CAS_1000MB_MIN_FRAME 255
0161 #define CAS_MIN_MTU 60
0162 #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
0163
#if 1
/* reset requests are tracked with separate atomic counters
 * (reset_task_pending_*) instead of a single request code, to avoid
 * racing updates; the #else branch keeps the old single-value scheme
 */
0169 #else
0170 #define CAS_RESET_MTU 1
0171 #define CAS_RESET_ALL 2
0172 #define CAS_RESET_SPARE 3
0173 #endif
0174
0175 static char version[] =
0176 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
0177
0178 static int cassini_debug = -1;
0179 static int link_mode;
0180
0181 MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
0182 MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
0183 MODULE_LICENSE("GPL");
0184 MODULE_FIRMWARE("sun/cassini.bin");
0185 module_param(cassini_debug, int, 0);
0186 MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
0187 module_param(link_mode, int, 0);
0188 MODULE_PARM_DESC(link_mode, "default link mode");
0189
0190
0191
0192
0193
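/* default value (in seconds) for the linkdown_timeout module parameter
 * below, used to work around the PCS link-down issue
 */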
0194 #define DEFAULT_LINKDOWN_TIMEOUT 5
0195
0196
0197
0198 static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
0199 module_param(linkdown_timeout, int, 0);
0200 MODULE_PARM_DESC(linkdown_timeout,
0201 "min reset interval in sec. for PCS linkdown issue; disabled if not positive");
0202
0203
0204
0205
0206
0207
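/* when link_transition_timeout is zero, the PCS link-down reset
 * workaround in cas_pcs_link_check() is disabled
 */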
0208 static int link_transition_timeout;
0209
0210
0211
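/* forced BMCR values indexed by the link_mode parameter:
 * 0 = autoneg, 1 = 10bt half, 2 = 100bt half, 3 = 10bt full,
 * 4 = 100bt full, 5 = 1000bt full
 */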
0212 static u16 link_modes[] = {
0213 BMCR_ANENABLE,
0214 0,
0215 BMCR_SPEED100,
0216 BMCR_FULLDPLX,
0217 BMCR_SPEED100|BMCR_FULLDPLX,
0218 CAS_BMCR_SPEED1000|BMCR_FULLDPLX
0219 };
0220
0221 static const struct pci_device_id cas_pci_tbl[] = {
0222 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
0223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
0224 { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
0225 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
0226 { 0, }
0227 };
0228
0229 MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
0230
0231 static void cas_set_link_modes(struct cas *cp);
0232
0233 static inline void cas_lock_tx(struct cas *cp)
0234 {
0235 int i;
0236
0237 for (i = 0; i < N_TX_RINGS; i++)
0238 spin_lock_nested(&cp->tx_lock[i], i);
0239 }
0240
/* cas_lock_all_save()/cas_unlock_all_restore() take the device lock
 * with interrupts disabled and then every TX ring lock in ring order;
 * the unlock path releases the TX locks in reverse order before
 * restoring interrupts
 */
0249 #define cas_lock_all_save(cp, flags) \
0250 do { \
0251 struct cas *xxxcp = (cp); \
0252 spin_lock_irqsave(&xxxcp->lock, flags); \
0253 cas_lock_tx(xxxcp); \
0254 } while (0)
0255
0256 static inline void cas_unlock_tx(struct cas *cp)
0257 {
0258 int i;
0259
0260 for (i = N_TX_RINGS; i > 0; i--)
0261 spin_unlock(&cp->tx_lock[i - 1]);
0262 }
0263
0264 #define cas_unlock_all_restore(cp, flags) \
0265 do { \
0266 struct cas *xxxcp = (cp); \
0267 cas_unlock_tx(xxxcp); \
0268 spin_unlock_irqrestore(&xxxcp->lock, flags); \
0269 } while (0)
0270
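/* mask interrupts for one RX completion ring; ring 0 uses the global
 * interrupt mask register, the others use the per-ring REG_PLUS masks
 */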
0271 static void cas_disable_irq(struct cas *cp, const int ring)
0272 {
0273
0274 if (ring == 0) {
0275 writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
0276 return;
0277 }
0278
0279
0280 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
0281 switch (ring) {
0282 #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
0283 #ifdef USE_PCI_INTB
0284 case 1:
0285 #endif
0286 #ifdef USE_PCI_INTC
0287 case 2:
0288 #endif
0289 #ifdef USE_PCI_INTD
0290 case 3:
0291 #endif
0292 writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
0293 cp->regs + REG_PLUS_INTRN_MASK(ring));
0294 break;
0295 #endif
0296 default:
0297 writel(INTRN_MASK_CLEAR_ALL, cp->regs +
0298 REG_PLUS_INTRN_MASK(ring));
0299 break;
0300 }
0301 }
0302 }
0303
0304 static inline void cas_mask_intr(struct cas *cp)
0305 {
0306 int i;
0307
0308 for (i = 0; i < N_RX_COMP_RINGS; i++)
0309 cas_disable_irq(cp, i);
0310 }
0311
0312 static void cas_enable_irq(struct cas *cp, const int ring)
0313 {
0314 if (ring == 0) {
0315 writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
0316 return;
0317 }
0318
0319 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
0320 switch (ring) {
0321 #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
0322 #ifdef USE_PCI_INTB
0323 case 1:
0324 #endif
0325 #ifdef USE_PCI_INTC
0326 case 2:
0327 #endif
0328 #ifdef USE_PCI_INTD
0329 case 3:
0330 #endif
0331 writel(INTRN_MASK_RX_EN, cp->regs +
0332 REG_PLUS_INTRN_MASK(ring));
0333 break;
0334 #endif
0335 default:
0336 break;
0337 }
0338 }
0339 }
0340
0341 static inline void cas_unmask_intr(struct cas *cp)
0342 {
0343 int i;
0344
0345 for (i = 0; i < N_RX_COMP_RINGS; i++)
0346 cas_enable_irq(cp, i);
0347 }
0348
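/* feed the chip's entropy register into the kernel RNG; compiled out by
 * default (USE_ENTROPY_DEV is #undef'd above)
 */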
0349 static inline void cas_entropy_gather(struct cas *cp)
0350 {
0351 #ifdef USE_ENTROPY_DEV
0352 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
0353 return;
0354
0355 batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
0356 readl(cp->regs + REG_ENTROPY_IV),
0357 sizeof(uint64_t)*8);
0358 #endif
0359 }
0360
0361 static inline void cas_entropy_reset(struct cas *cp)
0362 {
0363 #ifdef USE_ENTROPY_DEV
0364 if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
0365 return;
0366
0367 writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
0368 cp->regs + REG_BIM_LOCAL_DEV_EN);
0369 writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
0370 writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
0371
0372
0373 if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
0374 cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
0375 #endif
0376 }
0377
0378
0379
0380
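/* MII access through the MIF frame register: issue a read/write frame
 * and poll for the turn-around bit; cas_phy_read() returns 0xFFFF and
 * cas_phy_write() returns -1 on timeout
 */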
0381 static u16 cas_phy_read(struct cas *cp, int reg)
0382 {
0383 u32 cmd;
0384 int limit = STOP_TRIES_PHY;
0385
0386 cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
0387 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
0388 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
0389 cmd |= MIF_FRAME_TURN_AROUND_MSB;
0390 writel(cmd, cp->regs + REG_MIF_FRAME);
0391
0392
0393 while (limit-- > 0) {
0394 udelay(10);
0395 cmd = readl(cp->regs + REG_MIF_FRAME);
0396 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
0397 return cmd & MIF_FRAME_DATA_MASK;
0398 }
0399 return 0xFFFF;
0400 }
0401
0402 static int cas_phy_write(struct cas *cp, int reg, u16 val)
0403 {
0404 int limit = STOP_TRIES_PHY;
0405 u32 cmd;
0406
0407 cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
0408 cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
0409 cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
0410 cmd |= MIF_FRAME_TURN_AROUND_MSB;
0411 cmd |= val & MIF_FRAME_DATA_MASK;
0412 writel(cmd, cp->regs + REG_MIF_FRAME);
0413
0414
0415 while (limit-- > 0) {
0416 udelay(10);
0417 cmd = readl(cp->regs + REG_MIF_FRAME);
0418 if (cmd & MIF_FRAME_TURN_AROUND_LSB)
0419 return 0;
0420 }
0421 return -1;
0422 }
0423
0424 static void cas_phy_powerup(struct cas *cp)
0425 {
0426 u16 ctl = cas_phy_read(cp, MII_BMCR);
0427
0428 if ((ctl & BMCR_PDOWN) == 0)
0429 return;
0430 ctl &= ~BMCR_PDOWN;
0431 cas_phy_write(cp, MII_BMCR, ctl);
0432 }
0433
0434 static void cas_phy_powerdown(struct cas *cp)
0435 {
0436 u16 ctl = cas_phy_read(cp, MII_BMCR);
0437
0438 if (ctl & BMCR_PDOWN)
0439 return;
0440 ctl |= BMCR_PDOWN;
0441 cas_phy_write(cp, MII_BMCR, ctl);
0442 }
0443
0444
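/* unmap and free an RX page and its tracking structure */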
0445 static int cas_page_free(struct cas *cp, cas_page_t *page)
0446 {
0447 dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
0448 DMA_FROM_DEVICE);
0449 __free_pages(page->buffer, cp->page_order);
0450 kfree(page);
0451 return 0;
0452 }
0453
0454 #ifdef RX_COUNT_BUFFERS
0455 #define RX_USED_ADD(x, y) ((x)->used += (y))
0456 #define RX_USED_SET(x, y) ((x)->used = (y))
0457 #else
0458 #define RX_USED_ADD(x, y) do { } while(0)
0459 #define RX_USED_SET(x, y) do { } while(0)
0460 #endif
0461
0462
0463
0464
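/* allocate an RX buffer of cp->page_order pages and map it for DMA;
 * returns NULL if either the bookkeeping struct or the pages cannot
 * be allocated
 */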
0465 static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
0466 {
0467 cas_page_t *page;
0468
0469 page = kmalloc(sizeof(cas_page_t), flags);
0470 if (!page)
0471 return NULL;
0472
0473 INIT_LIST_HEAD(&page->list);
0474 RX_USED_SET(page, 0);
0475 page->buffer = alloc_pages(flags, cp->page_order);
0476 if (!page->buffer)
0477 goto page_err;
0478 page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
0479 cp->page_size, DMA_FROM_DEVICE);
0480 return page;
0481
0482 page_err:
0483 kfree(page);
0484 return NULL;
0485 }
0486
0487
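/* initialize the spare and in-use RX page lists */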
0488 static void cas_spare_init(struct cas *cp)
0489 {
0490 spin_lock(&cp->rx_inuse_lock);
0491 INIT_LIST_HEAD(&cp->rx_inuse_list);
0492 spin_unlock(&cp->rx_inuse_lock);
0493
0494 spin_lock(&cp->rx_spare_lock);
0495 INIT_LIST_HEAD(&cp->rx_spare_list);
0496 cp->rx_spares_needed = RX_SPARE_COUNT;
0497 spin_unlock(&cp->rx_spare_lock);
0498 }
0499
0500
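/* release all spare and in-use RX pages (used on teardown) */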
0501 static void cas_spare_free(struct cas *cp)
0502 {
0503 struct list_head list, *elem, *tmp;
0504
0505
0506 INIT_LIST_HEAD(&list);
0507 spin_lock(&cp->rx_spare_lock);
0508 list_splice_init(&cp->rx_spare_list, &list);
0509 spin_unlock(&cp->rx_spare_lock);
0510 list_for_each_safe(elem, tmp, &list) {
0511 cas_page_free(cp, list_entry(elem, cas_page_t, list));
0512 }
0513
0514 INIT_LIST_HEAD(&list);
0515 #if 1
0516
0517
0518
0519
0520 spin_lock(&cp->rx_inuse_lock);
0521 list_splice_init(&cp->rx_inuse_list, &list);
0522 spin_unlock(&cp->rx_inuse_lock);
0523 #else
0524 spin_lock(&cp->rx_spare_lock);
0525 list_splice_init(&cp->rx_inuse_list, &list);
0526 spin_unlock(&cp->rx_spare_lock);
0527 #endif
0528 list_for_each_safe(elem, tmp, &list) {
0529 cas_page_free(cp, list_entry(elem, cas_page_t, list));
0530 }
0531 }
0532
0533
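/* replenish spares: recycle in-use pages whose refcount has dropped
 * back to one, then allocate fresh pages until rx_spares_needed is met
 */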
0534 static void cas_spare_recover(struct cas *cp, const gfp_t flags)
0535 {
0536 struct list_head list, *elem, *tmp;
0537 int needed, i;
0538
0539
0540
0541
0542
0543
0544 INIT_LIST_HEAD(&list);
0545 spin_lock(&cp->rx_inuse_lock);
0546 list_splice_init(&cp->rx_inuse_list, &list);
0547 spin_unlock(&cp->rx_inuse_lock);
0548
0549 list_for_each_safe(elem, tmp, &list) {
0550 cas_page_t *page = list_entry(elem, cas_page_t, list);
0551
0552
0553
0554
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564 if (page_count(page->buffer) > 1)
0565 continue;
0566
0567 list_del(elem);
0568 spin_lock(&cp->rx_spare_lock);
0569 if (cp->rx_spares_needed > 0) {
0570 list_add(elem, &cp->rx_spare_list);
0571 cp->rx_spares_needed--;
0572 spin_unlock(&cp->rx_spare_lock);
0573 } else {
0574 spin_unlock(&cp->rx_spare_lock);
0575 cas_page_free(cp, page);
0576 }
0577 }
0578
0579
0580 if (!list_empty(&list)) {
0581 spin_lock(&cp->rx_inuse_lock);
0582 list_splice(&list, &cp->rx_inuse_list);
0583 spin_unlock(&cp->rx_inuse_lock);
0584 }
0585
0586 spin_lock(&cp->rx_spare_lock);
0587 needed = cp->rx_spares_needed;
0588 spin_unlock(&cp->rx_spare_lock);
0589 if (!needed)
0590 return;
0591
0592
0593 INIT_LIST_HEAD(&list);
0594 i = 0;
0595 while (i < needed) {
0596 cas_page_t *spare = cas_page_alloc(cp, flags);
0597 if (!spare)
0598 break;
0599 list_add(&spare->list, &list);
0600 i++;
0601 }
0602
0603 spin_lock(&cp->rx_spare_lock);
0604 list_splice(&list, &cp->rx_spare_list);
0605 cp->rx_spares_needed -= i;
0606 spin_unlock(&cp->rx_spare_lock);
0607 }
0608
0609
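/* pull a page off the spare list; as rx_spares_needed grows, the reset
 * task is periodically scheduled to replenish the spare pool
 */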
0610 static cas_page_t *cas_page_dequeue(struct cas *cp)
0611 {
0612 struct list_head *entry;
0613 int recover;
0614
0615 spin_lock(&cp->rx_spare_lock);
0616 if (list_empty(&cp->rx_spare_list)) {
0617
0618 spin_unlock(&cp->rx_spare_lock);
0619 cas_spare_recover(cp, GFP_ATOMIC);
0620 spin_lock(&cp->rx_spare_lock);
0621 if (list_empty(&cp->rx_spare_list)) {
0622 netif_err(cp, rx_err, cp->dev,
0623 "no spare buffers available\n");
0624 spin_unlock(&cp->rx_spare_lock);
0625 return NULL;
0626 }
0627 }
0628
0629 entry = cp->rx_spare_list.next;
0630 list_del(entry);
0631 recover = ++cp->rx_spares_needed;
0632 spin_unlock(&cp->rx_spare_lock);
0633
0634
0635 if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
0636 #if 1
0637 atomic_inc(&cp->reset_task_pending);
0638 atomic_inc(&cp->reset_task_pending_spare);
0639 schedule_work(&cp->reset_task);
0640 #else
0641 atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
0642 schedule_work(&cp->reset_task);
0643 #endif
0644 }
0645 return list_entry(entry, cas_page_t, list);
0646 }
0647
0648
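/* enable or disable MIF polling of the PHY's BMSR register */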
0649 static void cas_mif_poll(struct cas *cp, const int enable)
0650 {
0651 u32 cfg;
0652
0653 cfg = readl(cp->regs + REG_MIF_CFG);
0654 cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
0655
0656 if (cp->phy_type & CAS_PHY_MII_MDIO1)
0657 cfg |= MIF_CFG_PHY_SELECT;
0658
0659
0660 if (enable) {
0661 cfg |= MIF_CFG_POLL_EN;
0662 cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
0663 cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
0664 }
0665 writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
0666 cp->regs + REG_MIF_MASK);
0667 writel(cfg, cp->regs + REG_MIF_CFG);
0668 }
0669
0670
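/* (re)start link configuration: record the requested mode and either
 * kick PCS/serdes autonegotiation or program the MII PHY accordingly
 */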
0671 static void cas_begin_auto_negotiation(struct cas *cp,
0672 const struct ethtool_link_ksettings *ep)
0673 {
0674 u16 ctl;
0675 #if 1
0676 int lcntl;
0677 int changed = 0;
0678 int oldstate = cp->lstate;
0679 int link_was_not_down = !(oldstate == link_down);
0680 #endif
0681
0682 if (!ep)
0683 goto start_aneg;
0684 lcntl = cp->link_cntl;
0685 if (ep->base.autoneg == AUTONEG_ENABLE) {
0686 cp->link_cntl = BMCR_ANENABLE;
0687 } else {
0688 u32 speed = ep->base.speed;
0689 cp->link_cntl = 0;
0690 if (speed == SPEED_100)
0691 cp->link_cntl |= BMCR_SPEED100;
0692 else if (speed == SPEED_1000)
0693 cp->link_cntl |= CAS_BMCR_SPEED1000;
0694 if (ep->base.duplex == DUPLEX_FULL)
0695 cp->link_cntl |= BMCR_FULLDPLX;
0696 }
0697 #if 1
0698 changed = (lcntl != cp->link_cntl);
0699 #endif
0700 start_aneg:
0701 if (cp->lstate == link_up) {
0702 netdev_info(cp->dev, "PCS link down\n");
0703 } else {
0704 if (changed) {
0705 netdev_info(cp->dev, "link configuration changed\n");
0706 }
0707 }
0708 cp->lstate = link_down;
0709 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
0710 if (!cp->hw_running)
0711 return;
0712 #if 1
0713
0714
0715
0716
0717
0718 if (oldstate == link_up)
0719 netif_carrier_off(cp->dev);
0720 if (changed && link_was_not_down) {
0721
0722
0723
0724
0725
0726 atomic_inc(&cp->reset_task_pending);
0727 atomic_inc(&cp->reset_task_pending_all);
0728 schedule_work(&cp->reset_task);
0729 cp->timer_ticks = 0;
0730 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
0731 return;
0732 }
0733 #endif
0734 if (cp->phy_type & CAS_PHY_SERDES) {
0735 u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
0736
0737 if (cp->link_cntl & BMCR_ANENABLE) {
0738 val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
0739 cp->lstate = link_aneg;
0740 } else {
0741 if (cp->link_cntl & BMCR_FULLDPLX)
0742 val |= PCS_MII_CTRL_DUPLEX;
0743 val &= ~PCS_MII_AUTONEG_EN;
0744 cp->lstate = link_force_ok;
0745 }
0746 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
0747 writel(val, cp->regs + REG_PCS_MII_CTRL);
0748
0749 } else {
0750 cas_mif_poll(cp, 0);
0751 ctl = cas_phy_read(cp, MII_BMCR);
0752 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
0753 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
0754 ctl |= cp->link_cntl;
0755 if (ctl & BMCR_ANENABLE) {
0756 ctl |= BMCR_ANRESTART;
0757 cp->lstate = link_aneg;
0758 } else {
0759 cp->lstate = link_force_ok;
0760 }
0761 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
0762 cas_phy_write(cp, MII_BMCR, ctl);
0763 cas_mif_poll(cp, 1);
0764 }
0765
0766 cp->timer_ticks = 0;
0767 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
0768 }
0769
0770
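/* reset the MII PHY and wait for the reset bit to clear; returns
 * nonzero on timeout
 */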
0771 static int cas_reset_mii_phy(struct cas *cp)
0772 {
0773 int limit = STOP_TRIES_PHY;
0774 u16 val;
0775
0776 cas_phy_write(cp, MII_BMCR, BMCR_RESET);
0777 udelay(100);
0778 while (--limit) {
0779 val = cas_phy_read(cp, MII_BMCR);
0780 if ((val & BMCR_RESET) == 0)
0781 break;
0782 udelay(10);
0783 }
0784 return limit <= 0;
0785 }
0786
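/* request the DP83065 (Saturn) PHY firmware image "sun/cassini.bin"
 * from userspace and cache it for download
 */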
0787 static void cas_saturn_firmware_init(struct cas *cp)
0788 {
0789 const struct firmware *fw;
0790 const char fw_name[] = "sun/cassini.bin";
0791 int err;
0792
0793 if (PHY_NS_DP83065 != cp->phy_id)
0794 return;
0795
0796 err = request_firmware(&fw, fw_name, &cp->pdev->dev);
0797 if (err) {
0798 pr_err("Failed to load firmware \"%s\"\n",
0799 fw_name);
0800 return;
0801 }
0802 if (fw->size < 2) {
0803 pr_err("bogus length %zu in \"%s\"\n",
0804 fw->size, fw_name);
0805 goto out;
0806 }
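	/* the first two bytes of the image give the load address, low
	 * byte first; the remainder is the program itself
	 */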
	cp->fw_load_addr = fw->data[1] << 8 | fw->data[0];
0808 cp->fw_size = fw->size - 2;
0809 cp->fw_data = vmalloc(cp->fw_size);
0810 if (!cp->fw_data)
0811 goto out;
0812 memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
0813 out:
0814 release_firmware(fw);
0815 }
0816
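/* write the cached firmware image into the PHY's expanded memory and
 * enable it
 */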
0817 static void cas_saturn_firmware_load(struct cas *cp)
0818 {
0819 int i;
0820
0821 if (!cp->fw_data)
0822 return;
0823
0824 cas_phy_powerdown(cp);
0825
0826
0827 cas_phy_write(cp, DP83065_MII_MEM, 0x0);
0828
0829
0830 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
0831 cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
0832 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
0833 cas_phy_write(cp, DP83065_MII_REGD, 0x82);
0834 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
0835 cas_phy_write(cp, DP83065_MII_REGD, 0x0);
0836 cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
0837 cas_phy_write(cp, DP83065_MII_REGD, 0x39);
0838
0839
0840 cas_phy_write(cp, DP83065_MII_MEM, 0x1);
0841 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
0842 for (i = 0; i < cp->fw_size; i++)
0843 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
0844
0845
0846 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
0847 cas_phy_write(cp, DP83065_MII_REGD, 0x1);
0848 }
0849
0850
0851
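/* bring up the PHY or PCS/serdes link layer: reset, vendor-specific
 * fixups, advertisement setup, and serdes configuration
 */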
0852 static void cas_phy_init(struct cas *cp)
0853 {
0854 u16 val;
0855
0856
0857 if (CAS_PHY_MII(cp->phy_type)) {
0858 writel(PCS_DATAPATH_MODE_MII,
0859 cp->regs + REG_PCS_DATAPATH_MODE);
0860
0861 cas_mif_poll(cp, 0);
0862 cas_reset_mii_phy(cp);
0863
0864 if (PHY_LUCENT_B0 == cp->phy_id) {
0865
0866 cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
0867 cas_phy_write(cp, MII_BMCR, 0x00f1);
0868 cas_phy_write(cp, LUCENT_MII_REG, 0x0);
0869
0870 } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
0871
0872 cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
0873 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
0874 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
0875 cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
0876 cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
0877 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
0878 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
0879 cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
0880 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
0881 cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
0882 cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
0883
0884 } else if (PHY_BROADCOM_5411 == cp->phy_id) {
0885 val = cas_phy_read(cp, BROADCOM_MII_REG4);
0886 val = cas_phy_read(cp, BROADCOM_MII_REG4);
0887 if (val & 0x0080) {
0888
0889 cas_phy_write(cp, BROADCOM_MII_REG4,
0890 val & ~0x0080);
0891 }
0892
0893 } else if (cp->cas_flags & CAS_FLAG_SATURN) {
0894 writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
0895 SATURN_PCFG_FSI : 0x0,
0896 cp->regs + REG_SATURN_PCFG);
0897
0898
0899
0900
0901
0902 if (PHY_NS_DP83065 == cp->phy_id) {
0903 cas_saturn_firmware_load(cp);
0904 }
0905 cas_phy_powerup(cp);
0906 }
0907
0908
0909 val = cas_phy_read(cp, MII_BMCR);
0910 val &= ~BMCR_ANENABLE;
0911 cas_phy_write(cp, MII_BMCR, val);
0912 udelay(10);
0913
0914 cas_phy_write(cp, MII_ADVERTISE,
0915 cas_phy_read(cp, MII_ADVERTISE) |
0916 (ADVERTISE_10HALF | ADVERTISE_10FULL |
0917 ADVERTISE_100HALF | ADVERTISE_100FULL |
0918 CAS_ADVERTISE_PAUSE |
0919 CAS_ADVERTISE_ASYM_PAUSE));
0920
0921 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
0922
0923
0924
0925 val = cas_phy_read(cp, CAS_MII_1000_CTRL);
0926 val &= ~CAS_ADVERTISE_1000HALF;
0927 val |= CAS_ADVERTISE_1000FULL;
0928 cas_phy_write(cp, CAS_MII_1000_CTRL, val);
0929 }
0930
0931 } else {
0932
0933 u32 val;
0934 int limit;
0935
0936 writel(PCS_DATAPATH_MODE_SERDES,
0937 cp->regs + REG_PCS_DATAPATH_MODE);
0938
0939
0940 if (cp->cas_flags & CAS_FLAG_SATURN)
0941 writel(0, cp->regs + REG_SATURN_PCFG);
0942
0943
0944 val = readl(cp->regs + REG_PCS_MII_CTRL);
0945 val |= PCS_MII_RESET;
0946 writel(val, cp->regs + REG_PCS_MII_CTRL);
0947
0948 limit = STOP_TRIES;
0949 while (--limit > 0) {
0950 udelay(10);
0951 if ((readl(cp->regs + REG_PCS_MII_CTRL) &
0952 PCS_MII_RESET) == 0)
0953 break;
0954 }
0955 if (limit <= 0)
0956 netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
0957 readl(cp->regs + REG_PCS_STATE_MACHINE));
0958
0959
0960
0961
0962 writel(0x0, cp->regs + REG_PCS_CFG);
0963
0964
0965 val = readl(cp->regs + REG_PCS_MII_ADVERT);
0966 val &= ~PCS_MII_ADVERT_HD;
0967 val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
0968 PCS_MII_ADVERT_ASYM_PAUSE);
0969 writel(val, cp->regs + REG_PCS_MII_ADVERT);
0970
0971
0972 writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
0973
0974
0975 writel(PCS_SERDES_CTRL_SYNCD_EN,
0976 cp->regs + REG_PCS_SERDES_CTRL);
0977 }
0978 }
0979
0980
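/* read the PCS link state; returns 1 if the caller should schedule a
 * chip reset (the PCS link-down workaround)
 */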
0981 static int cas_pcs_link_check(struct cas *cp)
0982 {
0983 u32 stat, state_machine;
0984 int retval = 0;
0985
0986
0987
0988
0989
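	/* the link status bit latches low, so read it twice to see a
	 * transition back to link-up
	 */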
0990 stat = readl(cp->regs + REG_PCS_MII_STATUS);
0991 if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
0992 stat = readl(cp->regs + REG_PCS_MII_STATUS);
0993
0994
0995
0996
0997 if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
0998 PCS_MII_STATUS_REMOTE_FAULT)) ==
0999 (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
1000 netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
1001
1002
1003
1004
1005 state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
1006 if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
1007 stat &= ~PCS_MII_STATUS_LINK_STATUS;
1008 } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
1009 stat |= PCS_MII_STATUS_LINK_STATUS;
1010 }
1011
1012 if (stat & PCS_MII_STATUS_LINK_STATUS) {
1013 if (cp->lstate != link_up) {
1014 if (cp->opened) {
1015 cp->lstate = link_up;
1016 cp->link_transition = LINK_TRANSITION_LINK_UP;
1017
1018 cas_set_link_modes(cp);
1019 netif_carrier_on(cp->dev);
1020 }
1021 }
1022 } else if (cp->lstate == link_up) {
1023 cp->lstate = link_down;
1024 if (link_transition_timeout != 0 &&
1025 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1026 !cp->link_transition_jiffies_valid) {
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039 retval = 1;
1040 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1041 cp->link_transition_jiffies = jiffies;
1042 cp->link_transition_jiffies_valid = 1;
1043 } else {
1044 cp->link_transition = LINK_TRANSITION_ON_FAILURE;
1045 }
1046 netif_carrier_off(cp->dev);
1047 if (cp->opened)
1048 netif_info(cp, link, cp->dev, "PCS link down\n");
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058 if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
1059
1060 stat = readl(cp->regs + REG_PCS_SERDES_STATE);
1061 if (stat == 0x03)
1062 return 1;
1063 }
1064 } else if (cp->lstate == link_down) {
1065 if (link_transition_timeout != 0 &&
1066 cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
1067 !cp->link_transition_jiffies_valid) {
1068
1069
1070
1071
1072
1073 retval = 1;
1074 cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
1075 cp->link_transition_jiffies = jiffies;
1076 cp->link_transition_jiffies_valid = 1;
1077 } else {
1078 cp->link_transition = LINK_TRANSITION_STILL_FAILED;
1079 }
1080 }
1081
1082 return retval;
1083 }
1084
1085 static int cas_pcs_interrupt(struct net_device *dev,
1086 struct cas *cp, u32 status)
1087 {
1088 u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
1089
1090 if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
1091 return 0;
1092 return cas_pcs_link_check(cp);
1093 }
1094
1095 static int cas_txmac_interrupt(struct net_device *dev,
1096 struct cas *cp, u32 status)
1097 {
1098 u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
1099
1100 if (!txmac_stat)
1101 return 0;
1102
1103 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1104 "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
1105
1106
1107
1108
1109 if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
1110 !(txmac_stat & ~MAC_TX_DEFER_TIMER))
1111 return 0;
1112
1113 spin_lock(&cp->stat_lock[0]);
1114 if (txmac_stat & MAC_TX_UNDERRUN) {
1115 netdev_err(dev, "TX MAC xmit underrun\n");
1116 cp->net_stats[0].tx_fifo_errors++;
1117 }
1118
1119 if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
1120 netdev_err(dev, "TX MAC max packet size error\n");
1121 cp->net_stats[0].tx_errors++;
1122 }
1123
1124
1125
1126
1127 if (txmac_stat & MAC_TX_COLL_NORMAL)
1128 cp->net_stats[0].collisions += 0x10000;
1129
1130 if (txmac_stat & MAC_TX_COLL_EXCESS) {
1131 cp->net_stats[0].tx_aborted_errors += 0x10000;
1132 cp->net_stats[0].collisions += 0x10000;
1133 }
1134
1135 if (txmac_stat & MAC_TX_COLL_LATE) {
1136 cp->net_stats[0].tx_aborted_errors += 0x10000;
1137 cp->net_stats[0].collisions += 0x10000;
1138 }
1139 spin_unlock(&cp->stat_lock[0]);
1140
1141
1142
1143
1144 return 0;
1145 }
1146
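/* write a header-parser (HP) micro-program into the HP instruction RAM */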
1147 static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
1148 {
1149 cas_hp_inst_t *inst;
1150 u32 val;
1151 int i;
1152
1153 i = 0;
1154 while ((inst = firmware) && inst->note) {
1155 writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
1156
1157 val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
1158 val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
1159 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
1160
1161 val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
1162 val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
1163 val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
1164 val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
1165 val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
1166 val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
1167 val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
1168 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
1169
1170 val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
1171 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
1172 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
1173 val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
1174 writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
1175 ++firmware;
1176 ++i;
1177 }
1178 }
1179
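/* set up the RX descriptor and completion rings, pause thresholds,
 * interrupt blanking, page sizing and the header parser
 */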
1180 static void cas_init_rx_dma(struct cas *cp)
1181 {
1182 u64 desc_dma = cp->block_dvma;
1183 u32 val;
1184 int i, size;
1185
1186
1187 val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
1188 val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
1189 val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
1190 if ((N_RX_DESC_RINGS > 1) &&
1191 (cp->cas_flags & CAS_FLAG_REG_PLUS))
1192 val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
1193 writel(val, cp->regs + REG_RX_CFG);
1194
1195 val = (unsigned long) cp->init_rxds[0] -
1196 (unsigned long) cp->init_block;
1197 writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
1198 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
1199 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
1200
1201 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1202
1203
1204
1205 val = (unsigned long) cp->init_rxds[1] -
1206 (unsigned long) cp->init_block;
1207 writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
1208 writel((desc_dma + val) & 0xffffffff, cp->regs +
1209 REG_PLUS_RX_DB1_LOW);
1210 writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
1211 REG_PLUS_RX_KICK1);
1212 }
1213
1214
1215 val = (unsigned long) cp->init_rxcs[0] -
1216 (unsigned long) cp->init_block;
1217 writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
1218 writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
1219
1220 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1221
1222 for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
1223 val = (unsigned long) cp->init_rxcs[i] -
1224 (unsigned long) cp->init_block;
1225 writel((desc_dma + val) >> 32, cp->regs +
1226 REG_PLUS_RX_CBN_HI(i));
1227 writel((desc_dma + val) & 0xffffffff, cp->regs +
1228 REG_PLUS_RX_CBN_LOW(i));
1229 }
1230 }
1231
1232
1233
1234
1235
1236 readl(cp->regs + REG_INTR_STATUS_ALIAS);
1237 writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
1238
1239
1240 val = CAS_BASE(RX_PAUSE_THRESH_OFF,
1241 cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
1242 val |= CAS_BASE(RX_PAUSE_THRESH_ON,
1243 cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
1244 writel(val, cp->regs + REG_RX_PAUSE_THRESH);
1245
1246
1247 for (i = 0; i < 64; i++) {
1248 writel(i, cp->regs + REG_RX_TABLE_ADDR);
1249 writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
1250 writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
1251 writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
1252 }
1253
1254
1255 writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
1256 writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
1257
1258
1259 #ifdef USE_RX_BLANK
1260 val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
1261 val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
1262 writel(val, cp->regs + REG_RX_BLANK);
1263 #else
1264 writel(0x0, cp->regs + REG_RX_BLANK);
1265 #endif
1266
1267
1268
1269
1270
1271
1272
1273 val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
1274 writel(val, cp->regs + REG_RX_AE_THRESH);
1275 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
1276 val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
1277 writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
1278 }
1279
1280
1281
1282
1283 writel(0x0, cp->regs + REG_RX_RED);
1284
1285
1286 val = 0;
1287 if (cp->page_size == 0x1000)
1288 val = 0x1;
1289 else if (cp->page_size == 0x2000)
1290 val = 0x2;
1291 else if (cp->page_size == 0x4000)
1292 val = 0x3;
1293
1294
1295 size = cp->dev->mtu + 64;
1296 if (size > cp->page_size)
1297 size = cp->page_size;
1298
1299 if (size <= 0x400)
1300 i = 0x0;
1301 else if (size <= 0x800)
1302 i = 0x1;
1303 else if (size <= 0x1000)
1304 i = 0x2;
1305 else
1306 i = 0x3;
1307
1308 cp->mtu_stride = 1 << (i + 10);
1309 val = CAS_BASE(RX_PAGE_SIZE, val);
1310 val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
1311 val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
1312 val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
1313 writel(val, cp->regs + REG_RX_PAGE_SIZE);
1314
1315
1316 if (&CAS_HP_FIRMWARE[0] == &cas_prog_null[0])
1317 return;
1318
1319 val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
1320 val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
1321 val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
1322 writel(val, cp->regs + REG_HP_CFG);
1323 }
1324
1325 static inline void cas_rxc_init(struct cas_rx_comp *rxc)
1326 {
1327 memset(rxc, 0, sizeof(*rxc));
1328 rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
1329 }
1330
1331
1332
1333
1334
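/* return the spare page for this slot if the stack no longer holds a
 * reference to it; otherwise dequeue a fresh page and park the busy one
 * on the in-use list
 */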
1335 static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
1336 {
1337 cas_page_t *page = cp->rx_pages[1][index];
1338 cas_page_t *new;
1339
1340 if (page_count(page->buffer) == 1)
1341 return page;
1342
1343 new = cas_page_dequeue(cp);
1344 if (new) {
1345 spin_lock(&cp->rx_inuse_lock);
1346 list_add(&page->list, &cp->rx_inuse_list);
1347 spin_unlock(&cp->rx_inuse_lock);
1348 }
1349 return new;
1350 }
1351
1352
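/* swap in a replacement page for a descriptor slot whose current page
 * is still referenced elsewhere
 */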
1353 static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
1354 const int index)
1355 {
1356 cas_page_t **page0 = cp->rx_pages[0];
1357 cas_page_t **page1 = cp->rx_pages[1];
1358
1359
1360 if (page_count(page0[index]->buffer) > 1) {
1361 cas_page_t *new = cas_page_spare(cp, index);
1362 if (new) {
1363 page1[index] = page0[index];
1364 page0[index] = new;
1365 }
1366 }
1367 RX_USED_SET(page0[index], 0);
1368 return page0[index];
1369 }
1370
1371 static void cas_clean_rxds(struct cas *cp)
1372 {
1373
1374 struct cas_rx_desc *rxd = cp->init_rxds[0];
1375 int i, size;
1376
1377
1378 for (i = 0; i < N_RX_FLOWS; i++) {
1379 struct sk_buff *skb;
1380 while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
1381 cas_skb_release(skb);
1382 }
1383 }
1384
1385
1386 size = RX_DESC_RINGN_SIZE(0);
1387 for (i = 0; i < size; i++) {
1388 cas_page_t *page = cas_page_swap(cp, 0, i);
1389 rxd[i].buffer = cpu_to_le64(page->dma_addr);
1390 rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
1391 CAS_BASE(RX_INDEX_RING, 0));
1392 }
1393
1394 cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
1395 cp->rx_last[0] = 0;
1396 cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
1397 }
1398
1399 static void cas_clean_rxcs(struct cas *cp)
1400 {
1401 int i, j;
1402
1403
1404 memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
1405 memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
1406 for (i = 0; i < N_RX_COMP_RINGS; i++) {
1407 struct cas_rx_comp *rxc = cp->init_rxcs[i];
1408 for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
1409 cas_rxc_init(rxc + j);
1410 }
1411 }
1412 }
1413
1414 #if 0
1415
1416
1417
1418
1419
1420
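/* cas_rxmac_reset: disable the RX MAC and DMA engines, issue an RX
 * software reset and re-initialize the rings (currently compiled out)
 */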
1421 static int cas_rxmac_reset(struct cas *cp)
1422 {
1423 struct net_device *dev = cp->dev;
1424 int limit;
1425 u32 val;
1426
1427
1428 writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1429 for (limit = 0; limit < STOP_TRIES; limit++) {
1430 if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
1431 break;
1432 udelay(10);
1433 }
1434 if (limit == STOP_TRIES) {
1435 netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
1436 return 1;
1437 }
1438
1439
1440 writel(0, cp->regs + REG_RX_CFG);
1441 for (limit = 0; limit < STOP_TRIES; limit++) {
1442 if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
1443 break;
1444 udelay(10);
1445 }
1446 if (limit == STOP_TRIES) {
1447 netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
1448 return 1;
1449 }
1450
1451 mdelay(5);
1452
1453
1454 writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
1455 for (limit = 0; limit < STOP_TRIES; limit++) {
1456 if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
1457 break;
1458 udelay(10);
1459 }
1460 if (limit == STOP_TRIES) {
1461 netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
1462 return 1;
1463 }
1464
1465
1466 cas_clean_rxds(cp);
1467 cas_clean_rxcs(cp);
1468
1469
1470 cas_init_rx_dma(cp);
1471
1472
1473 val = readl(cp->regs + REG_RX_CFG);
1474 writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
1475 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
1476 val = readl(cp->regs + REG_MAC_RX_CFG);
1477 writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
1478 return 0;
1479 }
1480 #endif
1481
1482 static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
1483 u32 status)
1484 {
1485 u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
1486
1487 if (!stat)
1488 return 0;
1489
1490 netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
1491
1492
1493 spin_lock(&cp->stat_lock[0]);
1494 if (stat & MAC_RX_ALIGN_ERR)
1495 cp->net_stats[0].rx_frame_errors += 0x10000;
1496
1497 if (stat & MAC_RX_CRC_ERR)
1498 cp->net_stats[0].rx_crc_errors += 0x10000;
1499
1500 if (stat & MAC_RX_LEN_ERR)
1501 cp->net_stats[0].rx_length_errors += 0x10000;
1502
1503 if (stat & MAC_RX_OVERFLOW) {
1504 cp->net_stats[0].rx_over_errors++;
1505 cp->net_stats[0].rx_fifo_errors++;
1506 }
1507
1508
1509
1510
1511 spin_unlock(&cp->stat_lock[0]);
1512 return 0;
1513 }
1514
1515 static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
1516 u32 status)
1517 {
1518 u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
1519
1520 if (!stat)
1521 return 0;
1522
1523 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1524 "mac interrupt, stat: 0x%x\n", stat);
1525
1526
1527
1528
1529
1530 if (stat & MAC_CTRL_PAUSE_STATE)
1531 cp->pause_entered++;
1532
1533 if (stat & MAC_CTRL_PAUSE_RECEIVED)
1534 cp->pause_last_time_recvd = (stat >> 16);
1535
1536 return 0;
1537 }
1538
1539
1540
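/* autonegotiation did not complete: step down through forced modes
 * (1000 full -> 100 full -> 100 half -> 10) until a link is found
 */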
1541 static inline int cas_mdio_link_not_up(struct cas *cp)
1542 {
1543 u16 val;
1544
1545 switch (cp->lstate) {
1546 case link_force_ret:
1547 netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
1548 cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
1549 cp->timer_ticks = 5;
1550 cp->lstate = link_force_ok;
1551 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1552 break;
1553
1554 case link_aneg:
1555 val = cas_phy_read(cp, MII_BMCR);
1556
1557
1558
1559
1560 val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
1561 val |= BMCR_FULLDPLX;
1562 val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
1563 CAS_BMCR_SPEED1000 : BMCR_SPEED100;
1564 cas_phy_write(cp, MII_BMCR, val);
1565 cp->timer_ticks = 5;
1566 cp->lstate = link_force_try;
1567 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1568 break;
1569
1570 case link_force_try:
1571
1572 val = cas_phy_read(cp, MII_BMCR);
1573 cp->timer_ticks = 5;
1574 if (val & CAS_BMCR_SPEED1000) {
1575 val &= ~CAS_BMCR_SPEED1000;
1576 val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
1577 cas_phy_write(cp, MII_BMCR, val);
1578 break;
1579 }
1580
1581 if (val & BMCR_SPEED100) {
1582 if (val & BMCR_FULLDPLX)
1583 val &= ~BMCR_FULLDPLX;
1584 else {
1585 val &= ~BMCR_SPEED100;
1586 }
1587 cas_phy_write(cp, MII_BMCR, val);
1588 break;
1589 }
1590 break;
1591 default:
1592 break;
1593 }
1594 return 0;
1595 }
1596
1597
1598
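/* process a BMSR update from MIF polling; retries autonegotiation once
 * after a forced-mode fallback, and returns 1 when an established link
 * goes down so the caller can schedule a reset
 */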
1599 static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
1600 {
1601 int restart;
1602
1603 if (bmsr & BMSR_LSTATUS) {
1604
1605
1606
1607
1608
1609 if ((cp->lstate == link_force_try) &&
1610 (cp->link_cntl & BMCR_ANENABLE)) {
1611 cp->lstate = link_force_ret;
1612 cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
1613 cas_mif_poll(cp, 0);
1614 cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
1615 cp->timer_ticks = 5;
1616 if (cp->opened)
1617 netif_info(cp, link, cp->dev,
1618 "Got link after fallback, retrying autoneg once...\n");
1619 cas_phy_write(cp, MII_BMCR,
1620 cp->link_fcntl | BMCR_ANENABLE |
1621 BMCR_ANRESTART);
1622 cas_mif_poll(cp, 1);
1623
1624 } else if (cp->lstate != link_up) {
1625 cp->lstate = link_up;
1626 cp->link_transition = LINK_TRANSITION_LINK_UP;
1627
1628 if (cp->opened) {
1629 cas_set_link_modes(cp);
1630 netif_carrier_on(cp->dev);
1631 }
1632 }
1633 return 0;
1634 }
1635
1636
1637
1638
1639 restart = 0;
1640 if (cp->lstate == link_up) {
1641 cp->lstate = link_down;
1642 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
1643
1644 netif_carrier_off(cp->dev);
1645 if (cp->opened)
1646 netif_info(cp, link, cp->dev, "Link down\n");
1647 restart = 1;
1648
1649 } else if (++cp->timer_ticks > 10)
1650 cas_mdio_link_not_up(cp);
1651
1652 return restart;
1653 }
1654
1655 static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
1656 u32 status)
1657 {
1658 u32 stat = readl(cp->regs + REG_MIF_STATUS);
1659 u16 bmsr;
1660
1661
1662 if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
1663 return 0;
1664
1665 bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
1666 return cas_mii_link_check(cp, bmsr);
1667 }
1668
1669 static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
1670 u32 status)
1671 {
1672 u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
1673
1674 if (!stat)
1675 return 0;
1676
1677 netdev_err(dev, "PCI error [%04x:%04x]",
1678 stat, readl(cp->regs + REG_BIM_DIAG));
1679
1680
1681 if ((stat & PCI_ERR_BADACK) &&
1682 ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
1683 pr_cont(" <No ACK64# during ABS64 cycle>");
1684
1685 if (stat & PCI_ERR_DTRTO)
1686 pr_cont(" <Delayed transaction timeout>");
1687 if (stat & PCI_ERR_OTHER)
1688 pr_cont(" <other>");
1689 if (stat & PCI_ERR_BIM_DMA_WRITE)
1690 pr_cont(" <BIM DMA 0 write req>");
1691 if (stat & PCI_ERR_BIM_DMA_READ)
1692 pr_cont(" <BIM DMA 0 read req>");
1693 pr_cont("\n");
1694
1695 if (stat & PCI_ERR_OTHER) {
1696 int pci_errs;
1697
1698
1699
1700
1701 pci_errs = pci_status_get_and_clear_errors(cp->pdev);
1702
1703 netdev_err(dev, "PCI status errors[%04x]\n", pci_errs);
1704 if (pci_errs & PCI_STATUS_PARITY)
1705 netdev_err(dev, "PCI parity error detected\n");
1706 if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT)
1707 netdev_err(dev, "PCI target abort\n");
1708 if (pci_errs & PCI_STATUS_REC_TARGET_ABORT)
1709 netdev_err(dev, "PCI master acks target abort\n");
1710 if (pci_errs & PCI_STATUS_REC_MASTER_ABORT)
1711 netdev_err(dev, "PCI master abort\n");
1712 if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR)
1713 netdev_err(dev, "PCI system error SERR#\n");
1714 if (pci_errs & PCI_STATUS_DETECTED_PARITY)
1715 netdev_err(dev, "PCI parity error\n");
1716 }
1717
1718
1719 return 1;
1720 }
1721
1722
1723
1724
1725
1726
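/* service all abnormal interrupt causes; returns 1 (and schedules the
 * reset task) if the chip needs a reset
 */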
1727 static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
1728 u32 status)
1729 {
1730 if (status & INTR_RX_TAG_ERROR) {
1731
1732 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1733 "corrupt rx tag framing\n");
1734 spin_lock(&cp->stat_lock[0]);
1735 cp->net_stats[0].rx_errors++;
1736 spin_unlock(&cp->stat_lock[0]);
1737 goto do_reset;
1738 }
1739
1740 if (status & INTR_RX_LEN_MISMATCH) {
1741
1742 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1743 "length mismatch for rx frame\n");
1744 spin_lock(&cp->stat_lock[0]);
1745 cp->net_stats[0].rx_errors++;
1746 spin_unlock(&cp->stat_lock[0]);
1747 goto do_reset;
1748 }
1749
1750 if (status & INTR_PCS_STATUS) {
1751 if (cas_pcs_interrupt(dev, cp, status))
1752 goto do_reset;
1753 }
1754
1755 if (status & INTR_TX_MAC_STATUS) {
1756 if (cas_txmac_interrupt(dev, cp, status))
1757 goto do_reset;
1758 }
1759
1760 if (status & INTR_RX_MAC_STATUS) {
1761 if (cas_rxmac_interrupt(dev, cp, status))
1762 goto do_reset;
1763 }
1764
1765 if (status & INTR_MAC_CTRL_STATUS) {
1766 if (cas_mac_interrupt(dev, cp, status))
1767 goto do_reset;
1768 }
1769
1770 if (status & INTR_MIF_STATUS) {
1771 if (cas_mif_interrupt(dev, cp, status))
1772 goto do_reset;
1773 }
1774
1775 if (status & INTR_PCI_ERROR_STATUS) {
1776 if (cas_pci_interrupt(dev, cp, status))
1777 goto do_reset;
1778 }
1779 return 0;
1780
1781 do_reset:
1782 #if 1
1783 atomic_inc(&cp->reset_task_pending);
1784 atomic_inc(&cp->reset_task_pending_all);
1785 netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
1786 schedule_work(&cp->reset_task);
1787 #else
1788 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
1789 netdev_err(dev, "reset called in cas_abnormal_irq\n");
1790 schedule_work(&cp->reset_task);
1791 #endif
1792 return 1;
1793 }
1794
1795
1796
1797
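/* work around a target-abort erratum on chips flagged with
 * CAS_FLAG_TARGET_ABORT: when a TX buffer ends within
 * TX_TARGET_ABORT_LEN bytes of a page boundary, the tail is handled via
 * the driver's tiny-buffer scheme (see tx_tiny_use), hence up to twice
 * the descriptor count per fragment
 */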
1798 #define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
1799 #define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
1800 static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
1801 const int len)
1802 {
1803 unsigned long off = addr + len;
1804
1805 if (CAS_TABORT(cp) == 1)
1806 return 0;
1807 if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
1808 return 0;
1809 return TX_TARGET_ABORT_LEN;
1810 }
1811
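/* reclaim completed TX descriptors for one ring, unmapping buffers,
 * updating stats and waking the queue when space frees up
 */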
1812 static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
1813 {
1814 struct cas_tx_desc *txds;
1815 struct sk_buff **skbs;
1816 struct net_device *dev = cp->dev;
1817 int entry, count;
1818
1819 spin_lock(&cp->tx_lock[ring]);
1820 txds = cp->init_txds[ring];
1821 skbs = cp->tx_skbs[ring];
1822 entry = cp->tx_old[ring];
1823
1824 count = TX_BUFF_COUNT(ring, entry, limit);
1825 while (entry != limit) {
1826 struct sk_buff *skb = skbs[entry];
1827 dma_addr_t daddr;
1828 u32 dlen;
1829 int frag;
1830
1831 if (!skb) {
1832
1833 entry = TX_DESC_NEXT(ring, entry);
1834 continue;
1835 }
1836
1837
		count -= skb_shinfo(skb)->nr_frags +
			 cp->tx_tiny_use[ring][entry].nbufs + 1;
1840 if (count < 0)
1841 break;
1842
1843 netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
1844 "tx[%d] done, slot %d\n", ring, entry);
1845
1846 skbs[entry] = NULL;
1847 cp->tx_tiny_use[ring][entry].nbufs = 0;
1848
1849 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1850 struct cas_tx_desc *txd = txds + entry;
1851
1852 daddr = le64_to_cpu(txd->buffer);
1853 dlen = CAS_VAL(TX_DESC_BUFLEN,
1854 le64_to_cpu(txd->control));
1855 dma_unmap_page(&cp->pdev->dev, daddr, dlen,
1856 DMA_TO_DEVICE);
1857 entry = TX_DESC_NEXT(ring, entry);
1858
1859
1860 if (cp->tx_tiny_use[ring][entry].used) {
1861 cp->tx_tiny_use[ring][entry].used = 0;
1862 entry = TX_DESC_NEXT(ring, entry);
1863 }
1864 }
1865
1866 spin_lock(&cp->stat_lock[ring]);
1867 cp->net_stats[ring].tx_packets++;
1868 cp->net_stats[ring].tx_bytes += skb->len;
1869 spin_unlock(&cp->stat_lock[ring]);
1870 dev_consume_skb_irq(skb);
1871 }
1872 cp->tx_old[ring] = entry;
1873
1874
1875
1876
1877
1878 if (netif_queue_stopped(dev) &&
1879 (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
1880 netif_wake_queue(dev);
1881 spin_unlock(&cp->tx_lock[ring]);
1882 }
1883
1884 static void cas_tx(struct net_device *dev, struct cas *cp,
1885 u32 status)
1886 {
1887 int limit, ring;
1888 #ifdef USE_TX_COMPWB
1889 u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
1890 #endif
1891 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
1892 "tx interrupt, status: 0x%x, %llx\n",
1893 status, (unsigned long long)compwb);
1894
1895 for (ring = 0; ring < N_TX_RINGS; ring++) {
1896 #ifdef USE_TX_COMPWB
1897
1898 limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
1899 CAS_VAL(TX_COMPWB_LSB, compwb);
1900 compwb = TX_COMPWB_NEXT(compwb);
1901 #else
1902 limit = readl(cp->regs + REG_TX_COMPN(ring));
1903 #endif
1904 if (cp->tx_old[ring] != limit)
1905 cas_tx_ringN(cp, ring, limit);
1906 }
1907 }
1908
1909
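/* build an skb for a received packet: copy the header (and small
 * packets) out of the RX page and attach larger payloads as page
 * fragments; returns the packet length or -1 on allocation failure
 */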
1910 static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
1911 int entry, const u64 *words,
1912 struct sk_buff **skbref)
1913 {
1914 int dlen, hlen, len, i, alloclen;
1915 int off, swivel = RX_SWIVEL_OFF_VAL;
1916 struct cas_page *page;
1917 struct sk_buff *skb;
1918 void *addr, *crcaddr;
1919 __sum16 csum;
1920 char *p;
1921
1922 hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
1923 dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
1924 len = hlen + dlen;
1925
1926 if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
1927 alloclen = len;
1928 else
1929 alloclen = max(hlen, RX_COPY_MIN);
1930
1931 skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
1932 if (skb == NULL)
1933 return -1;
1934
1935 *skbref = skb;
1936 skb_reserve(skb, swivel);
1937
1938 p = skb->data;
1939 addr = crcaddr = NULL;
1940 if (hlen) {
1941 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
1942 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1943 off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
1944 swivel;
1945
1946 i = hlen;
1947 if (!dlen)
1948 i += cp->crc_size;
1949 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1950 i, DMA_FROM_DEVICE);
1951 addr = cas_page_map(page->buffer);
1952 memcpy(p, addr + off, i);
1953 dma_sync_single_for_device(&cp->pdev->dev,
1954 page->dma_addr + off, i,
1955 DMA_FROM_DEVICE);
1956 cas_page_unmap(addr);
1957 RX_USED_ADD(page, 0x100);
1958 p += hlen;
1959 swivel = 0;
1960 }
1961
1962
1963 if (alloclen < (hlen + dlen)) {
1964 skb_frag_t *frag = skb_shinfo(skb)->frags;
1965
1966
1967 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
1968 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
1969 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
1970
1971 hlen = min(cp->page_size - off, dlen);
1972 if (hlen < 0) {
1973 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
1974 "rx page overflow: %d\n", hlen);
1975 dev_kfree_skb_irq(skb);
1976 return -1;
1977 }
1978 i = hlen;
1979 if (i == dlen)
1980 i += cp->crc_size;
1981 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
1982 i, DMA_FROM_DEVICE);
1983
1984
1985 swivel = 0;
1986 if (p == (char *) skb->data) {
1987 addr = cas_page_map(page->buffer);
1988 memcpy(p, addr + off, RX_COPY_MIN);
1989 dma_sync_single_for_device(&cp->pdev->dev,
1990 page->dma_addr + off, i,
1991 DMA_FROM_DEVICE);
1992 cas_page_unmap(addr);
1993 off += RX_COPY_MIN;
1994 swivel = RX_COPY_MIN;
1995 RX_USED_ADD(page, cp->mtu_stride);
1996 } else {
1997 RX_USED_ADD(page, hlen);
1998 }
1999 skb_put(skb, alloclen);
2000
2001 skb_shinfo(skb)->nr_frags++;
2002 skb->data_len += hlen - swivel;
2003 skb->truesize += hlen - swivel;
2004 skb->len += hlen - swivel;
2005
2006 __skb_frag_set_page(frag, page->buffer);
2007 __skb_frag_ref(frag);
2008 skb_frag_off_set(frag, off);
2009 skb_frag_size_set(frag, hlen - swivel);
2010
2011
2012 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2013 hlen = dlen;
2014 off = 0;
2015
2016 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2017 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2018 dma_sync_single_for_cpu(&cp->pdev->dev,
2019 page->dma_addr,
2020 hlen + cp->crc_size,
2021 DMA_FROM_DEVICE);
2022 dma_sync_single_for_device(&cp->pdev->dev,
2023 page->dma_addr,
2024 hlen + cp->crc_size,
2025 DMA_FROM_DEVICE);
2026
2027 skb_shinfo(skb)->nr_frags++;
2028 skb->data_len += hlen;
2029 skb->len += hlen;
2030 frag++;
2031
2032 __skb_frag_set_page(frag, page->buffer);
2033 __skb_frag_ref(frag);
2034 skb_frag_off_set(frag, 0);
2035 skb_frag_size_set(frag, hlen);
2036 RX_USED_ADD(page, hlen + cp->crc_size);
2037 }
2038
2039 if (cp->crc_size) {
2040 addr = cas_page_map(page->buffer);
2041 crcaddr = addr + off + hlen;
2042 }
2043
2044 } else {
2045
2046 if (!dlen)
2047 goto end_copy_pkt;
2048
2049 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2050 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2051 off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
2052 hlen = min(cp->page_size - off, dlen);
2053 if (hlen < 0) {
2054 netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
2055 "rx page overflow: %d\n", hlen);
2056 dev_kfree_skb_irq(skb);
2057 return -1;
2058 }
2059 i = hlen;
2060 if (i == dlen)
2061 i += cp->crc_size;
2062 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
2063 i, DMA_FROM_DEVICE);
2064 addr = cas_page_map(page->buffer);
2065 memcpy(p, addr + off, i);
2066 dma_sync_single_for_device(&cp->pdev->dev,
2067 page->dma_addr + off, i,
2068 DMA_FROM_DEVICE);
2069 cas_page_unmap(addr);
2070 if (p == (char *) skb->data)
2071 RX_USED_ADD(page, cp->mtu_stride);
2072 else
2073 RX_USED_ADD(page, i);
2074
2075
2076 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
2077 p += hlen;
2078 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2079 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
2080 dma_sync_single_for_cpu(&cp->pdev->dev,
2081 page->dma_addr,
2082 dlen + cp->crc_size,
2083 DMA_FROM_DEVICE);
2084 addr = cas_page_map(page->buffer);
2085 memcpy(p, addr, dlen + cp->crc_size);
2086 dma_sync_single_for_device(&cp->pdev->dev,
2087 page->dma_addr,
2088 dlen + cp->crc_size,
2089 DMA_FROM_DEVICE);
2090 cas_page_unmap(addr);
2091 RX_USED_ADD(page, dlen + cp->crc_size);
2092 }
2093 end_copy_pkt:
2094 if (cp->crc_size) {
2095 addr = NULL;
2096 crcaddr = skb->data + alloclen;
2097 }
2098 skb_put(skb, alloclen);
2099 }
2100
2101 csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
2102 if (cp->crc_size) {
2103
2104 csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
2105 csum_unfold(csum)));
2106 if (addr)
2107 cas_page_unmap(addr);
2108 }
2109 skb->protocol = eth_type_trans(skb, cp->dev);
2110 if (skb->protocol == htons(ETH_P_IP)) {
2111 skb->csum = csum_unfold(~csum);
2112 skb->ip_summed = CHECKSUM_COMPLETE;
2113 } else
2114 skb_checksum_none_assert(skb);
2115 return len;
2116 }
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
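/* queue the skb on its RX flow; when the completion ring signals
 * RELEASE_FLOW, flush the whole flow to the stack in order
 */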
2133 static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2134 struct sk_buff *skb)
2135 {
2136 int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
2137 struct sk_buff_head *flow = &cp->rx_flows[flowid];
2138
2139
2140
2141
2142
2143 __skb_queue_tail(flow, skb);
2144 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2145 while ((skb = __skb_dequeue(flow))) {
2146 cas_skb_release(skb);
2147 }
2148 }
2149 }
2150
2151
2152
2153
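/* post a fresh RX page at the current descriptor slot and kick the
 * hardware every fourth entry
 */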
2154 static void cas_post_page(struct cas *cp, const int ring, const int index)
2155 {
2156 cas_page_t *new;
2157 int entry;
2158
2159 entry = cp->rx_old[ring];
2160
2161 new = cas_page_swap(cp, ring, index);
2162 cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
2163 cp->init_rxds[ring][entry].index =
2164 cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
2165 CAS_BASE(RX_INDEX_RING, ring));
2166
2167 entry = RX_DESC_ENTRY(ring, entry + 1);
2168 cp->rx_old[ring] = entry;
2169
2170 if (entry % 4)
2171 return;
2172
2173 if (ring == 0)
2174 writel(entry, cp->regs + REG_RX_KICK);
2175 else if ((N_RX_DESC_RINGS > 1) &&
2176 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2177 writel(entry, cp->regs + REG_PLUS_RX_KICK1);
2178 }
2179
2180
2181
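/* refill up to num RX descriptors, swapping out pages the stack still
 * references; on allocation failure arm the link timer to retry later
 */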
2182 static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
2183 {
2184 unsigned int entry, last, count, released;
2185 int cluster;
2186 cas_page_t **page = cp->rx_pages[ring];
2187
2188 entry = cp->rx_old[ring];
2189
2190 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2191 "rxd[%d] interrupt, done: %d\n", ring, entry);
2192
2193 cluster = -1;
2194 count = entry & 0x3;
	last = RX_DESC_ENTRY(ring, num ? entry + num - 4 : entry - 4);
2196 released = 0;
2197 while (entry != last) {
2198
2199 if (page_count(page[entry]->buffer) > 1) {
2200 cas_page_t *new = cas_page_dequeue(cp);
2201 if (!new) {
2202
2203
2204
2205 cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
2206 if (!timer_pending(&cp->link_timer))
2207 mod_timer(&cp->link_timer, jiffies +
2208 CAS_LINK_FAST_TIMEOUT);
2209 cp->rx_old[ring] = entry;
2210 cp->rx_last[ring] = num ? num - released : 0;
2211 return -ENOMEM;
2212 }
2213 spin_lock(&cp->rx_inuse_lock);
2214 list_add(&page[entry]->list, &cp->rx_inuse_list);
2215 spin_unlock(&cp->rx_inuse_lock);
2216 cp->init_rxds[ring][entry].buffer =
2217 cpu_to_le64(new->dma_addr);
2218 page[entry] = new;
2219
2220 }
2221
2222 if (++count == 4) {
2223 cluster = entry;
2224 count = 0;
2225 }
2226 released++;
2227 entry = RX_DESC_ENTRY(ring, entry + 1);
2228 }
2229 cp->rx_old[ring] = entry;
2230
2231 if (cluster < 0)
2232 return 0;
2233
2234 if (ring == 0)
2235 writel(cluster, cp->regs + REG_RX_KICK);
2236 else if ((N_RX_DESC_RINGS > 1) &&
2237 (cp->cas_flags & CAS_FLAG_REG_PLUS))
2238 writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
2239 return 0;
2240 }
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
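/* process one RX completion ring: validate each completion, build and
 * deliver skbs, and release header/data pages back to the descriptor
 * rings; returns the number of packets processed
 */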
2255 static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2256 {
2257 struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
2258 int entry, drops;
2259 int npackets = 0;
2260
2261 netif_printk(cp, intr, KERN_DEBUG, cp->dev,
2262 "rx[%d] interrupt, done: %d/%d\n",
2263 ring,
2264 readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
2265
2266 entry = cp->rx_new[ring];
2267 drops = 0;
2268 while (1) {
2269 struct cas_rx_comp *rxc = rxcs + entry;
2270 struct sk_buff *skb;
2271 int type, len;
2272 u64 words[4];
2273 int i, dring;
2274
2275 words[0] = le64_to_cpu(rxc->word1);
2276 words[1] = le64_to_cpu(rxc->word2);
2277 words[2] = le64_to_cpu(rxc->word3);
2278 words[3] = le64_to_cpu(rxc->word4);
2279
2280
2281 type = CAS_VAL(RX_COMP1_TYPE, words[0]);
2282 if (type == 0)
2283 break;
2284
2285
2286 if (words[3] & RX_COMP4_ZERO) {
2287 break;
2288 }
2289
2290
2291 if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
2292 spin_lock(&cp->stat_lock[ring]);
2293 cp->net_stats[ring].rx_errors++;
2294 if (words[3] & RX_COMP4_LEN_MISMATCH)
2295 cp->net_stats[ring].rx_length_errors++;
2296 if (words[3] & RX_COMP4_BAD)
2297 cp->net_stats[ring].rx_crc_errors++;
2298 spin_unlock(&cp->stat_lock[ring]);
2299
2300
2301 drop_it:
2302 spin_lock(&cp->stat_lock[ring]);
2303 ++cp->net_stats[ring].rx_dropped;
2304 spin_unlock(&cp->stat_lock[ring]);
2305 goto next;
2306 }
2307
2308 len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
2309 if (len < 0) {
2310 ++drops;
2311 goto drop_it;
2312 }
2313
2314 /* decide whether the skb can go straight up the stack or
2315  * needs to go through flow re-assembly first
2316  */
2317 if (RX_DONT_BATCH || (type == 0x2)) {
2318
2319 cas_skb_release(skb);
2320 } else {
2321 cas_rx_flow_pkt(cp, words, skb);
2322 }
2323
2324 spin_lock(&cp->stat_lock[ring]);
2325 cp->net_stats[ring].rx_packets++;
2326 cp->net_stats[ring].rx_bytes += len;
2327 spin_unlock(&cp->stat_lock[ring]);
2328
2329 next:
2330 npackets++;
2331
2332
2333 if (words[0] & RX_COMP1_RELEASE_HDR) {
2334 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
2335 dring = CAS_VAL(RX_INDEX_RING, i);
2336 i = CAS_VAL(RX_INDEX_NUM, i);
2337 cas_post_page(cp, dring, i);
2338 }
2339
2340 if (words[0] & RX_COMP1_RELEASE_DATA) {
2341 i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
2342 dring = CAS_VAL(RX_INDEX_RING, i);
2343 i = CAS_VAL(RX_INDEX_NUM, i);
2344 cas_post_page(cp, dring, i);
2345 }
2346
2347 if (words[0] & RX_COMP1_RELEASE_NEXT) {
2348 i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
2349 dring = CAS_VAL(RX_INDEX_RING, i);
2350 i = CAS_VAL(RX_INDEX_NUM, i);
2351 cas_post_page(cp, dring, i);
2352 }
2353
2354
2355 entry = RX_COMP_ENTRY(ring, entry + 1 +
2356 CAS_VAL(RX_COMP1_SKIP, words[0]));
2357 #ifdef USE_NAPI
2358 if (budget && (npackets >= budget))
2359 break;
2360 #endif
2361 }
2362 cp->rx_new[ring] = entry;
2363
2364 if (drops)
2365 netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
2366 return npackets;
2367 }
2368
2369
2370
2371 static void cas_post_rxcs_ringN(struct net_device *dev,
2372 struct cas *cp, int ring)
2373 {
2374 struct cas_rx_comp *rxc = cp->init_rxcs[ring];
2375 int last, entry;
2376
2377 last = cp->rx_cur[ring];
2378 entry = cp->rx_new[ring];
2379 netif_printk(cp, intr, KERN_DEBUG, dev,
2380 "rxc[%d] interrupt, done: %d/%d\n",
2381 ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
2382
2383
2384 while (last != entry) {
2385 cas_rxc_init(rxc + last);
2386 last = RX_COMP_ENTRY(ring, last + 1);
2387 }
2388 cp->rx_cur[ring] = last;
2389
2390 if (ring == 0)
2391 writel(last, cp->regs + REG_RX_COMP_TAIL);
2392 else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
2393 writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
2394 }
2395
2396
2397
2398 /* with REG_PLUS parts, completion rings 2 and 3 can each be
2399  * serviced by their own PCI interrupt (INTC/INTD).
2400  */
2401 #if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
2402 static inline void cas_handle_irqN(struct net_device *dev,
2403 struct cas *cp, const u32 status,
2404 const int ring)
2405 {
2406 if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
2407 cas_post_rxcs_ringN(dev, cp, ring);
2408 }
2409
2410 static irqreturn_t cas_interruptN(int irq, void *dev_id)
2411 {
2412 struct net_device *dev = dev_id;
2413 struct cas *cp = netdev_priv(dev);
2414 unsigned long flags;
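/* INTC services completion ring 2, INTD services ring 3 */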
2415 int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
2416 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));
2417
2418
2419 if (status == 0)
2420 return IRQ_NONE;
2421
2422 spin_lock_irqsave(&cp->lock, flags);
2423 if (status & INTR_RX_DONE_ALT) {
2424 #ifdef USE_NAPI
2425 cas_mask_intr(cp);
2426 napi_schedule(&cp->napi);
2427 #else
2428 cas_rx_ringN(cp, ring, 0);
2429 #endif
2430 status &= ~INTR_RX_DONE_ALT;
2431 }
2432
2433 if (status)
2434 cas_handle_irqN(dev, cp, status, ring);
2435 spin_unlock_irqrestore(&cp->lock, flags);
2436 return IRQ_HANDLED;
2437 }
2438 #endif
2439
2440 #ifdef USE_PCI_INTB
2441
2442 static inline void cas_handle_irq1(struct cas *cp, const u32 status)
2443 {
2444 if (status & INTR_RX_BUF_UNAVAIL_1) {
2445 /* frame arrived but no free rx buffer was available.
2446  * NOTE: this can also happen on a link transition. */
2447 cas_post_rxds_ringN(cp, 1, 0);
2448 spin_lock(&cp->stat_lock[1]);
2449 cp->net_stats[1].rx_dropped++;
2450 spin_unlock(&cp->stat_lock[1]);
2451 }
2452
2453 if (status & INTR_RX_BUF_AE_1)
2454 cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) -
2455 RX_AE_FREEN_VAL(1));
2456
2457 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2458 cas_post_rxcs_ringN(cp->dev, cp, 1);
2459 }
2460
2461
2462 static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2463 {
2464 struct net_device *dev = dev_id;
2465 struct cas *cp = netdev_priv(dev);
2466 unsigned long flags;
2467 u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2468
2469
2470 if (status == 0)
2471 return IRQ_NONE;
2472
2473 spin_lock_irqsave(&cp->lock, flags);
2474 if (status & INTR_RX_DONE_ALT) {
2475 #ifdef USE_NAPI
2476 cas_mask_intr(cp);
2477 napi_schedule(&cp->napi);
2478 #else
2479 cas_rx_ringN(cp, 1, 0);
2480 #endif
2481 status &= ~INTR_RX_DONE_ALT;
2482 }
2483 if (status)
2484 cas_handle_irq1(cp, status);
2485 spin_unlock_irqrestore(&cp->lock, flags);
2486 return IRQ_HANDLED;
2487 }
2488 #endif
2489
2490 static inline void cas_handle_irq(struct net_device *dev,
2491 struct cas *cp, const u32 status)
2492 {
2493 /* housekeeping / error interrupts */
2494 if (status & INTR_ERROR_MASK)
2495 cas_abnormal_irq(dev, cp, status);
2496
2497 if (status & INTR_RX_BUF_UNAVAIL) {
2498 /* frame arrived but no free rx buffer was available.
2499  * NOTE: this can also happen on a link transition.
2500  */
2501 cas_post_rxds_ringN(cp, 0, 0);
2502 spin_lock(&cp->stat_lock[0]);
2503 cp->net_stats[0].rx_dropped++;
2504 spin_unlock(&cp->stat_lock[0]);
2505 } else if (status & INTR_RX_BUF_AE) {
2506 cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
2507 RX_AE_FREEN_VAL(0));
2508 }
2509
2510 if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
2511 cas_post_rxcs_ringN(dev, cp, 0);
2512 }
2513
2514 static irqreturn_t cas_interrupt(int irq, void *dev_id)
2515 {
2516 struct net_device *dev = dev_id;
2517 struct cas *cp = netdev_priv(dev);
2518 unsigned long flags;
2519 u32 status = readl(cp->regs + REG_INTR_STATUS);
2520
2521 if (status == 0)
2522 return IRQ_NONE;
2523
2524 spin_lock_irqsave(&cp->lock, flags);
2525 if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
2526 cas_tx(dev, cp, status);
2527 status &= ~(INTR_TX_ALL | INTR_TX_INTME);
2528 }
2529
2530 if (status & INTR_RX_DONE) {
2531 #ifdef USE_NAPI
2532 cas_mask_intr(cp);
2533 napi_schedule(&cp->napi);
2534 #else
2535 cas_rx_ringN(cp, 0, 0);
2536 #endif
2537 status &= ~INTR_RX_DONE;
2538 }
2539
2540 if (status)
2541 cas_handle_irq(dev, cp, status);
2542 spin_unlock_irqrestore(&cp->lock, flags);
2543 return IRQ_HANDLED;
2544 }
2545
2546
2547 #ifdef USE_NAPI
2548 static int cas_poll(struct napi_struct *napi, int budget)
2549 {
2550 struct cas *cp = container_of(napi, struct cas, napi);
2551 struct net_device *dev = cp->dev;
2552 int i, enable_intr, credits;
2553 u32 status = readl(cp->regs + REG_INTR_STATUS);
2554 unsigned long flags;
2555
2556 spin_lock_irqsave(&cp->lock, flags);
2557 cas_tx(dev, cp, status);
2558 spin_unlock_irqrestore(&cp->lock, flags);
2559
2560 /* NAPI rx: spread the budget across all of the completion
2561  * rings.
2562  *
2563  * to keep the work fair, loop through each ring
2564  * N_RX_COMP_RINGS times with a request of
2565  * budget / N_RX_COMP_RINGS each time.
2566  */
2567 enable_intr = 1;
2568 credits = 0;
2569 for (i = 0; i < N_RX_COMP_RINGS; i++) {
2570 int j;
2571 for (j = 0; j < N_RX_COMP_RINGS; j++) {
2572 credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS);
2573 if (credits >= budget) {
2574 enable_intr = 0;
2575 goto rx_comp;
2576 }
2577 }
2578 }
2579
2580 rx_comp:
2581
2582 spin_lock_irqsave(&cp->lock, flags);
2583 if (status)
2584 cas_handle_irq(dev, cp, status);
2585
2586 #ifdef USE_PCI_INTB
2587 if (N_RX_COMP_RINGS > 1) {
2588 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1));
2589 if (status)
2590 cas_handle_irq1(cp, status);
2591 }
2592 #endif
2593
2594 #ifdef USE_PCI_INTC
2595 if (N_RX_COMP_RINGS > 2) {
2596 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2));
2597 if (status)
2598 cas_handle_irqN(dev, cp, status, 2);
2599 }
2600 #endif
2601
2602 #ifdef USE_PCI_INTD
2603 if (N_RX_COMP_RINGS > 3) {
2604 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3));
2605 if (status)
2606 cas_handle_irqN(dev, cp, status, 3);
2607 }
2608 #endif
2609 spin_unlock_irqrestore(&cp->lock, flags);
2610 if (enable_intr) {
2611 napi_complete(napi);
2612 cas_unmask_intr(cp);
2613 }
2614 return credits;
2615 }
2616 #endif
2617
2618 #ifdef CONFIG_NET_POLL_CONTROLLER
2619 static void cas_netpoll(struct net_device *dev)
2620 {
2621 struct cas *cp = netdev_priv(dev);
2622
2623 cas_disable_irq(cp, 0);
2624 cas_interrupt(cp->pdev->irq, dev);
2625 cas_enable_irq(cp, 0);
2626
2627 #ifdef USE_PCI_INTB
2628 if (N_RX_COMP_RINGS > 1) {
2629
2630 }
2631 #endif
2632 #ifdef USE_PCI_INTC
2633 if (N_RX_COMP_RINGS > 2) {
2634
2635 }
2636 #endif
2637 #ifdef USE_PCI_INTD
2638 if (N_RX_COMP_RINGS > 3) {
2639
2640 }
2641 #endif
2642 }
2643 #endif
2644
2645 static void cas_tx_timeout(struct net_device *dev, unsigned int txqueue)
2646 {
2647 struct cas *cp = netdev_priv(dev);
2648
2649 netdev_err(dev, "transmit timed out, resetting\n");
2650 if (!cp->hw_running) {
2651 netdev_err(dev, "hrm.. hw not running!\n");
2652 return;
2653 }
2654
2655 netdev_err(dev, "MIF_STATE[%08x]\n",
2656 readl(cp->regs + REG_MIF_STATE_MACHINE));
2657
2658 netdev_err(dev, "MAC_STATE[%08x]\n",
2659 readl(cp->regs + REG_MAC_STATE_MACHINE));
2660
2661 netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
2662 readl(cp->regs + REG_TX_CFG),
2663 readl(cp->regs + REG_MAC_TX_STATUS),
2664 readl(cp->regs + REG_MAC_TX_CFG),
2665 readl(cp->regs + REG_TX_FIFO_PKT_CNT),
2666 readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
2667 readl(cp->regs + REG_TX_FIFO_READ_PTR),
2668 readl(cp->regs + REG_TX_SM_1),
2669 readl(cp->regs + REG_TX_SM_2));
2670
2671 netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
2672 readl(cp->regs + REG_RX_CFG),
2673 readl(cp->regs + REG_MAC_RX_STATUS),
2674 readl(cp->regs + REG_MAC_RX_CFG));
2675
2676 netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
2677 readl(cp->regs + REG_HP_STATE_MACHINE),
2678 readl(cp->regs + REG_HP_STATUS0),
2679 readl(cp->regs + REG_HP_STATUS1),
2680 readl(cp->regs + REG_HP_STATUS2));
2681
2682 #if 1
2683 atomic_inc(&cp->reset_task_pending);
2684 atomic_inc(&cp->reset_task_pending_all);
2685 schedule_work(&cp->reset_task);
2686 #else
2687 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
2688 schedule_work(&cp->reset_task);
2689 #endif
2690 }
2691
2692 static inline int cas_intme(int ring, int entry)
2693 {
2694 /* algorithm: request a tx interrupt every half ring of descriptors */
2695 if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
2696 return 1;
2697 return 0;
2698 }
2699
2700
2701 static void cas_write_txd(struct cas *cp, int ring, int entry,
2702 dma_addr_t mapping, int len, u64 ctrl, int last)
2703 {
2704 struct cas_tx_desc *txd = cp->init_txds[ring] + entry;
2705
2706 ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
2707 if (cas_intme(ring, entry))
2708 ctrl |= TX_DESC_INTME;
2709 if (last)
2710 ctrl |= TX_DESC_EOF;
2711 txd->control = cpu_to_le64(ctrl);
2712 txd->buffer = cpu_to_le64(mapping);
2713 }
2714
2715 static inline void *tx_tiny_buf(struct cas *cp, const int ring,
2716 const int entry)
2717 {
2718 return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
2719 }
2720
2721 static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
2722 const int entry, const int tentry)
2723 {
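/* count the tiny buffer against the packet's first entry (tentry) and mark this entry as using one, so cas_clean_txd() knows to skip it */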
2724 cp->tx_tiny_use[ring][tentry].nbufs++;
2725 cp->tx_tiny_use[ring][entry].used = 1;
2726 return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
2727 }
2728
2729 static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2730 struct sk_buff *skb)
2731 {
2732 struct net_device *dev = cp->dev;
2733 int entry, nr_frags, frag, tabort, tentry;
2734 dma_addr_t mapping;
2735 unsigned long flags;
2736 u64 ctrl;
2737 u32 len;
2738
2739 spin_lock_irqsave(&cp->tx_lock[ring], flags);
2740
2741 /* this is a hard error and should never happen: the queue was awake with no room left */
2742 if (TX_BUFFS_AVAIL(cp, ring) <=
2743 CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
2744 netif_stop_queue(dev);
2745 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2746 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
2747 return 1;
2748 }
2749
2750 ctrl = 0;
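/* hardware checksum offload: point the chip at the start of the checksummed region and at the spot to stuff the result */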
2751 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2752 const u64 csum_start_off = skb_checksum_start_offset(skb);
2753 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2754
2755 ctrl = TX_DESC_CSUM_EN |
2756 CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
2757 CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
2758 }
2759
2760 entry = cp->tx_new[ring];
2761 cp->tx_skbs[ring][entry] = skb;
2762
2763 nr_frags = skb_shinfo(skb)->nr_frags;
2764 len = skb_headlen(skb);
2765 mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
2766 offset_in_page(skb->data), len, DMA_TO_DEVICE);
2767
2768 tentry = entry;
2769 tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
2770 if (unlikely(tabort)) {
2771 /* the buffer ends badly: split it, bouncing the tail through a tiny buffer */
2772 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2773 ctrl | TX_DESC_SOF, 0);
2774 entry = TX_DESC_NEXT(ring, entry);
2775
2776 skb_copy_from_linear_data_offset(skb, len - tabort,
2777 tx_tiny_buf(cp, ring, entry), tabort);
2778 mapping = tx_tiny_map(cp, ring, entry, tentry);
2779 cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
2780 (nr_frags == 0));
2781 } else {
2782 cas_write_txd(cp, ring, entry, mapping, len, ctrl |
2783 TX_DESC_SOF, (nr_frags == 0));
2784 }
2785 entry = TX_DESC_NEXT(ring, entry);
2786
2787 for (frag = 0; frag < nr_frags; frag++) {
2788 const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
2789
2790 len = skb_frag_size(fragp);
2791 mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
2792 DMA_TO_DEVICE);
2793
2794 tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
2795 if (unlikely(tabort)) {
2796 void *addr;
2797
2798
2799 cas_write_txd(cp, ring, entry, mapping, len - tabort,
2800 ctrl, 0);
2801 entry = TX_DESC_NEXT(ring, entry);
2802
2803 addr = cas_page_map(skb_frag_page(fragp));
2804 memcpy(tx_tiny_buf(cp, ring, entry),
2805 addr + skb_frag_off(fragp) + len - tabort,
2806 tabort);
2807 cas_page_unmap(addr);
2808 mapping = tx_tiny_map(cp, ring, entry, tentry);
2809 len = tabort;
2810 }
2811
2812 cas_write_txd(cp, ring, entry, mapping, len, ctrl,
2813 (frag + 1 == nr_frags));
2814 entry = TX_DESC_NEXT(ring, entry);
2815 }
2816
2817 cp->tx_new[ring] = entry;
2818 if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
2819 netif_stop_queue(dev);
2820
2821 netif_printk(cp, tx_queued, KERN_DEBUG, dev,
2822 "tx[%d] queued, slot %d, skblen %d, avail %d\n",
2823 ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
2824 writel(entry, cp->regs + REG_TX_KICKN(ring));
2825 spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
2826 return 0;
2827 }
2828
2829 static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2830 {
2831 struct cas *cp = netdev_priv(dev);
2832
2833 /* this is only used as a load-balancing hint, so it doesn't
2834  * need to be SMP safe
2835  */
2836 static int ring;
2837
2838 if (skb_padto(skb, cp->min_frame_size))
2839 return NETDEV_TX_OK;
2840
2841 /* no QoS steering yet: packets are simply round-robined
2842  * across the tx rings
2843  */
2844 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2845 return NETDEV_TX_BUSY;
2846 return NETDEV_TX_OK;
2847 }
2848
2849 static void cas_init_tx_dma(struct cas *cp)
2850 {
2851 u64 desc_dma = cp->block_dvma;
2852 unsigned long off;
2853 u32 val;
2854 int i;
2855
2856
2857 #ifdef USE_TX_COMPWB
2858 off = offsetof(struct cas_init_block, tx_compwb);
2859 writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
2860 writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
2861 #endif
2862
2863 /* enable completion writebacks for all queues, enable paced
2864  * mode, disable the read pipe, and disable pre-interrupt compwbs
2865  */
2866 val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
2867 TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
2868 TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
2869 TX_CFG_INTR_COMPWB_DIS;
2870
2871
2872 for (i = 0; i < MAX_TX_RINGS; i++) {
2873 off = (unsigned long) cp->init_txds[i] -
2874 (unsigned long) cp->init_block;
2875
2876 val |= CAS_TX_RINGN_BASE(i);
2877 writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
2878 writel((desc_dma + off) & 0xffffffff, cp->regs +
2879 REG_TX_DBN_LOW(i));
2880
2881
2882
2883 }
2884 writel(val, cp->regs + REG_TX_CFG);
2885
2886
2887
2888
2889 #ifdef USE_QOS
2890 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2891 writel(0x1600, cp->regs + REG_TX_MAXBURST_1);
2892 writel(0x2400, cp->regs + REG_TX_MAXBURST_2);
2893 writel(0x4800, cp->regs + REG_TX_MAXBURST_3);
2894 #else
2895 writel(0x800, cp->regs + REG_TX_MAXBURST_0);
2896 writel(0x800, cp->regs + REG_TX_MAXBURST_1);
2897 writel(0x800, cp->regs + REG_TX_MAXBURST_2);
2898 writel(0x800, cp->regs + REG_TX_MAXBURST_3);
2899 #endif
2900 }
2901
2902
2903 static inline void cas_init_dma(struct cas *cp)
2904 {
2905 cas_init_tx_dma(cp);
2906 cas_init_rx_dma(cp);
2907 }
2908
2909 static void cas_process_mc_list(struct cas *cp)
2910 {
2911 u16 hash_table[16];
2912 u32 crc;
2913 struct netdev_hw_addr *ha;
2914 int i = 1;
2915
2916 memset(hash_table, 0, sizeof(hash_table));
2917 netdev_for_each_mc_addr(ha, cp->dev) {
2918 if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2919 /* program the first few multicast addresses into the
2920  * exact-match (alternate) MAC address registers
2921  */
2922 writel((ha->addr[4] << 8) | ha->addr[5],
2923 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2924 writel((ha->addr[2] << 8) | ha->addr[3],
2925 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2926 writel((ha->addr[0] << 8) | ha->addr[1],
2927 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2928 i++;
2929 }
2930 else {
2931 /* hash the remaining multicast addresses into the 256-bit
2932  * hardware hash filter
2933  */
2934 crc = ether_crc_le(ETH_ALEN, ha->addr);
2935 crc >>= 24;
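/* the top 8 bits of the crc select the hash bit: high nibble picks one of the 16 registers, low nibble the bit within it */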
2936 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2937 }
2938 }
2939 for (i = 0; i < 16; i++)
2940 writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
2941 }
2942
2943
2944 static u32 cas_setup_multicast(struct cas *cp)
2945 {
2946 u32 rxcfg = 0;
2947 int i;
2948
2949 if (cp->dev->flags & IFF_PROMISC) {
2950 rxcfg |= MAC_RX_CFG_PROMISC_EN;
2951
2952 } else if (cp->dev->flags & IFF_ALLMULTI) {
2953 for (i=0; i < 16; i++)
2954 writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i));
2955 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2956
2957 } else {
2958 cas_process_mc_list(cp);
2959 rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
2960 }
2961
2962 return rxcfg;
2963 }
2964
2965
2966 static void cas_clear_mac_err(struct cas *cp)
2967 {
2968 writel(0, cp->regs + REG_MAC_COLL_NORMAL);
2969 writel(0, cp->regs + REG_MAC_COLL_FIRST);
2970 writel(0, cp->regs + REG_MAC_COLL_EXCESS);
2971 writel(0, cp->regs + REG_MAC_COLL_LATE);
2972 writel(0, cp->regs + REG_MAC_TIMER_DEFER);
2973 writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK);
2974 writel(0, cp->regs + REG_MAC_RECV_FRAME);
2975 writel(0, cp->regs + REG_MAC_LEN_ERR);
2976 writel(0, cp->regs + REG_MAC_ALIGN_ERR);
2977 writel(0, cp->regs + REG_MAC_FCS_ERR);
2978 writel(0, cp->regs + REG_MAC_RX_CODE_ERR);
2979 }
2980
2981
2982 static void cas_mac_reset(struct cas *cp)
2983 {
2984 int i;
2985
2986
2987 writel(0x1, cp->regs + REG_MAC_TX_RESET);
2988 writel(0x1, cp->regs + REG_MAC_RX_RESET);
2989
2990
2991 i = STOP_TRIES;
2992 while (i-- > 0) {
2993 if (readl(cp->regs + REG_MAC_TX_RESET) == 0)
2994 break;
2995 udelay(10);
2996 }
2997
2998
2999 i = STOP_TRIES;
3000 while (i-- > 0) {
3001 if (readl(cp->regs + REG_MAC_RX_RESET) == 0)
3002 break;
3003 udelay(10);
3004 }
3005
3006 if (readl(cp->regs + REG_MAC_TX_RESET) |
3007 readl(cp->regs + REG_MAC_RX_RESET))
3008 netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
3009 readl(cp->regs + REG_MAC_TX_RESET),
3010 readl(cp->regs + REG_MAC_RX_RESET),
3011 readl(cp->regs + REG_MAC_STATE_MACHINE));
3012 }
3013
3014
3015
3016 static void cas_init_mac(struct cas *cp)
3017 {
3018 const unsigned char *e = &cp->dev->dev_addr[0];
3019 int i;
3020 cas_mac_reset(cp);
3021
3022
3023 writel(CAWR_RR_DIS, cp->regs + REG_CAWR);
3024
3025 #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
3026 /* set the infinite burst register on chips that don't have
3027  * the pci target-abort issue.
3028  */
3029 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
3030 writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
3031 #endif
3032
3033 writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);
3034
3035 writel(0x00, cp->regs + REG_MAC_IPG0);
3036 writel(0x08, cp->regs + REG_MAC_IPG1);
3037 writel(0x04, cp->regs + REG_MAC_IPG2);
3038
3039
3040 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3041
3042
3043 writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);
3044
3045 /* maximum frame size: ethernet payload + header + FCS +
3046  * optional VLAN tag. specifying this prevents rx tag errors
3047  * on oversized frames.
3048  */
3049 writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
3050 CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
3051 (CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
3052 cp->regs + REG_MAC_FRAMESIZE_MAX);
3053
3054 /* NOTE: crc_size is used as a surrogate for half-duplex here.
3055  * work around a Saturn half-duplex issue by increasing the
3056  * preamble size.
3057  */
3058 if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
3059 writel(0x41, cp->regs + REG_MAC_PA_SIZE);
3060 else
3061 writel(0x07, cp->regs + REG_MAC_PA_SIZE);
3062 writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
3063 writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
3064 writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);
3065
3066 writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);
3067
3068 writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
3069 writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
3070 writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
3071 writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
3072 writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);
3073
3074
3075 for (i = 0; i < 45; i++)
3076 writel(0x0, cp->regs + REG_MAC_ADDRN(i));
3077
3078 writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
3079 writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
3080 writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));
3081
3082 writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
3083 writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
3084 writel(0x0180, cp->regs + REG_MAC_ADDRN(44));
3085
3086 cp->mac_rx_cfg = cas_setup_multicast(cp);
3087
3088 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3089 cas_clear_mac_err(cp);
3090 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3091
3092 /* set up MAC interrupt masks. we want the interesting counter
3093  * expiration events, but not normal rx/tx frame notifications,
3094  * since the DMA engine already tells us about those.
3095  */
3096 writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
3097 writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
3098
3099 /* mask all MAC control (pause) interrupts for now; we make no
3100  * use of those events other than to record them.
3101  */
3102 writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
3103 }
3104
3105
3106 static void cas_init_pause_thresholds(struct cas *cp)
3107 {
3108 /* calculate pause thresholds. with a tiny rx fifo, set both
3109  * thresholds to the full fifo size, which effectively disables
3110  * pause generation. */
3111 if (cp->rx_fifo_size <= (2 * 1024)) {
3112 cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
3113 } else {
3114 int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
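/* if three worst-case frames (MTU + header + VLAN + FCS, rounded up to 64 bytes) don't fit in the rx fifo, fall back to fixed on/off thresholds */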
3115 if (max_frame * 3 > cp->rx_fifo_size) {
3116 cp->rx_pause_off = 7104;
3117 cp->rx_pause_on = 960;
3118 } else {
3119 int off = (cp->rx_fifo_size - (max_frame * 2));
3120 int on = off - max_frame;
3121 cp->rx_pause_off = off;
3122 cp->rx_pause_on = on;
3123 }
3124 }
3125 }
3126
3127 static int cas_vpd_match(const void __iomem *p, const char *str)
3128 {
3129 int len = strlen(str) + 1;
3130 int i;
3131
3132 for (i = 0; i < len; i++) {
3133 if (readb(p + i) != str[i])
3134 return 0;
3135 }
3136 return 1;
3137 }
3138
3139
3140 /* get the mac address and phy type by reading the vpd data in
3141  * the expansion rom.  this is a bit convoluted because:
3142  *  1) the vpd lists order-dependent mac addresses for
3143  *     multi-port cards, and
3144  *  2) the only hint of the port order we have is the pci slot
3145  *     number passed in as 'offset', which selects which of the
3146  *     listed mac addresses to use.
3147  * the phy type defaults to MII on MDIO0 unless the vpd says
3148  * the card is a fiber (pcs/serdes) part.
3149  */
3150
3151 static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3152 const int offset)
3153 {
3154 void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
3155 void __iomem *base, *kstart;
3156 int i, len;
3157 int found = 0;
3158 #define VPD_FOUND_MAC 0x01
3159 #define VPD_FOUND_PHY 0x02
3160
3161 int phy_type = CAS_PHY_MII_MDIO0;
3162 int mac_off = 0;
3163
3164 #if defined(CONFIG_SPARC)
3165 const unsigned char *addr;
3166 #endif
3167
3168
3169 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3170 cp->regs + REG_BIM_LOCAL_DEV_EN);
3171
3172
3173 if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
3174 goto use_random_mac_addr;
3175
3176
3177 base = NULL;
3178 for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
3179
3180 if ((readb(p + i + 0) == 0x50) &&
3181 (readb(p + i + 1) == 0x43) &&
3182 (readb(p + i + 2) == 0x49) &&
3183 (readb(p + i + 3) == 0x52)) {
3184 base = p + (readb(p + i + 8) |
3185 (readb(p + i + 9) << 8));
3186 break;
3187 }
3188 }
3189
3190 if (!base || (readb(base) != 0x82))
3191 goto use_random_mac_addr;
3192
3193 i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
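/* skip past the product identifier string (length in bytes 1-2) to the first vpd descriptor */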
3194 while (i < EXPANSION_ROM_SIZE) {
3195 if (readb(base + i) != 0x90)
3196 goto use_random_mac_addr;
3197
3198
3199 len = readb(base + i + 1) | (readb(base + i + 2) << 8);
3200
3201
3202 kstart = base + i + 3;
3203 p = kstart;
3204 while ((p - kstart) < len) {
3205 int klen = readb(p + 2);
3206 int j;
3207 char type;
3208
3209 p += 3;
3210 /* each descriptor is 3 header bytes (the third being the payload
3211  * length) followed by the payload.  we only care about 'I'
3212  * (instance) payloads, which carry a property type byte at
3213  * offset 3, a value length at offset 4, the property name
3214  * string at offset 5 and the value right after it.  the
3215  * properties of interest are:
3216  *
3217  *  "local-mac-address" (type 'B', value length 6, payload
3218  *      length 29): the factory mac address.
3219  *  "entropy-dev" == "vms110" (payload length 24): the board
3220  *      has an entropy generator.
3221  *  "phy-type" == "pcs" (type 'S', payload length 18): fiber
3222  *      (serdes) phy.
3223  *  "phy-interface" == "pcs" (type 'S', payload length 23):
3224  *      fiber (serdes) phy.
3225  */
3248 if (readb(p) != 'I')
3249 goto next;
3250
3251
3252 type = readb(p + 3);
3253 if (type == 'B') {
3254 if ((klen == 29) && readb(p + 4) == 6 &&
3255 cas_vpd_match(p + 5,
3256 "local-mac-address")) {
3257 if (mac_off++ > offset)
3258 goto next;
3259
3260
3261 for (j = 0; j < 6; j++)
3262 dev_addr[j] =
3263 readb(p + 23 + j);
3264 goto found_mac;
3265 }
3266 }
3267
3268 if (type != 'S')
3269 goto next;
3270
3271 #ifdef USE_ENTROPY_DEV
3272 if ((klen == 24) &&
3273 cas_vpd_match(p + 5, "entropy-dev") &&
3274 cas_vpd_match(p + 17, "vms110")) {
3275 cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
3276 goto next;
3277 }
3278 #endif
3279
3280 if (found & VPD_FOUND_PHY)
3281 goto next;
3282
3283 if ((klen == 18) && readb(p + 4) == 4 &&
3284 cas_vpd_match(p + 5, "phy-type")) {
3285 if (cas_vpd_match(p + 14, "pcs")) {
3286 phy_type = CAS_PHY_SERDES;
3287 goto found_phy;
3288 }
3289 }
3290
3291 if ((klen == 23) && readb(p + 4) == 4 &&
3292 cas_vpd_match(p + 5, "phy-interface")) {
3293 if (cas_vpd_match(p + 19, "pcs")) {
3294 phy_type = CAS_PHY_SERDES;
3295 goto found_phy;
3296 }
3297 }
3298 found_mac:
3299 found |= VPD_FOUND_MAC;
3300 goto next;
3301
3302 found_phy:
3303 found |= VPD_FOUND_PHY;
3304
3305 next:
3306 p += klen;
3307 }
3308 i += len + 3;
3309 }
3310
3311 use_random_mac_addr:
3312 if (found & VPD_FOUND_MAC)
3313 goto done;
3314
3315 #if defined(CONFIG_SPARC)
3316 addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3317 if (addr != NULL) {
3318 memcpy(dev_addr, addr, ETH_ALEN);
3319 goto done;
3320 }
3321 #endif
3322
3323 /* fall back to a Sun OUI (08:00:20) plus 3 random bytes */
3324 pr_info("MAC address not found in ROM VPD\n");
3325 dev_addr[0] = 0x08;
3326 dev_addr[1] = 0x00;
3327 dev_addr[2] = 0x20;
3328 get_random_bytes(dev_addr + 3, 3);
3329
3330 done:
3331 writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3332 return phy_type;
3333 }
3334
3335
3336 static void cas_check_pci_invariants(struct cas *cp)
3337 {
3338 struct pci_dev *pdev = cp->pdev;
3339
3340 cp->cas_flags = 0;
3341 if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
3342 (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
3343 if (pdev->revision >= CAS_ID_REVPLUS)
3344 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3345 if (pdev->revision < CAS_ID_REVPLUS02u)
3346 cp->cas_flags |= CAS_FLAG_TARGET_ABORT;
3347
3348 /* hardware checksumming is unreliable on the earliest
3349  * revisions, so flag it off there.
3350  */
3351 if (pdev->revision < CAS_ID_REV2)
3352 cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
3353 } else {
3354
3355 cp->cas_flags |= CAS_FLAG_REG_PLUS;
3356
3357
3358
3359
3360 if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
3361 (pdev->device == PCI_DEVICE_ID_NS_SATURN))
3362 cp->cas_flags |= CAS_FLAG_SATURN;
3363 }
3364 }
3365
3366
3367 static int cas_check_invariants(struct cas *cp)
3368 {
3369 struct pci_dev *pdev = cp->pdev;
3370 u8 addr[ETH_ALEN];
3371 u32 cfg;
3372 int i;
3373
3374
3375 cp->page_order = 0;
3376 #ifdef USE_PAGE_ORDER
3377 if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
3378
3379 struct page *page = alloc_pages(GFP_ATOMIC,
3380 CAS_JUMBO_PAGE_SHIFT -
3381 PAGE_SHIFT);
3382 if (page) {
3383 __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
3384 cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
3385 } else {
3386 printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
3387 }
3388 }
3389 #endif
3390 cp->page_size = (PAGE_SIZE << cp->page_order);
3391
3392
3393 cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
3394 cp->rx_fifo_size = RX_FIFO_SIZE;
3395
3396
3397
3398
3399 cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn));
3400 eth_hw_addr_set(cp->dev, addr);
3401 if (cp->phy_type & CAS_PHY_SERDES) {
3402 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3403 return 0;
3404 }
3405
3406 /* MII phy: MDIO1 takes precedence over MDIO0 if both are wired up */
3407 cfg = readl(cp->regs + REG_MIF_CFG);
3408 if (cfg & MIF_CFG_MDIO_1) {
3409 cp->phy_type = CAS_PHY_MII_MDIO1;
3410 } else if (cfg & MIF_CFG_MDIO_0) {
3411 cp->phy_type = CAS_PHY_MII_MDIO0;
3412 }
3413
3414 cas_mif_poll(cp, 0);
3415 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3416
3417 for (i = 0; i < 32; i++) {
3418 u32 phy_id;
3419 int j;
3420
3421 for (j = 0; j < 3; j++) {
3422 cp->phy_addr = i;
3423 phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
3424 phy_id |= cas_phy_read(cp, MII_PHYSID2);
3425 if (phy_id && (phy_id != 0xFFFFFFFF)) {
3426 cp->phy_id = phy_id;
3427 goto done;
3428 }
3429 }
3430 }
3431 pr_err("MII phy did not respond [%08x]\n",
3432 readl(cp->regs + REG_MIF_STATE_MACHINE));
3433 return -1;
3434
3435 done:
3436
3437 cfg = cas_phy_read(cp, MII_BMSR);
3438 if ((cfg & CAS_BMSR_1000_EXTEND) &&
3439 cas_phy_read(cp, CAS_MII_1000_EXTEND))
3440 cp->cas_flags |= CAS_FLAG_1000MB_CAP;
3441 return 0;
3442 }
3443
3444
3445 static inline void cas_start_dma(struct cas *cp)
3446 {
3447 int i;
3448 u32 val;
3449 int txfailed = 0;
3450
3451
3452 val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
3453 writel(val, cp->regs + REG_TX_CFG);
3454 val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
3455 writel(val, cp->regs + REG_RX_CFG);
3456
3457
3458 val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
3459 writel(val, cp->regs + REG_MAC_TX_CFG);
3460 val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
3461 writel(val, cp->regs + REG_MAC_RX_CFG);
3462
3463 i = STOP_TRIES;
3464 while (i-- > 0) {
3465 val = readl(cp->regs + REG_MAC_TX_CFG);
3466 if ((val & MAC_TX_CFG_EN))
3467 break;
3468 udelay(10);
3469 }
3470 if (i < 0) txfailed = 1;
3471 i = STOP_TRIES;
3472 while (i-- > 0) {
3473 val = readl(cp->regs + REG_MAC_RX_CFG);
3474 if ((val & MAC_RX_CFG_EN)) {
3475 if (txfailed) {
3476 netdev_err(cp->dev,
3477 "enabling mac failed [tx:%08x:%08x]\n",
3478 readl(cp->regs + REG_MIF_STATE_MACHINE),
3479 readl(cp->regs + REG_MAC_STATE_MACHINE));
3480 }
3481 goto enable_rx_done;
3482 }
3483 udelay(10);
3484 }
3485 netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
3486 (txfailed ? "tx,rx" : "rx"),
3487 readl(cp->regs + REG_MIF_STATE_MACHINE),
3488 readl(cp->regs + REG_MAC_STATE_MACHINE));
3489
3490 enable_rx_done:
3491 cas_unmask_intr(cp);
3492 writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
3493 writel(0, cp->regs + REG_RX_COMP_TAIL);
3494
3495 if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
3496 if (N_RX_DESC_RINGS > 1)
3497 writel(RX_DESC_RINGN_SIZE(1) - 4,
3498 cp->regs + REG_PLUS_RX_KICK1);
3499 }
3500 }
3501
3502
3503 static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
3504 int *pause)
3505 {
3506 u32 val = readl(cp->regs + REG_PCS_MII_LPA);
3507 *fd = (val & PCS_MII_LPA_FD) ? 1 : 0;
3508 *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
3509 if (val & PCS_MII_LPA_ASYM_PAUSE)
3510 *pause |= 0x10;
3511 *spd = 1000;
3512 }
3513
3514
3515 static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
3516 int *pause)
3517 {
3518 u32 val;
3519
3520 *fd = 0;
3521 *spd = 10;
3522 *pause = 0;
3523
3524
3525 val = cas_phy_read(cp, MII_LPA);
3526 if (val & CAS_LPA_PAUSE)
3527 *pause = 0x01;
3528
3529 if (val & CAS_LPA_ASYM_PAUSE)
3530 *pause |= 0x10;
3531
3532 if (val & LPA_DUPLEX)
3533 *fd = 1;
3534 if (val & LPA_100)
3535 *spd = 100;
3536
3537 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
3538 val = cas_phy_read(cp, CAS_MII_1000_STATUS);
3539 if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
3540 *spd = 1000;
3541 if (val & CAS_LPA_1000FULL)
3542 *fd = 1;
3543 }
3544 }
3545
3546
3547 /* a link-up condition has occurred: configure and enable the
3548  * rest of the chip for the negotiated mode.
3549  *
3550  * must be invoked under cp->lock. */
3551 static void cas_set_link_modes(struct cas *cp)
3552 {
3553 u32 val;
3554 int full_duplex, speed, pause;
3555
3556 full_duplex = 0;
3557 speed = 10;
3558 pause = 0;
3559
3560 if (CAS_PHY_MII(cp->phy_type)) {
3561 cas_mif_poll(cp, 0);
3562 val = cas_phy_read(cp, MII_BMCR);
3563 if (val & BMCR_ANENABLE) {
3564 cas_read_mii_link_mode(cp, &full_duplex, &speed,
3565 &pause);
3566 } else {
3567 if (val & BMCR_FULLDPLX)
3568 full_duplex = 1;
3569
3570 if (val & BMCR_SPEED100)
3571 speed = 100;
3572 else if (val & CAS_BMCR_SPEED1000)
3573 speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
3574 1000 : 100;
3575 }
3576 cas_mif_poll(cp, 1);
3577
3578 } else {
3579 val = readl(cp->regs + REG_PCS_MII_CTRL);
3580 cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
3581 if ((val & PCS_MII_AUTONEG_EN) == 0) {
3582 if (val & PCS_MII_CTRL_DUPLEX)
3583 full_duplex = 1;
3584 }
3585 }
3586
3587 netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
3588 speed, full_duplex ? "full" : "half");
3589
3590 val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
3591 if (CAS_PHY_MII(cp->phy_type)) {
3592 val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
3593 if (!full_duplex)
3594 val |= MAC_XIF_DISABLE_ECHO;
3595 }
3596 if (full_duplex)
3597 val |= MAC_XIF_FDPLX_LED;
3598 if (speed == 1000)
3599 val |= MAC_XIF_GMII_MODE;
3600 writel(val, cp->regs + REG_MAC_XIF_CFG);
3601
3602
3603 val = MAC_TX_CFG_IPG_EN;
3604 if (full_duplex) {
3605 val |= MAC_TX_CFG_IGNORE_CARRIER;
3606 val |= MAC_TX_CFG_IGNORE_COLL;
3607 } else {
3608 #ifndef USE_CSMA_CD_PROTO
3609 val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
3610 val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
3611 #endif
3612 }
3613
3614
3615 /* if gigabit and half-duplex, enable carrier extension mode
3616  * and increase the slot time to 512 bytes; otherwise disable
3617  * it and keep the 64-byte slot time. the FCS-strip handling
3618  * below is part of the checksum bug workaround. */
3619
3620 if ((speed == 1000) && !full_duplex) {
3621 writel(val | MAC_TX_CFG_CARRIER_EXTEND,
3622 cp->regs + REG_MAC_TX_CFG);
3623
3624 val = readl(cp->regs + REG_MAC_RX_CFG);
3625 val &= ~MAC_RX_CFG_STRIP_FCS;
3626 writel(val | MAC_RX_CFG_CARRIER_EXTEND,
3627 cp->regs + REG_MAC_RX_CFG);
3628
3629 writel(0x200, cp->regs + REG_MAC_SLOT_TIME);
3630
3631 cp->crc_size = 4;
3632
3633 cp->min_frame_size = CAS_1000MB_MIN_FRAME;
3634
3635 } else {
3636 writel(val, cp->regs + REG_MAC_TX_CFG);
3637
3638 /* checksum bug workaround: don't strip the FCS when running
3639  * half-duplex; crc_size accounts for the extra 4 bytes.
3640  */
3641 val = readl(cp->regs + REG_MAC_RX_CFG);
3642 if (full_duplex) {
3643 val |= MAC_RX_CFG_STRIP_FCS;
3644 cp->crc_size = 0;
3645 cp->min_frame_size = CAS_MIN_MTU;
3646 } else {
3647 val &= ~MAC_RX_CFG_STRIP_FCS;
3648 cp->crc_size = 4;
3649 cp->min_frame_size = CAS_MIN_FRAME;
3650 }
3651 writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
3652 cp->regs + REG_MAC_RX_CFG);
3653 writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
3654 }
3655
3656 if (netif_msg_link(cp)) {
3657 if (pause & 0x01) {
3658 netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
3659 cp->rx_fifo_size,
3660 cp->rx_pause_off,
3661 cp->rx_pause_on);
3662 } else if (pause & 0x10) {
3663 netdev_info(cp->dev, "TX pause enabled\n");
3664 } else {
3665 netdev_info(cp->dev, "Pause is disabled\n");
3666 }
3667 }
3668
3669 val = readl(cp->regs + REG_MAC_CTRL_CFG);
3670 val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
3671 if (pause) {
3672 val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
3673 if (pause & 0x01) {
3674 val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
3675 }
3676 }
3677 writel(val, cp->regs + REG_MAC_CTRL_CFG);
3678 cas_start_dma(cp);
3679 }
3680
3681
3682 static void cas_init_hw(struct cas *cp, int restart_link)
3683 {
3684 if (restart_link)
3685 cas_phy_init(cp);
3686
3687 cas_init_pause_thresholds(cp);
3688 cas_init_mac(cp);
3689 cas_init_dma(cp);
3690
3691 if (restart_link) {
3692
3693 cp->timer_ticks = 0;
3694 cas_begin_auto_negotiation(cp, NULL);
3695 } else if (cp->lstate == link_up) {
3696 cas_set_link_modes(cp);
3697 netif_carrier_on(cp->dev);
3698 }
3699 }
3700
3701
3702 /* assert the BIM SOFT_0 reset and then restore the saved PCI
3703  * config state once the chip comes back.
3704  */
3705 static void cas_hard_reset(struct cas *cp)
3706 {
3707 writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
3708 udelay(20);
3709 pci_restore_state(cp->pdev);
3710 }
3711
3712
3713 static void cas_global_reset(struct cas *cp, int blkflag)
3714 {
3715 int limit;
3716
3717
3718 if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
3719 /* when resetting a PCS (fiber) interface with blkflag set,
3720  * also set SW_RESET_BLOCK_PCS_SLINK so the results of the
3721  * last autonegotiation aren't cleared by the reset.
3722  * special handling would be needed if the chip were in a
3723  * loopback mode.
3724  */
3725 writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
3726 cp->regs + REG_SW_RESET);
3727 } else {
3728 writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
3729 }
3730
3731
3732 mdelay(3);
3733
3734 limit = STOP_TRIES;
3735 while (limit-- > 0) {
3736 u32 val = readl(cp->regs + REG_SW_RESET);
3737 if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
3738 goto done;
3739 udelay(10);
3740 }
3741 netdev_err(cp->dev, "sw reset failed\n");
3742
3743 done:
3744
3745 writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
3746 BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);
3747
3748 /* unmask only the pci errors we actually handle (bad ack,
3749  * delayed transaction timeout, dma read/write errors); leave
3750  * everything else, such as dma counter overflows, masked.
3751  */
3752 writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
3753 PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
3754 PCI_ERR_BIM_DMA_READ), cp->regs +
3755 REG_PCI_ERR_STATUS_MASK);
3756
3757
3758
3759
3760 writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
3761 }
3762
3763 static void cas_reset(struct cas *cp, int blkflag)
3764 {
3765 u32 val;
3766
3767 cas_mask_intr(cp);
3768 cas_global_reset(cp, blkflag);
3769 cas_mac_reset(cp);
3770 cas_entropy_reset(cp);
3771
3772
3773 val = readl(cp->regs + REG_TX_CFG);
3774 val &= ~TX_CFG_DMA_EN;
3775 writel(val, cp->regs + REG_TX_CFG);
3776
3777 val = readl(cp->regs + REG_RX_CFG);
3778 val &= ~RX_CFG_DMA_EN;
3779 writel(val, cp->regs + REG_RX_CFG);
3780
3781 /* reload the header parser (HP) firmware */
3782 if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
3783 (&CAS_HP_ALT_FIRMWARE[0] == &cas_prog_null[0])) {
3784 cas_load_firmware(cp, CAS_HP_FIRMWARE);
3785 } else {
3786 cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
3787 }
3788
3789
3790 spin_lock(&cp->stat_lock[N_TX_RINGS]);
3791 cas_clear_mac_err(cp);
3792 spin_unlock(&cp->stat_lock[N_TX_RINGS]);
3793 }
3794
3795
3796 static void cas_shutdown(struct cas *cp)
3797 {
3798 unsigned long flags;
3799
3800
3801 cp->hw_running = 0;
3802
3803 del_timer_sync(&cp->link_timer);
3804
3805
3806 #if 0
3807 while (atomic_read(&cp->reset_task_pending_mtu) ||
3808 atomic_read(&cp->reset_task_pending_spare) ||
3809 atomic_read(&cp->reset_task_pending_all))
3810 schedule();
3811
3812 #else
3813 while (atomic_read(&cp->reset_task_pending))
3814 schedule();
3815 #endif
3816
3817 cas_lock_all_save(cp, flags);
3818 cas_reset(cp, 0);
3819 if (cp->cas_flags & CAS_FLAG_SATURN)
3820 cas_phy_powerdown(cp);
3821 cas_unlock_all_restore(cp, flags);
3822 }
3823
3824 static int cas_change_mtu(struct net_device *dev, int new_mtu)
3825 {
3826 struct cas *cp = netdev_priv(dev);
3827
3828 dev->mtu = new_mtu;
3829 if (!netif_running(dev) || !netif_device_present(dev))
3830 return 0;
3831
3832
3833 #if 1
3834 atomic_inc(&cp->reset_task_pending);
3835 if ((cp->phy_type & CAS_PHY_SERDES)) {
3836 atomic_inc(&cp->reset_task_pending_all);
3837 } else {
3838 atomic_inc(&cp->reset_task_pending_mtu);
3839 }
3840 schedule_work(&cp->reset_task);
3841 #else
3842 atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
3843 CAS_RESET_ALL : CAS_RESET_MTU);
3844 pr_err("reset called in cas_change_mtu\n");
3845 schedule_work(&cp->reset_task);
3846 #endif
3847
3848 flush_work(&cp->reset_task);
3849 return 0;
3850 }
3851
3852 static void cas_clean_txd(struct cas *cp, int ring)
3853 {
3854 struct cas_tx_desc *txd = cp->init_txds[ring];
3855 struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
3856 u64 daddr, dlen;
3857 int i, size;
3858
3859 size = TX_DESC_RINGN_SIZE(ring);
3860 for (i = 0; i < size; i++) {
3861 int frag;
3862
3863 if (skbs[i] == NULL)
3864 continue;
3865
3866 skb = skbs[i];
3867 skbs[i] = NULL;
3868
3869 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
3870 int ent = i & (size - 1);
3871
3872 /* the first buffer is never a tiny buffer and so
3873  * always needs to be unmapped.
3874  */
3875 daddr = le64_to_cpu(txd[ent].buffer);
3876 dlen = CAS_VAL(TX_DESC_BUFLEN,
3877 le64_to_cpu(txd[ent].control));
3878 dma_unmap_page(&cp->pdev->dev, daddr, dlen,
3879 DMA_TO_DEVICE);
3880
3881 if (frag != skb_shinfo(skb)->nr_frags) {
3882 i++;
3883
3884 /* the next buffer might be a tiny buffer;
3885  * skip past it.
3886  */
3887 ent = i & (size - 1);
3888 if (cp->tx_tiny_use[ring][ent].used)
3889 i++;
3890 }
3891 }
3892 dev_kfree_skb_any(skb);
3893 }
3894
3895
3896 memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
3897 }
3898
3899
3900 static inline void cas_free_rx_desc(struct cas *cp, int ring)
3901 {
3902 cas_page_t **page = cp->rx_pages[ring];
3903 int i, size;
3904
3905 size = RX_DESC_RINGN_SIZE(ring);
3906 for (i = 0; i < size; i++) {
3907 if (page[i]) {
3908 cas_page_free(cp, page[i]);
3909 page[i] = NULL;
3910 }
3911 }
3912 }
3913
3914 static void cas_free_rxds(struct cas *cp)
3915 {
3916 int i;
3917
3918 for (i = 0; i < N_RX_DESC_RINGS; i++)
3919 cas_free_rx_desc(cp, i);
3920 }
3921
3922
3923 static void cas_clean_rings(struct cas *cp)
3924 {
3925 int i;
3926
3927
3928 memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
3929 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
3930 for (i = 0; i < N_TX_RINGS; i++)
3931 cas_clean_txd(cp, i);
3932
3933
3934 memset(cp->init_block, 0, sizeof(struct cas_init_block));
3935 cas_clean_rxds(cp);
3936 cas_clean_rxcs(cp);
3937 }
3938
3939
3940 static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
3941 {
3942 cas_page_t **page = cp->rx_pages[ring];
3943 int size, i = 0;
3944
3945 size = RX_DESC_RINGN_SIZE(ring);
3946 for (i = 0; i < size; i++) {
3947 if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
3948 return -1;
3949 }
3950 return 0;
3951 }
3952
3953 static int cas_alloc_rxds(struct cas *cp)
3954 {
3955 int i;
3956
3957 for (i = 0; i < N_RX_DESC_RINGS; i++) {
3958 if (cas_alloc_rx_desc(cp, i) < 0) {
3959 cas_free_rxds(cp);
3960 return -1;
3961 }
3962 }
3963 return 0;
3964 }
3965
3966 static void cas_reset_task(struct work_struct *work)
3967 {
3968 struct cas *cp = container_of(work, struct cas, reset_task);
3969 #if 0
3970 int pending = atomic_read(&cp->reset_task_pending);
3971 #else
3972 int pending_all = atomic_read(&cp->reset_task_pending_all);
3973 int pending_spare = atomic_read(&cp->reset_task_pending_spare);
3974 int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);
3975
3976 if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
3977
3978
3979
3980 atomic_dec(&cp->reset_task_pending);
3981 return;
3982 }
3983 #endif
3984
3985
3986
3987
3988 if (cp->hw_running) {
3989 unsigned long flags;
3990
3991
3992 netif_device_detach(cp->dev);
3993 cas_lock_all_save(cp, flags);
3994
3995 if (cp->opened) {
3996
3997
3998
3999
4000 cas_spare_recover(cp, GFP_ATOMIC);
4001 }
4002 #if 1
4003
4004 if (!pending_all && !pending_mtu)
4005 goto done;
4006 #else
4007 if (pending == CAS_RESET_SPARE)
4008 goto done;
4009 #endif
4010
4011
4012
4013
4014
4015
4016
4017 #if 1
4018 cas_reset(cp, !(pending_all > 0));
4019 if (cp->opened)
4020 cas_clean_rings(cp);
4021 cas_init_hw(cp, (pending_all > 0));
4022 #else
4023 cas_reset(cp, !(pending == CAS_RESET_ALL));
4024 if (cp->opened)
4025 cas_clean_rings(cp);
4026 cas_init_hw(cp, pending == CAS_RESET_ALL);
4027 #endif
4028
4029 done:
4030 cas_unlock_all_restore(cp, flags);
4031 netif_device_attach(cp->dev);
4032 }
4033 #if 1
4034 atomic_sub(pending_all, &cp->reset_task_pending_all);
4035 atomic_sub(pending_spare, &cp->reset_task_pending_spare);
4036 atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
4037 atomic_dec(&cp->reset_task_pending);
4038 #else
4039 atomic_set(&cp->reset_task_pending, 0);
4040 #endif
4041 }
4042
4043 static void cas_link_timer(struct timer_list *t)
4044 {
4045 struct cas *cp = from_timer(cp, t, link_timer);
4046 int mask, pending = 0, reset = 0;
4047 unsigned long flags;
4048
4049 if (link_transition_timeout != 0 &&
4050 cp->link_transition_jiffies_valid &&
4051 time_is_before_jiffies(cp->link_transition_jiffies +
4052 link_transition_timeout)) {
4053 /* the recorded link transition is only interesting for
4054  * link_transition_timeout jiffies; after that, forget it so
4055  * a stale event doesn't keep influencing the link check below.
4056  */
4057 cp->link_transition_jiffies_valid = 0;
4058 }
4059
4060 if (!cp->hw_running)
4061 return;
4062
4063 spin_lock_irqsave(&cp->lock, flags);
4064 cas_lock_tx(cp);
4065 cas_entropy_gather(cp);
4066
4067 /* if a reset is already pending, let the reset task deal with
4068  * the hardware and just reschedule the timer.
4069  */
4070 #if 1
4071 if (atomic_read(&cp->reset_task_pending_all) ||
4072 atomic_read(&cp->reset_task_pending_spare) ||
4073 atomic_read(&cp->reset_task_pending_mtu))
4074 goto done;
4075 #else
4076 if (atomic_read(&cp->reset_task_pending))
4077 goto done;
4078 #endif
4079
4080
4081 if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
4082 int i, rmask;
4083
4084 for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
4085 rmask = CAS_FLAG_RXD_POST(i);
4086 if ((mask & rmask) == 0)
4087 continue;
4088
4089
4090 if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
4091 pending = 1;
4092 continue;
4093 }
4094 cp->cas_flags &= ~rmask;
4095 }
4096 }
4097
4098 if (CAS_PHY_MII(cp->phy_type)) {
4099 u16 bmsr;
4100 cas_mif_poll(cp, 0);
4101 bmsr = cas_phy_read(cp, MII_BMSR);
4102
4103 /* the link status bit in the BMSR is latched-low, so read
4104  * the register twice to get the current state.
4105  */
4106
4107 bmsr = cas_phy_read(cp, MII_BMSR);
4108 cas_mif_poll(cp, 1);
4109 readl(cp->regs + REG_MIF_STATUS);
4110 reset = cas_mii_link_check(cp, bmsr);
4111 } else {
4112 reset = cas_pcs_link_check(cp);
4113 }
4114
4115 if (reset)
4116 goto done;
4117
4118
4119 if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
4120 u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
4121 u32 wptr, rptr;
4122 int tlm = CAS_VAL(MAC_SM_TLM, val);
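/* TLM states 0x3/0x5 with an idle encap state machine indicate a wedged tx path: force a reset */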
4123
4124 if (((tlm == 0x5) || (tlm == 0x3)) &&
4125 (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
4126 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4127 "tx err: MAC_STATE[%08x]\n", val);
4128 reset = 1;
4129 goto done;
4130 }
4131
4132 val = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
4133 wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
4134 rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
4135 if ((val == 0) && (wptr != rptr)) {
4136 netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
4137 "tx err: TX_FIFO[%08x:%08x:%08x]\n",
4138 val, wptr, rptr);
4139 reset = 1;
4140 }
4141
4142 if (reset)
4143 cas_hard_reset(cp);
4144 }
4145
4146 done:
4147 if (reset) {
4148 #if 1
4149 atomic_inc(&cp->reset_task_pending);
4150 atomic_inc(&cp->reset_task_pending_all);
4151 schedule_work(&cp->reset_task);
4152 #else
4153 atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
4154 pr_err("reset called in cas_link_timer\n");
4155 schedule_work(&cp->reset_task);
4156 #endif
4157 }
4158
4159 if (!pending)
4160 mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
4161 cas_unlock_tx(cp);
4162 spin_unlock_irqrestore(&cp->lock, flags);
4163 }
4164
4165
4166 /* per-ring scratch buffers: when cas_calc_tabort() decides the tail
4167  * of a tx buffer must be bounced, it is copied into one of these. */
4168 static void cas_tx_tiny_free(struct cas *cp)
4169 {
4170 struct pci_dev *pdev = cp->pdev;
4171 int i;
4172
4173 for (i = 0; i < N_TX_RINGS; i++) {
4174 if (!cp->tx_tiny_bufs[i])
4175 continue;
4176
4177 dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4178 cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
4179 cp->tx_tiny_bufs[i] = NULL;
4180 }
4181 }
4182
4183 static int cas_tx_tiny_alloc(struct cas *cp)
4184 {
4185 struct pci_dev *pdev = cp->pdev;
4186 int i;
4187
4188 for (i = 0; i < N_TX_RINGS; i++) {
4189 cp->tx_tiny_bufs[i] =
4190 dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
4191 &cp->tx_tiny_dvma[i], GFP_KERNEL);
4192 if (!cp->tx_tiny_bufs[i]) {
4193 cas_tx_tiny_free(cp);
4194 return -1;
4195 }
4196 }
4197 return 0;
4198 }
4199
4200
4201 static int cas_open(struct net_device *dev)
4202 {
4203 struct cas *cp = netdev_priv(dev);
4204 int hw_was_up, err;
4205 unsigned long flags;
4206
4207 mutex_lock(&cp->pm_mutex);
4208
4209 hw_was_up = cp->hw_running;
4210
4211 /* pm_mutex serializes open/close/suspend, so hw_running can
4212  * be checked here without taking cp->lock.
4213  */
4214 if (!cp->hw_running) {
4215
4216 cas_lock_all_save(cp, flags);
4217 /* cas_reset() is called with blkflag == 0 here because
4218  * cas_init_hw() below runs with restart_link set (the hw
4219  * wasn't up), which kicks off autonegotiation anyway.
4220  */
4221
4222 cas_reset(cp, 0);
4223 cp->hw_running = 1;
4224 cas_unlock_all_restore(cp, flags);
4225 }
4226
4227 err = -ENOMEM;
4228 if (cas_tx_tiny_alloc(cp) < 0)
4229 goto err_unlock;
4230
4231
4232 if (cas_alloc_rxds(cp) < 0)
4233 goto err_tx_tiny;
4234
4235
4236 cas_spare_init(cp);
4237 cas_spare_recover(cp, GFP_KERNEL);
4238
4239 /* interrupts can be requested now that they are masked on the
4240  * chip. cassini+ has up to four interrupt lines, but using the
4241  * extra ones needs explicit pci interrupt mapping, so only the
4242  * primary vector is requested here.
4243  */
4244 if (request_irq(cp->pdev->irq, cas_interrupt,
4245 IRQF_SHARED, dev->name, (void *) dev)) {
4246 netdev_err(cp->dev, "failed to request irq !\n");
4247 err = -EAGAIN;
4248 goto err_spare;
4249 }
4250
4251 #ifdef USE_NAPI
4252 napi_enable(&cp->napi);
4253 #endif
4254
4255 cas_lock_all_save(cp, flags);
4256 cas_clean_rings(cp);
4257 cas_init_hw(cp, !hw_was_up);
4258 cp->opened = 1;
4259 cas_unlock_all_restore(cp, flags);
4260
4261 netif_start_queue(dev);
4262 mutex_unlock(&cp->pm_mutex);
4263 return 0;
4264
4265 err_spare:
4266 cas_spare_free(cp);
4267 cas_free_rxds(cp);
4268 err_tx_tiny:
4269 cas_tx_tiny_free(cp);
4270 err_unlock:
4271 mutex_unlock(&cp->pm_mutex);
4272 return err;
4273 }
4274
4275 static int cas_close(struct net_device *dev)
4276 {
4277 unsigned long flags;
4278 struct cas *cp = netdev_priv(dev);
4279
4280 #ifdef USE_NAPI
4281 napi_disable(&cp->napi);
4282 #endif
4283
4284 mutex_lock(&cp->pm_mutex);
4285
4286 netif_stop_queue(dev);
4287
4288
4289 cas_lock_all_save(cp, flags);
4290 cp->opened = 0;
4291 cas_reset(cp, 0);
4292 cas_phy_init(cp);
4293 cas_begin_auto_negotiation(cp, NULL);
4294 cas_clean_rings(cp);
4295 cas_unlock_all_restore(cp, flags);
4296
4297 free_irq(cp->pdev->irq, (void *) dev);
4298 cas_spare_free(cp);
4299 cas_free_rxds(cp);
4300 cas_tx_tiny_free(cp);
4301 mutex_unlock(&cp->pm_mutex);
4302 return 0;
4303 }
4304
4305 static struct {
4306 const char name[ETH_GSTRING_LEN];
4307 } ethtool_cassini_statnames[] = {
4308 {"collisions"},
4309 {"rx_bytes"},
4310 {"rx_crc_errors"},
4311 {"rx_dropped"},
4312 {"rx_errors"},
4313 {"rx_fifo_errors"},
4314 {"rx_frame_errors"},
4315 {"rx_length_errors"},
4316 {"rx_over_errors"},
4317 {"rx_packets"},
4318 {"tx_aborted_errors"},
4319 {"tx_bytes"},
4320 {"tx_dropped"},
4321 {"tx_errors"},
4322 {"tx_fifo_errors"},
4323 {"tx_packets"}
4324 };
4325 #define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames)
4326
4327 static struct {
4328 const int offsets;
4329 } ethtool_register_table[] = {
4330 {-MII_BMSR},
4331 {-MII_BMCR},
4332 {REG_CAWR},
4333 {REG_INF_BURST},
4334 {REG_BIM_CFG},
4335 {REG_RX_CFG},
4336 {REG_HP_CFG},
4337 {REG_MAC_TX_CFG},
4338 {REG_MAC_RX_CFG},
4339 {REG_MAC_CTRL_CFG},
4340 {REG_MAC_XIF_CFG},
4341 {REG_MIF_CFG},
4342 {REG_PCS_CFG},
4343 {REG_SATURN_PCFG},
4344 {REG_PCS_MII_STATUS},
4345 {REG_PCS_STATE_MACHINE},
4346 {REG_MAC_COLL_EXCESS},
4347 {REG_MAC_COLL_LATE}
4348 };
4349 #define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table)
4350 #define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN)
4351
4352 static void cas_read_regs(struct cas *cp, u8 *ptr, int len)
4353 {
4354 u8 *p;
4355 int i;
4356 unsigned long flags;
4357
4358 spin_lock_irqsave(&cp->lock, flags);
4359 for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) {
4360 u16 hval;
4361 u32 val;
4362 if (ethtool_register_table[i].offsets < 0) {
4363 hval = cas_phy_read(cp,
4364 -ethtool_register_table[i].offsets);
4365 val = hval;
4366 } else {
4367 val = readl(cp->regs + ethtool_register_table[i].offsets);
4368 }
4369 memcpy(p, (u8 *)&val, sizeof(u32));
4370 }
4371 spin_unlock_irqrestore(&cp->lock, flags);
4372 }
4373
4374 static struct net_device_stats *cas_get_stats(struct net_device *dev)
4375 {
4376 struct cas *cp = netdev_priv(dev);
4377 struct net_device_stats *stats = cp->net_stats;
4378 unsigned long flags;
4379 int i;
4380 unsigned long tmp;
4381
4382 /* everything gets collated into net_stats[N_TX_RINGS] */
4383 if (!cp->hw_running)
4384 return stats + N_TX_RINGS;
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394 spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags);
4395 stats[N_TX_RINGS].rx_crc_errors +=
4396 readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff;
4397 stats[N_TX_RINGS].rx_frame_errors +=
4398 readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff;
4399 stats[N_TX_RINGS].rx_length_errors +=
4400 readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff;
4401 #if 1
4402 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) +
4403 (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff);
4404 stats[N_TX_RINGS].tx_aborted_errors += tmp;
4405 stats[N_TX_RINGS].collisions +=
4406 tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff);
4407 #else
4408 stats[N_TX_RINGS].tx_aborted_errors +=
4409 readl(cp->regs + REG_MAC_COLL_EXCESS);
4410 stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) +
4411 readl(cp->regs + REG_MAC_COLL_LATE);
4412 #endif
4413 cas_clear_mac_err(cp);
4414
4415
4416 spin_lock(&cp->stat_lock[0]);
4417 stats[N_TX_RINGS].collisions += stats[0].collisions;
4418 stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors;
4419 stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors;
4420 stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors;
4421 stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors;
4422 stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors;
4423 spin_unlock(&cp->stat_lock[0]);
4424
4425 for (i = 0; i < N_TX_RINGS; i++) {
4426 spin_lock(&cp->stat_lock[i]);
4427 stats[N_TX_RINGS].rx_length_errors +=
4428 stats[i].rx_length_errors;
4429 stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors;
4430 stats[N_TX_RINGS].rx_packets += stats[i].rx_packets;
4431 stats[N_TX_RINGS].tx_packets += stats[i].tx_packets;
4432 stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes;
4433 stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes;
4434 stats[N_TX_RINGS].rx_errors += stats[i].rx_errors;
4435 stats[N_TX_RINGS].tx_errors += stats[i].tx_errors;
4436 stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped;
4437 stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped;
4438 memset(stats + i, 0, sizeof(struct net_device_stats));
4439 spin_unlock(&cp->stat_lock[i]);
4440 }
4441 spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags);
4442 return stats + N_TX_RINGS;
4443 }
4444
4445
4446 static void cas_set_multicast(struct net_device *dev)
4447 {
4448 struct cas *cp = netdev_priv(dev);
4449 u32 rxcfg, rxcfg_new;
4450 unsigned long flags;
4451 int limit = STOP_TRIES;
4452
4453 if (!cp->hw_running)
4454 return;
4455
4456 spin_lock_irqsave(&cp->lock, flags);
4457 rxcfg = readl(cp->regs + REG_MAC_RX_CFG);
4458
4459
4460 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4461 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) {
4462 if (!limit--)
4463 break;
4464 udelay(10);
4465 }
4466
4467
4468 limit = STOP_TRIES;
4469 rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN);
4470 writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
4471 while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) {
4472 if (!limit--)
4473 break;
4474 udelay(10);
4475 }
4476
4477
4478 cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp);
4479 rxcfg |= rxcfg_new;
4480 writel(rxcfg, cp->regs + REG_MAC_RX_CFG);
4481 spin_unlock_irqrestore(&cp->lock, flags);
4482 }
4483
4484 static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4485 {
4486 struct cas *cp = netdev_priv(dev);
4487 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
4488 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
4489 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
4490 }
4491
4492 static int cas_get_link_ksettings(struct net_device *dev,
4493 struct ethtool_link_ksettings *cmd)
4494 {
4495 struct cas *cp = netdev_priv(dev);
4496 u16 bmcr;
4497 int full_duplex, speed, pause;
4498 unsigned long flags;
4499 enum link_state linkstate = link_up;
4500 u32 supported, advertising;
4501
4502 advertising = 0;
4503 supported = SUPPORTED_Autoneg;
4504 if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
4505 supported |= SUPPORTED_1000baseT_Full;
4506 advertising |= ADVERTISED_1000baseT_Full;
4507 }
4508
4509
4510 spin_lock_irqsave(&cp->lock, flags);
4511 bmcr = 0;
4512 linkstate = cp->lstate;
4513 if (CAS_PHY_MII(cp->phy_type)) {
4514 cmd->base.port = PORT_MII;
4515 cmd->base.phy_address = cp->phy_addr;
4516 advertising |= ADVERTISED_TP | ADVERTISED_MII |
4517 ADVERTISED_10baseT_Half |
4518 ADVERTISED_10baseT_Full |
4519 ADVERTISED_100baseT_Half |
4520 ADVERTISED_100baseT_Full;
4521
4522 supported |=
4523 (SUPPORTED_10baseT_Half |
4524 SUPPORTED_10baseT_Full |
4525 SUPPORTED_100baseT_Half |
4526 SUPPORTED_100baseT_Full |
4527 SUPPORTED_TP | SUPPORTED_MII);
4528
4529 if (cp->hw_running) {
4530 cas_mif_poll(cp, 0);
4531 bmcr = cas_phy_read(cp, MII_BMCR);
4532 cas_read_mii_link_mode(cp, &full_duplex,
4533 &speed, &pause);
4534 cas_mif_poll(cp, 1);
4535 }
4536
4537 } else {
4538 cmd->base.port = PORT_FIBRE;
4539 cmd->base.phy_address = 0;
4540 supported |= SUPPORTED_FIBRE;
4541 advertising |= ADVERTISED_FIBRE;
4542
4543 if (cp->hw_running) {
4544
4545 bmcr = readl(cp->regs + REG_PCS_MII_CTRL);
4546 cas_read_pcs_link_mode(cp, &full_duplex,
4547 &speed, &pause);
4548 }
4549 }
4550 spin_unlock_irqrestore(&cp->lock, flags);
4551
4552 if (bmcr & BMCR_ANENABLE) {
4553 advertising |= ADVERTISED_Autoneg;
4554 cmd->base.autoneg = AUTONEG_ENABLE;
4555 cmd->base.speed = ((speed == 10) ?
4556 SPEED_10 :
4557 ((speed == 1000) ?
4558 SPEED_1000 : SPEED_100));
4559 cmd->base.duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
4560 } else {
4561 cmd->base.autoneg = AUTONEG_DISABLE;
4562 cmd->base.speed = ((bmcr & CAS_BMCR_SPEED1000) ?
4563 SPEED_1000 :
4564 ((bmcr & BMCR_SPEED100) ?
4565 SPEED_100 : SPEED_10));
4566 cmd->base.duplex = (bmcr & BMCR_FULLDPLX) ?
4567 DUPLEX_FULL : DUPLEX_HALF;
4568 }
4569 if (linkstate != link_up) {
4570 /* force these to "unknown" if the link is not up and
4571  * autonegotiation is enabled. the speed can be set to 0,
4572  * but duplex is reported as 0xff since its legal values
4573  * are only 0 and 1; ethtool prints unrecognized values
4574  * in parentheses after the word "Unknown".
4575  *
4576  * if in forced mode, report the speed and duplex settings
4577  * that were configured.
4578  */
4579
4580 if (cp->link_cntl & BMCR_ANENABLE) {
4581 cmd->base.speed = 0;
4582 cmd->base.duplex = 0xff;
4583 } else {
4584 cmd->base.speed = SPEED_10;
4585 if (cp->link_cntl & BMCR_SPEED100) {
4586 cmd->base.speed = SPEED_100;
4587 } else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
4588 cmd->base.speed = SPEED_1000;
4589 }
4590 cmd->base.duplex = (cp->link_cntl & BMCR_FULLDPLX) ?
4591 DUPLEX_FULL : DUPLEX_HALF;
4592 }
4593 }
4594
4595 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
4596 supported);
4597 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
4598 advertising);
4599
4600 return 0;
4601 }
4602
4603 static int cas_set_link_ksettings(struct net_device *dev,
4604 const struct ethtool_link_ksettings *cmd)
4605 {
4606 struct cas *cp = netdev_priv(dev);
4607 unsigned long flags;
4608 u32 speed = cmd->base.speed;
4609
4610
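/* Validate the requested autoneg/speed/duplex combination. */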
4611 if (cmd->base.autoneg != AUTONEG_ENABLE &&
4612 cmd->base.autoneg != AUTONEG_DISABLE)
4613 return -EINVAL;
4614
4615 if (cmd->base.autoneg == AUTONEG_DISABLE &&
4616 ((speed != SPEED_1000 &&
4617 speed != SPEED_100 &&
4618 speed != SPEED_10) ||
4619 (cmd->base.duplex != DUPLEX_HALF &&
4620 cmd->base.duplex != DUPLEX_FULL)))
4621 return -EINVAL;
4622
4623
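/* Apply the new settings by restarting link bring-up. */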
4624 spin_lock_irqsave(&cp->lock, flags);
4625 cas_begin_auto_negotiation(cp, cmd);
4626 spin_unlock_irqrestore(&cp->lock, flags);
4627 return 0;
4628 }
4629
4630 static int cas_nway_reset(struct net_device *dev)
4631 {
4632 struct cas *cp = netdev_priv(dev);
4633 unsigned long flags;
4634
4635 if ((cp->link_cntl & BMCR_ANENABLE) == 0)
4636 return -EINVAL;
4637
4638
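/* Restart autonegotiation with the current parameters. */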
4639 spin_lock_irqsave(&cp->lock, flags);
4640 cas_begin_auto_negotiation(cp, NULL);
4641 spin_unlock_irqrestore(&cp->lock, flags);
4642
4643 return 0;
4644 }
4645
4646 static u32 cas_get_link(struct net_device *dev)
4647 {
4648 struct cas *cp = netdev_priv(dev);
4649 return cp->lstate == link_up;
4650 }
4651
4652 static u32 cas_get_msglevel(struct net_device *dev)
4653 {
4654 struct cas *cp = netdev_priv(dev);
4655 return cp->msg_enable;
4656 }
4657
4658 static void cas_set_msglevel(struct net_device *dev, u32 value)
4659 {
4660 struct cas *cp = netdev_priv(dev);
4661 cp->msg_enable = value;
4662 }
4663
4664 static int cas_get_regs_len(struct net_device *dev)
4665 {
4666 struct cas *cp = netdev_priv(dev);
4667 return min_t(int, cp->casreg_len, CAS_MAX_REGS);
4668 }
4669
4670 static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4671 void *p)
4672 {
4673 struct cas *cp = netdev_priv(dev);
4674 regs->version = 0;
4675
4676 cas_read_regs(cp, p, regs->len / sizeof(u32));
4677 }
4678
4679 static int cas_get_sset_count(struct net_device *dev, int sset)
4680 {
4681 switch (sset) {
4682 case ETH_SS_STATS:
4683 return CAS_NUM_STAT_KEYS;
4684 default:
4685 return -EOPNOTSUPP;
4686 }
4687 }
4688
4689 static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data)
4690 {
4691 memcpy(data, &ethtool_cassini_statnames,
4692 CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN);
4693 }
4694
4695 static void cas_get_ethtool_stats(struct net_device *dev,
4696 struct ethtool_stats *estats, u64 *data)
4697 {
4698 struct cas *cp = netdev_priv(dev);
4699 struct net_device_stats *stats = cas_get_stats(cp->dev);
4700 int i = 0;
4701 data[i++] = stats->collisions;
4702 data[i++] = stats->rx_bytes;
4703 data[i++] = stats->rx_crc_errors;
4704 data[i++] = stats->rx_dropped;
4705 data[i++] = stats->rx_errors;
4706 data[i++] = stats->rx_fifo_errors;
4707 data[i++] = stats->rx_frame_errors;
4708 data[i++] = stats->rx_length_errors;
4709 data[i++] = stats->rx_over_errors;
4710 data[i++] = stats->rx_packets;
4711 data[i++] = stats->tx_aborted_errors;
4712 data[i++] = stats->tx_bytes;
4713 data[i++] = stats->tx_dropped;
4714 data[i++] = stats->tx_errors;
4715 data[i++] = stats->tx_fifo_errors;
4716 data[i++] = stats->tx_packets;
4717 BUG_ON(i != CAS_NUM_STAT_KEYS);
4718 }
4719
4720 static const struct ethtool_ops cas_ethtool_ops = {
4721 .get_drvinfo = cas_get_drvinfo,
4722 .nway_reset = cas_nway_reset,
4723 .get_link = cas_get_link,
4724 .get_msglevel = cas_get_msglevel,
4725 .set_msglevel = cas_set_msglevel,
4726 .get_regs_len = cas_get_regs_len,
4727 .get_regs = cas_get_regs,
4728 .get_sset_count = cas_get_sset_count,
4729 .get_strings = cas_get_strings,
4730 .get_ethtool_stats = cas_get_ethtool_stats,
4731 .get_link_ksettings = cas_get_link_ksettings,
4732 .set_link_ksettings = cas_set_link_ksettings,
4733 };
4734
4735 static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4736 {
4737 struct cas *cp = netdev_priv(dev);
4738 struct mii_ioctl_data *data = if_mii(ifr);
4739 unsigned long flags;
4740 int rc = -EOPNOTSUPP;
4741
4742 /* Hold pm_mutex so the MII ioctls cannot race with open/close or the
4743  * power-management paths. */
4745 mutex_lock(&cp->pm_mutex);
4746 switch (cmd) {
4747 case SIOCGMIIPHY:
4748 data->phy_id = cp->phy_addr;
4749 fallthrough;
4750
4751 case SIOCGMIIREG:
4752 spin_lock_irqsave(&cp->lock, flags);
4753 cas_mif_poll(cp, 0);
4754 data->val_out = cas_phy_read(cp, data->reg_num & 0x1f);
4755 cas_mif_poll(cp, 1);
4756 spin_unlock_irqrestore(&cp->lock, flags);
4757 rc = 0;
4758 break;
4759
4760 case SIOCSMIIREG:
4761 spin_lock_irqsave(&cp->lock, flags);
4762 cas_mif_poll(cp, 0);
4763 rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in);
4764 cas_mif_poll(cp, 1);
4765 spin_unlock_irqrestore(&cp->lock, flags);
4766 break;
4767 default:
4768 break;
4769 }
4770
4771 mutex_unlock(&cp->pm_mutex);
4772 return rc;
4773 }
4774
4775
4776 /* If the Cassini sits behind an Intel bridge (vendor 0x8086, device
4777  * 0x537c), tweak that bridge's configuration for better performance.
4778  * Any other upstream bridge is left alone. */
4779 static void cas_program_bridge(struct pci_dev *cas_pdev)
4780 {
4781 struct pci_dev *pdev = cas_pdev->bus->self;
4782 u32 val;
4783
4784 if (!pdev)
4785 return;
4786
4787 if (pdev->vendor != 0x8086 || pdev->device != 0x537c)
4788 return;
4789
4790
4791 /* Clear bit 18 of the bridge's configuration dword at offset 0x40. */
4795 pci_read_config_dword(pdev, 0x40, &val);
4796 val &= ~0x00040000;
4797 pci_write_config_dword(pdev, 0x40, val);
4798
4799
4800 /* Program the bridge's configuration word at offset 0x50 with
4801  * (5 << 10) | 0x3ff; the field encoding is specific to this bridge. */
4821 pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff);
4822
4823
4824 /* Program the bridge's configuration word at offset 0x52: four 3-bit
4825  * fields and one 4-bit field, all set to their maximum values. */
4843 pci_write_config_word(pdev, 0x52,
4844 (0x7 << 13) |
4845 (0x7 << 10) |
4846 (0x7 << 7) |
4847 (0x7 << 4) |
4848 (0xf << 0));
4849
4850
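/* Set the bridge's cache line size to 8 dwords (32 bytes). */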
4851 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4852
4853
4854 /* Give the bridge the maximum PCI latency timer value. */
4856 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff);
4857 }
4858
4859 static const struct net_device_ops cas_netdev_ops = {
4860 .ndo_open = cas_open,
4861 .ndo_stop = cas_close,
4862 .ndo_start_xmit = cas_start_xmit,
4863 .ndo_get_stats = cas_get_stats,
4864 .ndo_set_rx_mode = cas_set_multicast,
4865 .ndo_eth_ioctl = cas_ioctl,
4866 .ndo_tx_timeout = cas_tx_timeout,
4867 .ndo_change_mtu = cas_change_mtu,
4868 .ndo_set_mac_address = eth_mac_addr,
4869 .ndo_validate_addr = eth_validate_addr,
4870 #ifdef CONFIG_NET_POLL_CONTROLLER
4871 .ndo_poll_controller = cas_netpoll,
4872 #endif
4873 };
4874
4875 static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4876 {
4877 static int cas_version_printed = 0;
4878 unsigned long casreg_len;
4879 struct net_device *dev;
4880 struct cas *cp;
4881 u16 pci_cmd;
4882 int i, err;
4883 u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4884
4885 if (cas_version_printed++ == 0)
4886 pr_info("%s", version);
4887
4888 err = pci_enable_device(pdev);
4889 if (err) {
4890 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
4891 return err;
4892 }
4893
4894 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
4895 dev_err(&pdev->dev, "Cannot find proper PCI device "
4896 "base address, aborting\n");
4897 err = -ENODEV;
4898 goto err_out_disable_pdev;
4899 }
4900
4901 dev = alloc_etherdev(sizeof(*cp));
4902 if (!dev) {
4903 err = -ENOMEM;
4904 goto err_out_disable_pdev;
4905 }
4906 SET_NETDEV_DEV(dev, &pdev->dev);
4907
4908 err = pci_request_regions(pdev, dev->name);
4909 if (err) {
4910 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
4911 goto err_out_free_netdev;
4912 }
4913 pci_set_master(pdev);
4914
4915
4916 /* Enable parity error response and mask SERR reporting on the device,
4917  * then try to turn on memory-write-invalidate. */
4919 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
4920 pci_cmd &= ~PCI_COMMAND_SERR;
4921 pci_cmd |= PCI_COMMAND_PARITY;
4922 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
4923 if (pci_try_set_mwi(pdev))
4924 pr_warn("Could not enable MWI for %s\n", pci_name(pdev));
4925
4926 cas_program_bridge(pdev);
4927
4928
4929 /* If the cache line size programmed by the firmware is smaller than
4930  * the driver's preferred value, raise it (capped at SMP_CACHE_BYTES)
4931  * and remember the original so it can be restored later. */
4934 #if 1
4935 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE,
4936 &orig_cacheline_size);
4937 if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) {
4938 cas_cacheline_size =
4939 (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ?
4940 CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES;
4941 if (pci_write_config_byte(pdev,
4942 PCI_CACHE_LINE_SIZE,
4943 cas_cacheline_size)) {
4944 dev_err(&pdev->dev, "Could not set PCI cache "
4945 "line size\n");
4946 goto err_out_free_res;
4947 }
4948 }
4949 #endif
4950
4951
4952
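/* Configure DMA attributes: request a 64-bit mask for both streaming
 * and coherent mappings. */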
4953 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4954 if (err) {
4955 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4956 goto err_out_free_res;
4957 }
4958
4959 casreg_len = pci_resource_len(pdev, 0);
4960
4961 cp = netdev_priv(dev);
4962 cp->pdev = pdev;
4963 #if 1
4964
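/* A value of 0 means the cache line size was never overridden, so the
 * remove/error paths know not to restore it. */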
4965 cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0;
4966 #endif
4967 cp->dev = dev;
4968 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
4969 cassini_debug;
4970
4971 #if defined(CONFIG_SPARC)
4972 cp->of_node = pci_device_to_OF_node(pdev);
4973 #endif
4974
4975 cp->link_transition = LINK_TRANSITION_UNKNOWN;
4976 cp->link_transition_jiffies_valid = 0;
4977
4978 spin_lock_init(&cp->lock);
4979 spin_lock_init(&cp->rx_inuse_lock);
4980 spin_lock_init(&cp->rx_spare_lock);
4981 for (i = 0; i < N_TX_RINGS; i++) {
4982 spin_lock_init(&cp->stat_lock[i]);
4983 spin_lock_init(&cp->tx_lock[i]);
4984 }
4985 spin_lock_init(&cp->stat_lock[N_TX_RINGS]);
4986 mutex_init(&cp->pm_mutex);
4987
4988 timer_setup(&cp->link_timer, cas_link_timer, 0);
4989
4990 #if 1
4991 /* Start all of the reset-task pending counters at zero. */
4994 atomic_set(&cp->reset_task_pending, 0);
4995 atomic_set(&cp->reset_task_pending_all, 0);
4996 atomic_set(&cp->reset_task_pending_spare, 0);
4997 atomic_set(&cp->reset_task_pending_mtu, 0);
4998 #endif
4999 INIT_WORK(&cp->reset_task, cas_reset_task);
5000
5001
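/* Seed the link control from the link_mode module parameter; anything
 * out of range falls back to autonegotiation. */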
5002 if (link_mode >= 0 && link_mode < 6)
5003 cp->link_cntl = link_modes[link_mode];
5004 else
5005 cp->link_cntl = BMCR_ANENABLE;
5006 cp->lstate = link_down;
5007 cp->link_transition = LINK_TRANSITION_LINK_DOWN;
5008 netif_carrier_off(cp->dev);
5009 cp->timer_ticks = 0;
5010
5011
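/* Map the Cassini register space (BAR 0). */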
5012 cp->regs = pci_iomap(pdev, 0, casreg_len);
5013 if (!cp->regs) {
5014 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5015 goto err_out_free_res;
5016 }
5017 cp->casreg_len = casreg_len;
5018
5019 pci_save_state(pdev);
5020 cas_check_pci_invariants(cp);
5021 cas_hard_reset(cp);
5022 cas_reset(cp, 0);
5023 if (cas_check_invariants(cp))
5024 goto err_out_iounmap;
5025 if (cp->cas_flags & CAS_FLAG_SATURN)
5026 cas_saturn_firmware_init(cp);
5027
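/* The init block is a single DMA-coherent allocation holding every TX
 * and RX descriptor ring; the per-ring pointers below index into it. */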
5028 cp->init_block =
5029 dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
5030 &cp->block_dvma, GFP_KERNEL);
5031 if (!cp->init_block) {
5032 dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
5033 goto err_out_iounmap;
5034 }
5035
5036 for (i = 0; i < N_TX_RINGS; i++)
5037 cp->init_txds[i] = cp->init_block->txds[i];
5038
5039 for (i = 0; i < N_RX_DESC_RINGS; i++)
5040 cp->init_rxds[i] = cp->init_block->rxds[i];
5041
5042 for (i = 0; i < N_RX_COMP_RINGS; i++)
5043 cp->init_rxcs[i] = cp->init_block->rxcs[i];
5044
5045 for (i = 0; i < N_RX_FLOWS; i++)
5046 skb_queue_head_init(&cp->rx_flows[i]);
5047
5048 dev->netdev_ops = &cas_netdev_ops;
5049 dev->ethtool_ops = &cas_ethtool_ops;
5050 dev->watchdog_timeo = CAS_TX_TIMEOUT;
5051
5052 #ifdef USE_NAPI
5053 netif_napi_add(dev, &cp->napi, cas_poll, 64);
5054 #endif
5055 dev->irq = pdev->irq;
5056 dev->dma = 0;
5057
5058
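/* Advertise hardware checksumming and scatter/gather unless this chip
 * revision lacks HW checksum support. */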
5059 if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0)
5060 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
5061
5062 dev->features |= NETIF_F_HIGHDMA;
5063
5064
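/* MTU range supported by the driver. */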
5065 dev->min_mtu = CAS_MIN_MTU;
5066 dev->max_mtu = CAS_MAX_MTU;
5067
5068 if (register_netdev(dev)) {
5069 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
5070 goto err_out_free_consistent;
5071 }
5072
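/* BIM_CFG reports the PCI bus width and clock for the probe banner. */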
5073 i = readl(cp->regs + REG_BIM_CFG);
5074 netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
5075 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5076 (i & BIM_CFG_32BIT) ? "32" : "64",
5077 (i & BIM_CFG_66MHZ) ? "66" : "33",
5078 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5079 dev->dev_addr);
5080
5081 pci_set_drvdata(pdev, dev);
5082 cp->hw_running = 1;
5083 cas_entropy_reset(cp);
5084 cas_phy_init(cp);
5085 cas_begin_auto_negotiation(cp, NULL);
5086 return 0;
5087
5088 err_out_free_consistent:
5089 dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
5090 cp->init_block, cp->block_dvma);
5091
5092 err_out_iounmap:
5093 mutex_lock(&cp->pm_mutex);
5094 if (cp->hw_running)
5095 cas_shutdown(cp);
5096 mutex_unlock(&cp->pm_mutex);
5097
5098 pci_iounmap(pdev, cp->regs);
5099
5100
5101 err_out_free_res:
5102 pci_release_regions(pdev);
5103
5104
5105 /* Try to restore the original cache line size in case the error
5106  * occurred after we changed it. */
5107 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
5108
5109 err_out_free_netdev:
5110 free_netdev(dev);
5111
5112 err_out_disable_pdev:
5113 pci_disable_device(pdev);
5114 return -ENODEV;
5115 }
5116
5117 static void cas_remove_one(struct pci_dev *pdev)
5118 {
5119 struct net_device *dev = pci_get_drvdata(pdev);
5120 struct cas *cp;
5121 if (!dev)
5122 return;
5123
5124 cp = netdev_priv(dev);
5125 unregister_netdev(dev);
5126
5127 vfree(cp->fw_data);
5128
5129 mutex_lock(&cp->pm_mutex);
5130 cancel_work_sync(&cp->reset_task);
5131 if (cp->hw_running)
5132 cas_shutdown(cp);
5133 mutex_unlock(&cp->pm_mutex);
5134
5135 #if 1
5136 if (cp->orig_cacheline_size) {
5137 /* Restore the cache line size we overrode at probe time. */
5140 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
5141 cp->orig_cacheline_size);
5142 }
5143 #endif
5144 dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
5145 cp->init_block, cp->block_dvma);
5146 pci_iounmap(pdev, cp->regs);
5147 free_netdev(dev);
5148 pci_release_regions(pdev);
5149 pci_disable_device(pdev);
5150 }
5151
5152 static int __maybe_unused cas_suspend(struct device *dev_d)
5153 {
5154 struct net_device *dev = dev_get_drvdata(dev_d);
5155 struct cas *cp = netdev_priv(dev);
5156 unsigned long flags;
5157
5158 mutex_lock(&cp->pm_mutex);
5159
5160
5161 if (cp->opened) {
5162 netif_device_detach(dev);
5163
5164 cas_lock_all_save(cp, flags);
5165
5166 /* A plain chip reset is sufficient here; cas_resume() reinitialises
5167  * the hardware and restarts the link when the device wakes up. */
5171 cas_reset(cp, 0);
5172 cas_clean_rings(cp);
5173 cas_unlock_all_restore(cp, flags);
5174 }
5175
5176 if (cp->hw_running)
5177 cas_shutdown(cp);
5178 mutex_unlock(&cp->pm_mutex);
5179
5180 return 0;
5181 }
5182
5183 static int __maybe_unused cas_resume(struct device *dev_d)
5184 {
5185 struct net_device *dev = dev_get_drvdata(dev_d);
5186 struct cas *cp = netdev_priv(dev);
5187
5188 netdev_info(dev, "resuming\n");
5189
5190 mutex_lock(&cp->pm_mutex);
5191 cas_hard_reset(cp);
5192 if (cp->opened) {
5193 unsigned long flags;
5194 cas_lock_all_save(cp, flags);
5195 cas_reset(cp, 0);
5196 cp->hw_running = 1;
5197 cas_clean_rings(cp);
5198 cas_init_hw(cp, 1);
5199 cas_unlock_all_restore(cp, flags);
5200
5201 netif_device_attach(dev);
5202 }
5203 mutex_unlock(&cp->pm_mutex);
5204 return 0;
5205 }
5206
5207 static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume);
5208
5209 static struct pci_driver cas_driver = {
5210 .name = DRV_MODULE_NAME,
5211 .id_table = cas_pci_tbl,
5212 .probe = cas_init_one,
5213 .remove = cas_remove_one,
5214 .driver.pm = &cas_pm_ops,
5215 };
5216
5217 static int __init cas_init(void)
5218 {
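/* Convert the linkdown_timeout module parameter (seconds) into jiffies;
 * values <= 0 disable the link-down transition timeout. */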
5219 if (linkdown_timeout > 0)
5220 link_transition_timeout = linkdown_timeout * HZ;
5221 else
5222 link_transition_timeout = 0;
5223
5224 return pci_register_driver(&cas_driver);
5225 }
5226
5227 static void __exit cas_cleanup(void)
5228 {
5229 pci_unregister_driver(&cas_driver);
5230 }
5231
5232 module_init(cas_init);
5233 module_exit(cas_cleanup);