0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0036
0037 #include <linux/module.h>
0038 #include <linux/types.h>
0039 #include <linux/bitops.h>
0040 #include <linux/init.h>
0041 #include <linux/dma-mapping.h>
0042 #include <linux/mm.h>
0043 #include <linux/errno.h>
0044 #include <linux/ioport.h>
0045 #include <linux/pci.h>
0046 #include <linux/kernel.h>
0047 #include <linux/netdevice.h>
0048 #include <linux/etherdevice.h>
0049 #include <linux/skbuff.h>
0050 #include <linux/delay.h>
0051 #include <linux/timer.h>
0052 #include <linux/slab.h>
0053 #include <linux/interrupt.h>
0054 #include <linux/string.h>
0055 #include <linux/wait.h>
0056 #include <linux/io.h>
0057 #include <linux/if.h>
0058 #include <linux/uaccess.h>
0059 #include <linux/proc_fs.h>
0060 #include <linux/of_address.h>
0061 #include <linux/of_device.h>
0062 #include <linux/of_irq.h>
0063 #include <linux/inetdevice.h>
0064 #include <linux/platform_device.h>
0065 #include <linux/reboot.h>
0066 #include <linux/ethtool.h>
0067 #include <linux/mii.h>
0068 #include <linux/in.h>
0069 #include <linux/if_arp.h>
0070 #include <linux/if_vlan.h>
0071 #include <linux/ip.h>
0072 #include <linux/tcp.h>
0073 #include <linux/udp.h>
0074 #include <linux/crc-ccitt.h>
0075 #include <linux/crc32.h>
0076
0077 #include "via-velocity.h"
0078
0079 enum velocity_bus_type {
0080 BUS_PCI,
0081 BUS_PLATFORM,
0082 };
0083
0084 static int velocity_nics;
0085
0086 static void velocity_set_power_state(struct velocity_info *vptr, char state)
0087 {
0088 void *addr = vptr->mac_regs;
0089
0090 if (vptr->pdev)
0091 pci_set_power_state(vptr->pdev, state);
0092 else
0093 writeb(state, addr + 0x154);
0094 }
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104 static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
0105 {
0106 int i;
0107
0108
0109 BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR);
0110
0111 writeb(0, ®s->CAMADDR);
0112
0113
0114 for (i = 0; i < 8; i++)
0115 *mask++ = readb(&(regs->MARCAM[i]));
0116
0117
0118 writeb(0, ®s->CAMADDR);
0119
0120
0121 BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR);
0122 }
0123
0124
0125
0126
0127
0128
0129
0130
0131 static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
0132 {
0133 int i;
0134
0135 BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR);
0136
0137 writeb(CAMADDR_CAMEN, ®s->CAMADDR);
0138
0139 for (i = 0; i < 8; i++)
0140 writeb(*mask++, &(regs->MARCAM[i]));
0141
0142
0143 writeb(0, ®s->CAMADDR);
0144
0145
0146 BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR);
0147 }
0148
0149 static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
0150 {
0151 int i;
0152
0153 BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR);
0154
0155 writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, ®s->CAMADDR);
0156
0157 for (i = 0; i < 8; i++)
0158 writeb(*mask++, &(regs->MARCAM[i]));
0159
0160
0161 writeb(0, ®s->CAMADDR);
0162
0163
0164 BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR);
0165 }
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175 static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
0176 {
0177 int i;
0178
0179
0180 BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR);
0181
0182 idx &= (64 - 1);
0183
0184 writeb(CAMADDR_CAMEN | idx, ®s->CAMADDR);
0185
0186 for (i = 0; i < 6; i++)
0187 writeb(*addr++, &(regs->MARCAM[i]));
0188
0189 BYTE_REG_BITS_ON(CAMCR_CAMWR, ®s->CAMCR);
0190
0191 udelay(10);
0192
0193 writeb(0, ®s->CAMADDR);
0194
0195
0196 BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR);
0197 }
0198
0199 static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
0200 const u8 *addr)
0201 {
0202
0203
0204 BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR);
0205
0206 idx &= (64 - 1);
0207
0208 writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, ®s->CAMADDR);
0209 writew(*((u16 *) addr), ®s->MARCAM[0]);
0210
0211 BYTE_REG_BITS_ON(CAMCR_CAMWR, ®s->CAMCR);
0212
0213 udelay(10);
0214
0215 writeb(0, ®s->CAMADDR);
0216
0217
0218 BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR);
0219 }
0220
0221
0222
0223
0224
0225
0226
0227
0228
0229
0230 static void mac_wol_reset(struct mac_regs __iomem *regs)
0231 {
0232
0233
0234 BYTE_REG_BITS_OFF(STICKHW_SWPTAG, ®s->STICKHW);
0235
0236 BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW);
0237
0238 BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, ®s->CHIPGCR);
0239 BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR);
0240
0241 writeb(WOLCFG_PMEOVR, ®s->WOLCFGClr);
0242
0243 writew(0xFFFF, ®s->WOLCRClr);
0244
0245 writew(0xFFFF, ®s->WOLSRClr);
0246 }
0247
0248 static const struct ethtool_ops velocity_ethtool_ops;
0249
0250
0251
0252
0253
0254 MODULE_AUTHOR("VIA Networking Technologies, Inc.");
0255 MODULE_LICENSE("GPL");
0256 MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
0257
0258 #define VELOCITY_PARAM(N, D) \
0259 static int N[MAX_UNITS] = OPTION_DEFAULT;\
0260 module_param_array(N, int, NULL, 0); \
0261 MODULE_PARM_DESC(N, D);
0262
0263 #define RX_DESC_MIN 64
0264 #define RX_DESC_MAX 255
0265 #define RX_DESC_DEF 64
0266 VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
0267
0268 #define TX_DESC_MIN 16
0269 #define TX_DESC_MAX 256
0270 #define TX_DESC_DEF 64
0271 VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
0272
0273 #define RX_THRESH_MIN 0
0274 #define RX_THRESH_MAX 3
0275 #define RX_THRESH_DEF 0
0276
0277
0278
0279
0280
0281
0282 VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
0283
0284 #define DMA_LENGTH_MIN 0
0285 #define DMA_LENGTH_MAX 7
0286 #define DMA_LENGTH_DEF 6
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297
0298 VELOCITY_PARAM(DMA_length, "DMA length");
0299
0300 #define IP_ALIG_DEF 0
0301
0302
0303
0304
0305
0306
0307 VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
0308
0309 #define FLOW_CNTL_DEF 1
0310 #define FLOW_CNTL_MIN 1
0311 #define FLOW_CNTL_MAX 5
0312
0313
0314
0315
0316
0317
0318
0319
0320 VELOCITY_PARAM(flow_control, "Enable flow control ability");
0321
0322 #define MED_LNK_DEF 0
0323 #define MED_LNK_MIN 0
0324 #define MED_LNK_MAX 5
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337 VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
0338
0339 #define WOL_OPT_DEF 0
0340 #define WOL_OPT_MIN 0
0341 #define WOL_OPT_MAX 7
0342
0343
0344
0345
0346
0347
0348
0349 VELOCITY_PARAM(wol_opts, "Wake On Lan options");
0350
0351 static int rx_copybreak = 200;
0352 module_param(rx_copybreak, int, 0644);
0353 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
0354
0355
0356
0357
0358 static struct velocity_info_tbl chip_info_table[] = {
0359 {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
0360 { }
0361 };
0362
0363
0364
0365
0366
0367
0368 static const struct pci_device_id velocity_pci_id_table[] = {
0369 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
0370 { }
0371 };
0372
0373 MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
0374
0375
0376
0377
0378
0379 static const struct of_device_id velocity_of_ids[] = {
0380 { .compatible = "via,velocity-vt6110", .data = &chip_info_table[0] },
0381 { },
0382 };
0383 MODULE_DEVICE_TABLE(of, velocity_of_ids);
0384
0385
0386
0387
0388
0389
0390
0391
0392 static const char *get_chip_name(enum chip_type chip_id)
0393 {
0394 int i;
0395 for (i = 0; chip_info_table[i].name != NULL; i++)
0396 if (chip_info_table[i].chip_id == chip_id)
0397 break;
0398 return chip_info_table[i].name;
0399 }
0400
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413
0414 static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
0415 char *name)
0416 {
0417 if (val == -1)
0418 *opt = def;
0419 else if (val < min || val > max) {
0420 pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
0421 name, min, max);
0422 *opt = def;
0423 } else {
0424 pr_info("set value of parameter %s to %d\n", name, val);
0425 *opt = val;
0426 }
0427 }
0428
0429
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
0440
0441 static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
0442 char *name)
0443 {
0444 (*opt) &= (~flag);
0445 if (val == -1)
0446 *opt |= (def ? flag : 0);
0447 else if (val < 0 || val > 1) {
0448 pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n",
0449 name, 0, 1);
0450 *opt |= (def ? flag : 0);
0451 } else {
0452 pr_info("set parameter %s to %s\n",
0453 name, val ? "TRUE" : "FALSE");
0454 *opt |= (val ? flag : 0);
0455 }
0456 }
0457
0458
0459
0460
0461
0462
0463
0464
0465
0466 static void velocity_get_options(struct velocity_opt *opts, int index)
0467 {
0468
0469 velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index],
0470 RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF,
0471 "rx_thresh");
0472 velocity_set_int_opt(&opts->DMA_length, DMA_length[index],
0473 DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF,
0474 "DMA_length");
0475 velocity_set_int_opt(&opts->numrx, RxDescriptors[index],
0476 RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF,
0477 "RxDescriptors");
0478 velocity_set_int_opt(&opts->numtx, TxDescriptors[index],
0479 TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF,
0480 "TxDescriptors");
0481
0482 velocity_set_int_opt(&opts->flow_cntl, flow_control[index],
0483 FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF,
0484 "flow_control");
0485 velocity_set_bool_opt(&opts->flags, IP_byte_align[index],
0486 IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN,
0487 "IP_byte_align");
0488 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index],
0489 MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF,
0490 "Media link mode");
0491 velocity_set_int_opt(&opts->wol_opts, wol_opts[index],
0492 WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF,
0493 "Wake On Lan options");
0494 opts->numrx = (opts->numrx & ~3);
0495 }
0496
0497
0498
0499
0500
0501
0502
0503
0504 static void velocity_init_cam_filter(struct velocity_info *vptr)
0505 {
0506 struct mac_regs __iomem *regs = vptr->mac_regs;
0507 unsigned int vid, i = 0;
0508
0509
0510 WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, ®s->MCFG);
0511 WORD_REG_BITS_ON(MCFG_VIDFR, ®s->MCFG);
0512
0513
0514 memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
0515 memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
0516 mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
0517 mac_set_cam_mask(regs, vptr->mCAMmask);
0518
0519
0520 for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) {
0521 mac_set_vlan_cam(regs, i, (u8 *) &vid);
0522 vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
0523 if (++i >= VCAM_SIZE)
0524 break;
0525 }
0526 mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
0527 }
0528
0529 static int velocity_vlan_rx_add_vid(struct net_device *dev,
0530 __be16 proto, u16 vid)
0531 {
0532 struct velocity_info *vptr = netdev_priv(dev);
0533
0534 spin_lock_irq(&vptr->lock);
0535 set_bit(vid, vptr->active_vlans);
0536 velocity_init_cam_filter(vptr);
0537 spin_unlock_irq(&vptr->lock);
0538 return 0;
0539 }
0540
0541 static int velocity_vlan_rx_kill_vid(struct net_device *dev,
0542 __be16 proto, u16 vid)
0543 {
0544 struct velocity_info *vptr = netdev_priv(dev);
0545
0546 spin_lock_irq(&vptr->lock);
0547 clear_bit(vid, vptr->active_vlans);
0548 velocity_init_cam_filter(vptr);
0549 spin_unlock_irq(&vptr->lock);
0550 return 0;
0551 }
0552
0553 static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
0554 {
0555 vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
0556 }
0557
0558
0559
0560
0561
0562
0563
0564
0565 static void velocity_rx_reset(struct velocity_info *vptr)
0566 {
0567
0568 struct mac_regs __iomem *regs = vptr->mac_regs;
0569 int i;
0570
0571 velocity_init_rx_ring_indexes(vptr);
0572
0573
0574
0575
0576 for (i = 0; i < vptr->options.numrx; ++i)
0577 vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
0578
0579 writew(vptr->options.numrx, ®s->RBRDU);
0580 writel(vptr->rx.pool_dma, ®s->RDBaseLo);
0581 writew(0, ®s->RDIdx);
0582 writew(vptr->options.numrx - 1, ®s->RDCSize);
0583 }
0584
0585
0586
0587
0588
0589
0590
0591
0592
0593 static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
0594 {
0595 u32 status = 0;
0596
0597 switch (vptr->options.spd_dpx) {
0598 case SPD_DPX_AUTO:
0599 status = VELOCITY_AUTONEG_ENABLE;
0600 break;
0601 case SPD_DPX_100_FULL:
0602 status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
0603 break;
0604 case SPD_DPX_10_FULL:
0605 status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
0606 break;
0607 case SPD_DPX_100_HALF:
0608 status = VELOCITY_SPEED_100;
0609 break;
0610 case SPD_DPX_10_HALF:
0611 status = VELOCITY_SPEED_10;
0612 break;
0613 case SPD_DPX_1000_FULL:
0614 status = VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
0615 break;
0616 }
0617 vptr->mii_status = status;
0618 return status;
0619 }
0620
0621
0622
0623
0624
0625
0626
0627 static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
0628 {
0629 u16 ww;
0630
0631
0632 writeb(0, ®s->MIICR);
0633 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
0634 udelay(1);
0635 if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR))
0636 break;
0637 }
0638 }
0639
0640
0641
0642
0643
0644
0645
0646
0647 static void enable_mii_autopoll(struct mac_regs __iomem *regs)
0648 {
0649 int ii;
0650
0651 writeb(0, &(regs->MIICR));
0652 writeb(MIIADR_SWMPL, ®s->MIIADR);
0653
0654 for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
0655 udelay(1);
0656 if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR))
0657 break;
0658 }
0659
0660 writeb(MIICR_MAUTO, ®s->MIICR);
0661
0662 for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
0663 udelay(1);
0664 if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR))
0665 break;
0666 }
0667
0668 }
0669
0670
0671
0672
0673
0674
0675
0676
0677
0678
0679 static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
0680 {
0681 u16 ww;
0682
0683
0684
0685
0686 safe_disable_mii_autopoll(regs);
0687
0688 writeb(index, ®s->MIIADR);
0689
0690 BYTE_REG_BITS_ON(MIICR_RCMD, ®s->MIICR);
0691
0692 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
0693 if (!(readb(®s->MIICR) & MIICR_RCMD))
0694 break;
0695 }
0696
0697 *data = readw(®s->MIIDATA);
0698
0699 enable_mii_autopoll(regs);
0700 if (ww == W_MAX_TIMEOUT)
0701 return -ETIMEDOUT;
0702 return 0;
0703 }
0704
0705
0706
0707
0708
0709
0710
0711
0712 static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
0713 {
0714 u32 status = 0;
0715 u16 ANAR;
0716
0717 if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
0718 status |= VELOCITY_LINK_FAIL;
0719
0720 if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
0721 status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
0722 else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
0723 status |= (VELOCITY_SPEED_1000);
0724 else {
0725 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
0726 if (ANAR & ADVERTISE_100FULL)
0727 status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
0728 else if (ANAR & ADVERTISE_100HALF)
0729 status |= VELOCITY_SPEED_100;
0730 else if (ANAR & ADVERTISE_10FULL)
0731 status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
0732 else
0733 status |= (VELOCITY_SPEED_10);
0734 }
0735
0736 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
0737 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
0738 if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
0739 == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
0740 if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
0741 status |= VELOCITY_AUTONEG_ENABLE;
0742 }
0743 }
0744
0745 return status;
0746 }
0747
0748
0749
0750
0751
0752
0753
0754
0755
0756
0757 static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
0758 {
0759 u16 ww;
0760
0761
0762
0763
0764 safe_disable_mii_autopoll(regs);
0765
0766
0767 writeb(mii_addr, ®s->MIIADR);
0768
0769 writew(data, ®s->MIIDATA);
0770
0771
0772 BYTE_REG_BITS_ON(MIICR_WCMD, ®s->MIICR);
0773
0774
0775 for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
0776 udelay(5);
0777 if (!(readb(®s->MIICR) & MIICR_WCMD))
0778 break;
0779 }
0780 enable_mii_autopoll(regs);
0781
0782 if (ww == W_MAX_TIMEOUT)
0783 return -ETIMEDOUT;
0784 return 0;
0785 }
0786
0787
0788
0789
0790
0791
0792
0793
0794 static void set_mii_flow_control(struct velocity_info *vptr)
0795 {
0796
0797 switch (vptr->options.flow_cntl) {
0798 case FLOW_CNTL_TX:
0799 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
0800 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
0801 break;
0802
0803 case FLOW_CNTL_RX:
0804 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
0805 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
0806 break;
0807
0808 case FLOW_CNTL_TX_RX:
0809 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
0810 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
0811 break;
0812
0813 case FLOW_CNTL_DISABLE:
0814 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
0815 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
0816 break;
0817 default:
0818 break;
0819 }
0820 }
0821
0822
0823
0824
0825
0826
0827
0828 static void mii_set_auto_on(struct velocity_info *vptr)
0829 {
0830 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
0831 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
0832 else
0833 MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
0834 }
0835
0836 static u32 check_connection_type(struct mac_regs __iomem *regs)
0837 {
0838 u32 status = 0;
0839 u8 PHYSR0;
0840 u16 ANAR;
0841 PHYSR0 = readb(®s->PHYSR0);
0842
0843
0844
0845
0846
0847
0848 if (PHYSR0 & PHYSR0_FDPX)
0849 status |= VELOCITY_DUPLEX_FULL;
0850
0851 if (PHYSR0 & PHYSR0_SPDG)
0852 status |= VELOCITY_SPEED_1000;
0853 else if (PHYSR0 & PHYSR0_SPD10)
0854 status |= VELOCITY_SPEED_10;
0855 else
0856 status |= VELOCITY_SPEED_100;
0857
0858 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
0859 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
0860 if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
0861 == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
0862 if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
0863 status |= VELOCITY_AUTONEG_ENABLE;
0864 }
0865 }
0866
0867 return status;
0868 }
0869
0870
0871
0872
0873
0874
0875
0876
0877
0878
0879 static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
0880 {
0881 struct mac_regs __iomem *regs = vptr->mac_regs;
0882
0883 vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
0884
0885
0886 set_mii_flow_control(vptr);
0887
0888 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
0889 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
0890
0891
0892
0893
0894 if (mii_status & VELOCITY_AUTONEG_ENABLE) {
0895 netdev_info(vptr->netdev, "Velocity is in AUTO mode\n");
0896
0897 BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR);
0898
0899 MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
0900 MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
0901 MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
0902
0903
0904 mii_set_auto_on(vptr);
0905 } else {
0906 u16 CTRL1000;
0907 u16 ANAR;
0908 u8 CHIPGCR;
0909
0910
0911
0912
0913
0914
0915
0916
0917
0918 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR);
0919
0920 CHIPGCR = readb(®s->CHIPGCR);
0921
0922 if (mii_status & VELOCITY_SPEED_1000)
0923 CHIPGCR |= CHIPGCR_FCGMII;
0924 else
0925 CHIPGCR &= ~CHIPGCR_FCGMII;
0926
0927 if (mii_status & VELOCITY_DUPLEX_FULL) {
0928 CHIPGCR |= CHIPGCR_FCFDX;
0929 writeb(CHIPGCR, ®s->CHIPGCR);
0930 netdev_info(vptr->netdev,
0931 "set Velocity to forced full mode\n");
0932 if (vptr->rev_id < REV_ID_VT3216_A0)
0933 BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR);
0934 } else {
0935 CHIPGCR &= ~CHIPGCR_FCFDX;
0936 netdev_info(vptr->netdev,
0937 "set Velocity to forced half mode\n");
0938 writeb(CHIPGCR, ®s->CHIPGCR);
0939 if (vptr->rev_id < REV_ID_VT3216_A0)
0940 BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR);
0941 }
0942
0943 velocity_mii_read(vptr->mac_regs, MII_CTRL1000, &CTRL1000);
0944 CTRL1000 &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
0945 if ((mii_status & VELOCITY_SPEED_1000) &&
0946 (mii_status & VELOCITY_DUPLEX_FULL)) {
0947 CTRL1000 |= ADVERTISE_1000FULL;
0948 }
0949 velocity_mii_write(vptr->mac_regs, MII_CTRL1000, CTRL1000);
0950
0951 if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
0952 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG);
0953 else
0954 BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG);
0955
0956
0957 velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
0958 ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
0959 if (mii_status & VELOCITY_SPEED_100) {
0960 if (mii_status & VELOCITY_DUPLEX_FULL)
0961 ANAR |= ADVERTISE_100FULL;
0962 else
0963 ANAR |= ADVERTISE_100HALF;
0964 } else if (mii_status & VELOCITY_SPEED_10) {
0965 if (mii_status & VELOCITY_DUPLEX_FULL)
0966 ANAR |= ADVERTISE_10FULL;
0967 else
0968 ANAR |= ADVERTISE_10HALF;
0969 }
0970 velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
0971
0972 mii_set_auto_on(vptr);
0973
0974 }
0975
0976
0977 return VELOCITY_LINK_CHANGE;
0978 }
0979
0980
0981
0982
0983
0984
0985
0986
0987
0988 static void velocity_print_link_status(struct velocity_info *vptr)
0989 {
0990 const char *link;
0991 const char *speed;
0992 const char *duplex;
0993
0994 if (vptr->mii_status & VELOCITY_LINK_FAIL) {
0995 netdev_notice(vptr->netdev, "failed to detect cable link\n");
0996 return;
0997 }
0998
0999 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1000 link = "auto-negotiation";
1001
1002 if (vptr->mii_status & VELOCITY_SPEED_1000)
1003 speed = "1000";
1004 else if (vptr->mii_status & VELOCITY_SPEED_100)
1005 speed = "100";
1006 else
1007 speed = "10";
1008
1009 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1010 duplex = "full";
1011 else
1012 duplex = "half";
1013 } else {
1014 link = "forced";
1015
1016 switch (vptr->options.spd_dpx) {
1017 case SPD_DPX_1000_FULL:
1018 speed = "1000";
1019 duplex = "full";
1020 break;
1021 case SPD_DPX_100_HALF:
1022 speed = "100";
1023 duplex = "half";
1024 break;
1025 case SPD_DPX_100_FULL:
1026 speed = "100";
1027 duplex = "full";
1028 break;
1029 case SPD_DPX_10_HALF:
1030 speed = "10";
1031 duplex = "half";
1032 break;
1033 case SPD_DPX_10_FULL:
1034 speed = "10";
1035 duplex = "full";
1036 break;
1037 default:
1038 speed = "unknown";
1039 duplex = "unknown";
1040 break;
1041 }
1042 }
1043 netdev_notice(vptr->netdev, "Link %s speed %sM bps %s duplex\n",
1044 link, speed, duplex);
1045 }
1046
1047
1048
1049
1050
1051
1052
1053
1054 static void enable_flow_control_ability(struct velocity_info *vptr)
1055 {
1056
1057 struct mac_regs __iomem *regs = vptr->mac_regs;
1058
1059 switch (vptr->options.flow_cntl) {
1060
1061 case FLOW_CNTL_DEFAULT:
1062 if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, ®s->PHYSR0))
1063 writel(CR0_FDXRFCEN, ®s->CR0Set);
1064 else
1065 writel(CR0_FDXRFCEN, ®s->CR0Clr);
1066
1067 if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, ®s->PHYSR0))
1068 writel(CR0_FDXTFCEN, ®s->CR0Set);
1069 else
1070 writel(CR0_FDXTFCEN, ®s->CR0Clr);
1071 break;
1072
1073 case FLOW_CNTL_TX:
1074 writel(CR0_FDXTFCEN, ®s->CR0Set);
1075 writel(CR0_FDXRFCEN, ®s->CR0Clr);
1076 break;
1077
1078 case FLOW_CNTL_RX:
1079 writel(CR0_FDXRFCEN, ®s->CR0Set);
1080 writel(CR0_FDXTFCEN, ®s->CR0Clr);
1081 break;
1082
1083 case FLOW_CNTL_TX_RX:
1084 writel(CR0_FDXTFCEN, ®s->CR0Set);
1085 writel(CR0_FDXRFCEN, ®s->CR0Set);
1086 break;
1087
1088 case FLOW_CNTL_DISABLE:
1089 writel(CR0_FDXRFCEN, ®s->CR0Clr);
1090 writel(CR0_FDXTFCEN, ®s->CR0Clr);
1091 break;
1092
1093 default:
1094 break;
1095 }
1096
1097 }
1098
1099
1100
1101
1102
1103
1104
1105
1106 static int velocity_soft_reset(struct velocity_info *vptr)
1107 {
1108 struct mac_regs __iomem *regs = vptr->mac_regs;
1109 int i = 0;
1110
1111 writel(CR0_SFRST, ®s->CR0Set);
1112
1113 for (i = 0; i < W_MAX_TIMEOUT; i++) {
1114 udelay(5);
1115 if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, ®s->CR0Set))
1116 break;
1117 }
1118
1119 if (i == W_MAX_TIMEOUT) {
1120 writel(CR0_FORSRST, ®s->CR0Set);
1121
1122
1123 mdelay(2);
1124 }
1125 return 0;
1126 }
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136 static void velocity_set_multi(struct net_device *dev)
1137 {
1138 struct velocity_info *vptr = netdev_priv(dev);
1139 struct mac_regs __iomem *regs = vptr->mac_regs;
1140 u8 rx_mode;
1141 int i;
1142 struct netdev_hw_addr *ha;
1143
1144 if (dev->flags & IFF_PROMISC) {
1145 writel(0xffffffff, ®s->MARCAM[0]);
1146 writel(0xffffffff, ®s->MARCAM[4]);
1147 rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
1148 } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
1149 (dev->flags & IFF_ALLMULTI)) {
1150 writel(0xffffffff, ®s->MARCAM[0]);
1151 writel(0xffffffff, ®s->MARCAM[4]);
1152 rx_mode = (RCR_AM | RCR_AB);
1153 } else {
1154 int offset = MCAM_SIZE - vptr->multicast_limit;
1155 mac_get_cam_mask(regs, vptr->mCAMmask);
1156
1157 i = 0;
1158 netdev_for_each_mc_addr(ha, dev) {
1159 mac_set_cam(regs, i + offset, ha->addr);
1160 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1161 i++;
1162 }
1163
1164 mac_set_cam_mask(regs, vptr->mCAMmask);
1165 rx_mode = RCR_AM | RCR_AB | RCR_AP;
1166 }
1167 if (dev->mtu > 1500)
1168 rx_mode |= RCR_AL;
1169
1170 BYTE_REG_BITS_ON(rx_mode, ®s->RCR);
1171
1172 }
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185 static void mii_init(struct velocity_info *vptr, u32 mii_status)
1186 {
1187 u16 BMCR;
1188
1189 switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1190 case PHYID_ICPLUS_IP101A:
1191 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP),
1192 MII_ADVERTISE, vptr->mac_regs);
1193 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1194 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION,
1195 vptr->mac_regs);
1196 else
1197 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION,
1198 vptr->mac_regs);
1199 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1200 break;
1201 case PHYID_CICADA_CS8201:
1202
1203
1204
1205 MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1206
1207
1208
1209
1210
1211 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1212 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1213 else
1214 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1215
1216
1217
1218 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1219 break;
1220 case PHYID_VT3216_32BIT:
1221 case PHYID_VT3216_64BIT:
1222
1223
1224
1225 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1226
1227
1228
1229
1230
1231 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1232 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1233 else
1234 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1235 break;
1236
1237 case PHYID_MARVELL_1000:
1238 case PHYID_MARVELL_1000S:
1239
1240
1241
1242 MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
1243
1244
1245
1246 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1247 break;
1248 default:
1249 ;
1250 }
1251 velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1252 if (BMCR & BMCR_ISOLATE) {
1253 BMCR &= ~BMCR_ISOLATE;
1254 velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1255 }
1256 }
1257
1258
1259
1260
1261
1262
1263
1264
1265 static void setup_queue_timers(struct velocity_info *vptr)
1266 {
1267
1268 if (vptr->rev_id >= REV_ID_VT3216_A0) {
1269 u8 txqueue_timer = 0;
1270 u8 rxqueue_timer = 0;
1271
1272 if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1273 VELOCITY_SPEED_100)) {
1274 txqueue_timer = vptr->options.txqueue_timer;
1275 rxqueue_timer = vptr->options.rxqueue_timer;
1276 }
1277
1278 writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1279 writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1280 }
1281 }
1282
1283
1284
1285
1286
1287
1288
1289
1290 static void setup_adaptive_interrupts(struct velocity_info *vptr)
1291 {
1292 struct mac_regs __iomem *regs = vptr->mac_regs;
1293 u16 tx_intsup = vptr->options.tx_intsup;
1294 u16 rx_intsup = vptr->options.rx_intsup;
1295
1296
1297 vptr->int_mask = INT_MASK_DEF;
1298
1299
1300 writeb(CAMCR_PS0, ®s->CAMCR);
1301 if (tx_intsup != 0) {
1302 vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1303 ISR_PTX2I | ISR_PTX3I);
1304 writew(tx_intsup, ®s->ISRCTL);
1305 } else
1306 writew(ISRCTL_TSUPDIS, ®s->ISRCTL);
1307
1308
1309 writeb(CAMCR_PS1, ®s->CAMCR);
1310 if (rx_intsup != 0) {
1311 vptr->int_mask &= ~ISR_PRXI;
1312 writew(rx_intsup, ®s->ISRCTL);
1313 } else
1314 writew(ISRCTL_RSUPDIS, ®s->ISRCTL);
1315
1316
1317 writeb(0, ®s->CAMCR);
1318 }
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328 static void velocity_init_registers(struct velocity_info *vptr,
1329 enum velocity_init_type type)
1330 {
1331 struct mac_regs __iomem *regs = vptr->mac_regs;
1332 struct net_device *netdev = vptr->netdev;
1333 int i, mii_status;
1334
1335 mac_wol_reset(regs);
1336
1337 switch (type) {
1338 case VELOCITY_INIT_RESET:
1339 case VELOCITY_INIT_WOL:
1340
1341 netif_stop_queue(netdev);
1342
1343
1344
1345
1346 velocity_rx_reset(vptr);
1347 mac_rx_queue_run(regs);
1348 mac_rx_queue_wake(regs);
1349
1350 mii_status = velocity_get_opt_media_mode(vptr);
1351 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1352 velocity_print_link_status(vptr);
1353 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1354 netif_wake_queue(netdev);
1355 }
1356
1357 enable_flow_control_ability(vptr);
1358
1359 mac_clear_isr(regs);
1360 writel(CR0_STOP, ®s->CR0Clr);
1361 writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
1362 ®s->CR0Set);
1363
1364 break;
1365
1366 case VELOCITY_INIT_COLD:
1367 default:
1368
1369
1370
1371 velocity_soft_reset(vptr);
1372 mdelay(5);
1373
1374 if (!vptr->no_eeprom) {
1375 mac_eeprom_reload(regs);
1376 for (i = 0; i < 6; i++)
1377 writeb(netdev->dev_addr[i], regs->PAR + i);
1378 }
1379
1380
1381
1382
1383 BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
1384 mac_set_rx_thresh(regs, vptr->options.rx_thresh);
1385 mac_set_dma_length(regs, vptr->options.DMA_length);
1386
1387 writeb(WOLCFG_SAM | WOLCFG_SAB, ®s->WOLCFGSet);
1388
1389
1390
1391 BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), ®s->CFGB);
1392
1393
1394
1395
1396 velocity_init_cam_filter(vptr);
1397
1398
1399
1400
1401 velocity_set_multi(netdev);
1402
1403
1404
1405
1406 enable_mii_autopoll(regs);
1407
1408 setup_adaptive_interrupts(vptr);
1409
1410 writel(vptr->rx.pool_dma, ®s->RDBaseLo);
1411 writew(vptr->options.numrx - 1, ®s->RDCSize);
1412 mac_rx_queue_run(regs);
1413 mac_rx_queue_wake(regs);
1414
1415 writew(vptr->options.numtx - 1, ®s->TDCSize);
1416
1417 for (i = 0; i < vptr->tx.numq; i++) {
1418 writel(vptr->tx.pool_dma[i], ®s->TDBaseLo[i]);
1419 mac_tx_queue_run(regs, i);
1420 }
1421
1422 init_flow_control_register(vptr);
1423
1424 writel(CR0_STOP, ®s->CR0Clr);
1425 writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), ®s->CR0Set);
1426
1427 mii_status = velocity_get_opt_media_mode(vptr);
1428 netif_stop_queue(netdev);
1429
1430 mii_init(vptr, mii_status);
1431
1432 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1433 velocity_print_link_status(vptr);
1434 if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1435 netif_wake_queue(netdev);
1436 }
1437
1438 enable_flow_control_ability(vptr);
1439 mac_hw_mibs_init(regs);
1440 mac_write_int_mask(vptr->int_mask, regs);
1441 mac_clear_isr(regs);
1442
1443 }
1444 }
1445
1446 static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1447 {
1448 struct mac_regs __iomem *regs = vptr->mac_regs;
1449 int avail, dirty, unusable;
1450
1451
1452
1453
1454
1455 if (vptr->rx.filled < 4)
1456 return;
1457
1458 wmb();
1459
1460 unusable = vptr->rx.filled & 0x0003;
1461 dirty = vptr->rx.dirty - unusable;
1462 for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1463 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1464 vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1465 }
1466
1467 writew(vptr->rx.filled & 0xfffc, ®s->RBRDU);
1468 vptr->rx.filled = unusable;
1469 }
1470
1471
1472
1473
1474
1475
1476
1477
1478 static int velocity_init_dma_rings(struct velocity_info *vptr)
1479 {
1480 struct velocity_opt *opt = &vptr->options;
1481 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1482 const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1483 dma_addr_t pool_dma;
1484 void *pool;
1485 unsigned int i;
1486
1487
1488
1489
1490
1491
1492
1493 pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
1494 rx_ring_size, &pool_dma, GFP_ATOMIC);
1495 if (!pool) {
1496 dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
1497 vptr->netdev->name);
1498 return -ENOMEM;
1499 }
1500
1501 vptr->rx.ring = pool;
1502 vptr->rx.pool_dma = pool_dma;
1503
1504 pool += rx_ring_size;
1505 pool_dma += rx_ring_size;
1506
1507 for (i = 0; i < vptr->tx.numq; i++) {
1508 vptr->tx.rings[i] = pool;
1509 vptr->tx.pool_dma[i] = pool_dma;
1510 pool += tx_ring_size;
1511 pool_dma += tx_ring_size;
1512 }
1513
1514 return 0;
1515 }
1516
1517 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1518 {
1519 vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1520 }
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1533 {
1534 struct rx_desc *rd = &(vptr->rx.ring[idx]);
1535 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1536
1537 rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64);
1538 if (rd_info->skb == NULL)
1539 return -ENOMEM;
1540
1541
1542
1543
1544
1545 skb_reserve(rd_info->skb,
1546 64 - ((unsigned long) rd_info->skb->data & 63));
1547 rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
1548 vptr->rx.buf_sz, DMA_FROM_DEVICE);
1549
1550
1551
1552
1553
1554 *((u32 *) & (rd->rdesc0)) = 0;
1555 rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1556 rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1557 rd->pa_high = 0;
1558 return 0;
1559 }
1560
1561
1562 static int velocity_rx_refill(struct velocity_info *vptr)
1563 {
1564 int dirty = vptr->rx.dirty, done = 0;
1565
1566 do {
1567 struct rx_desc *rd = vptr->rx.ring + dirty;
1568
1569
1570 if (rd->rdesc0.len & OWNED_BY_NIC)
1571 break;
1572
1573 if (!vptr->rx.info[dirty].skb) {
1574 if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1575 break;
1576 }
1577 done++;
1578 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1579 } while (dirty != vptr->rx.curr);
1580
1581 if (done) {
1582 vptr->rx.dirty = dirty;
1583 vptr->rx.filled += done;
1584 }
1585
1586 return done;
1587 }
1588
1589
1590
1591
1592
1593
1594
1595
1596 static void velocity_free_rd_ring(struct velocity_info *vptr)
1597 {
1598 int i;
1599
1600 if (vptr->rx.info == NULL)
1601 return;
1602
1603 for (i = 0; i < vptr->options.numrx; i++) {
1604 struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1605 struct rx_desc *rd = vptr->rx.ring + i;
1606
1607 memset(rd, 0, sizeof(*rd));
1608
1609 if (!rd_info->skb)
1610 continue;
1611 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
1612 DMA_FROM_DEVICE);
1613 rd_info->skb_dma = 0;
1614
1615 dev_kfree_skb(rd_info->skb);
1616 rd_info->skb = NULL;
1617 }
1618
1619 kfree(vptr->rx.info);
1620 vptr->rx.info = NULL;
1621 }
1622
1623
1624
1625
1626
1627
1628
1629
1630 static int velocity_init_rd_ring(struct velocity_info *vptr)
1631 {
1632 int ret = -ENOMEM;
1633
1634 vptr->rx.info = kcalloc(vptr->options.numrx,
1635 sizeof(struct velocity_rd_info), GFP_KERNEL);
1636 if (!vptr->rx.info)
1637 goto out;
1638
1639 velocity_init_rx_ring_indexes(vptr);
1640
1641 if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1642 netdev_err(vptr->netdev, "failed to allocate RX buffer\n");
1643 velocity_free_rd_ring(vptr);
1644 goto out;
1645 }
1646
1647 ret = 0;
1648 out:
1649 return ret;
1650 }
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660 static int velocity_init_td_ring(struct velocity_info *vptr)
1661 {
1662 int j;
1663
1664
1665 for (j = 0; j < vptr->tx.numq; j++) {
1666
1667 vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1668 sizeof(struct velocity_td_info),
1669 GFP_KERNEL);
1670 if (!vptr->tx.infos[j]) {
1671 while (--j >= 0)
1672 kfree(vptr->tx.infos[j]);
1673 return -ENOMEM;
1674 }
1675
1676 vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1677 }
1678 return 0;
1679 }
1680
1681
1682
1683
1684
1685
1686
1687 static void velocity_free_dma_rings(struct velocity_info *vptr)
1688 {
1689 const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1690 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1691
1692 dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
1693 }
1694
1695 static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1696 {
1697 int ret;
1698
1699 velocity_set_rxbufsize(vptr, mtu);
1700
1701 ret = velocity_init_dma_rings(vptr);
1702 if (ret < 0)
1703 goto out;
1704
1705 ret = velocity_init_rd_ring(vptr);
1706 if (ret < 0)
1707 goto err_free_dma_rings_0;
1708
1709 ret = velocity_init_td_ring(vptr);
1710 if (ret < 0)
1711 goto err_free_rd_ring_1;
1712 out:
1713 return ret;
1714
1715 err_free_rd_ring_1:
1716 velocity_free_rd_ring(vptr);
1717 err_free_dma_rings_0:
1718 velocity_free_dma_rings(vptr);
1719 goto out;
1720 }
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731 static void velocity_free_tx_buf(struct velocity_info *vptr,
1732 struct velocity_td_info *tdinfo, struct tx_desc *td)
1733 {
1734 struct sk_buff *skb = tdinfo->skb;
1735 int i;
1736
1737
1738
1739
1740 for (i = 0; i < tdinfo->nskb_dma; i++) {
1741 size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1742
1743
1744 if (skb_shinfo(skb)->nr_frags > 0)
1745 pktlen = max_t(size_t, pktlen,
1746 td->td_buf[i].size & ~TD_QUEUE);
1747
1748 dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
1749 le16_to_cpu(pktlen), DMA_TO_DEVICE);
1750 }
1751 dev_consume_skb_irq(skb);
1752 tdinfo->skb = NULL;
1753 }
1754
1755
1756
1757
1758 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1759 int q, int n)
1760 {
1761 struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1762 int i;
1763
1764 if (td_info == NULL)
1765 return;
1766
1767 if (td_info->skb) {
1768 for (i = 0; i < td_info->nskb_dma; i++) {
1769 if (td_info->skb_dma[i]) {
1770 dma_unmap_single(vptr->dev, td_info->skb_dma[i],
1771 td_info->skb->len, DMA_TO_DEVICE);
1772 td_info->skb_dma[i] = 0;
1773 }
1774 }
1775 dev_kfree_skb(td_info->skb);
1776 td_info->skb = NULL;
1777 }
1778 }
1779
1780
1781
1782
1783
1784
1785
1786
1787 static void velocity_free_td_ring(struct velocity_info *vptr)
1788 {
1789 int i, j;
1790
1791 for (j = 0; j < vptr->tx.numq; j++) {
1792 if (vptr->tx.infos[j] == NULL)
1793 continue;
1794 for (i = 0; i < vptr->options.numtx; i++)
1795 velocity_free_td_ring_entry(vptr, j, i);
1796
1797 kfree(vptr->tx.infos[j]);
1798 vptr->tx.infos[j] = NULL;
1799 }
1800 }
1801
1802 static void velocity_free_rings(struct velocity_info *vptr)
1803 {
1804 velocity_free_td_ring(vptr);
1805 velocity_free_rd_ring(vptr);
1806 velocity_free_dma_rings(vptr);
1807 }
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820 static void velocity_error(struct velocity_info *vptr, int status)
1821 {
1822
1823 if (status & ISR_TXSTLI) {
1824 struct mac_regs __iomem *regs = vptr->mac_regs;
1825
1826 netdev_err(vptr->netdev, "TD structure error TDindex=%hx\n",
1827 readw(®s->TDIdx[0]));
1828 BYTE_REG_BITS_ON(TXESR_TDSTR, ®s->TXESR);
1829 writew(TRDCSR_RUN, ®s->TDCSRClr);
1830 netif_stop_queue(vptr->netdev);
1831
1832
1833
1834 }
1835
1836 if (status & ISR_SRCI) {
1837 struct mac_regs __iomem *regs = vptr->mac_regs;
1838 int linked;
1839
1840 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1841 vptr->mii_status = check_connection_type(regs);
1842
1843
1844
1845
1846
1847
1848 if (vptr->rev_id < REV_ID_VT3216_A0) {
1849 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1850 BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR);
1851 else
1852 BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR);
1853 }
1854
1855
1856
1857 if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1858 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG);
1859 else
1860 BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG);
1861
1862 setup_queue_timers(vptr);
1863 }
1864
1865
1866
1867 linked = readb(®s->PHYSR0) & PHYSR0_LINKGD;
1868
1869 if (linked) {
1870 vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1871 netif_carrier_on(vptr->netdev);
1872 } else {
1873 vptr->mii_status |= VELOCITY_LINK_FAIL;
1874 netif_carrier_off(vptr->netdev);
1875 }
1876
1877 velocity_print_link_status(vptr);
1878 enable_flow_control_ability(vptr);
1879
1880
1881
1882
1883
1884
1885 enable_mii_autopoll(regs);
1886
1887 if (vptr->mii_status & VELOCITY_LINK_FAIL)
1888 netif_stop_queue(vptr->netdev);
1889 else
1890 netif_wake_queue(vptr->netdev);
1891
1892 }
1893 if (status & ISR_MIBFI)
1894 velocity_update_hw_mibs(vptr);
1895 if (status & ISR_LSTEI)
1896 mac_rx_queue_wake(vptr->mac_regs);
1897 }
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907 static int velocity_tx_srv(struct velocity_info *vptr)
1908 {
1909 struct tx_desc *td;
1910 int qnum;
1911 int full = 0;
1912 int idx;
1913 int works = 0;
1914 struct velocity_td_info *tdinfo;
1915 struct net_device_stats *stats = &vptr->netdev->stats;
1916
1917 for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1918 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1919 idx = (idx + 1) % vptr->options.numtx) {
1920
1921
1922
1923
1924 td = &(vptr->tx.rings[qnum][idx]);
1925 tdinfo = &(vptr->tx.infos[qnum][idx]);
1926
1927 if (td->tdesc0.len & OWNED_BY_NIC)
1928 break;
1929
1930 if ((works++ > 15))
1931 break;
1932
1933 if (td->tdesc0.TSR & TSR0_TERR) {
1934 stats->tx_errors++;
1935 stats->tx_dropped++;
1936 if (td->tdesc0.TSR & TSR0_CDH)
1937 stats->tx_heartbeat_errors++;
1938 if (td->tdesc0.TSR & TSR0_CRS)
1939 stats->tx_carrier_errors++;
1940 if (td->tdesc0.TSR & TSR0_ABT)
1941 stats->tx_aborted_errors++;
1942 if (td->tdesc0.TSR & TSR0_OWC)
1943 stats->tx_window_errors++;
1944 } else {
1945 stats->tx_packets++;
1946 stats->tx_bytes += tdinfo->skb->len;
1947 }
1948 velocity_free_tx_buf(vptr, tdinfo, td);
1949 vptr->tx.used[qnum]--;
1950 }
1951 vptr->tx.tail[qnum] = idx;
1952
1953 if (AVAIL_TD(vptr, qnum) < 1)
1954 full = 1;
1955 }
1956
1957
1958
1959
1960 if (netif_queue_stopped(vptr->netdev) && (full == 0) &&
1961 (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1962 netif_wake_queue(vptr->netdev);
1963 }
1964 return works;
1965 }
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975 static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1976 {
1977 skb_checksum_none_assert(skb);
1978
1979 if (rd->rdesc1.CSM & CSM_IPKT) {
1980 if (rd->rdesc1.CSM & CSM_IPOK) {
1981 if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1982 (rd->rdesc1.CSM & CSM_UDPKT)) {
1983 if (!(rd->rdesc1.CSM & CSM_TUPOK))
1984 return;
1985 }
1986 skb->ip_summed = CHECKSUM_UNNECESSARY;
1987 }
1988 }
1989 }
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002 static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
2003 struct velocity_info *vptr)
2004 {
2005 int ret = -1;
2006 if (pkt_size < rx_copybreak) {
2007 struct sk_buff *new_skb;
2008
2009 new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size);
2010 if (new_skb) {
2011 new_skb->ip_summed = rx_skb[0]->ip_summed;
2012 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
2013 *rx_skb = new_skb;
2014 ret = 0;
2015 }
2016
2017 }
2018 return ret;
2019 }
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030 static inline void velocity_iph_realign(struct velocity_info *vptr,
2031 struct sk_buff *skb, int pkt_size)
2032 {
2033 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2034 memmove(skb->data + 2, skb->data, pkt_size);
2035 skb_reserve(skb, 2);
2036 }
2037 }
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047 static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2048 {
2049 struct net_device_stats *stats = &vptr->netdev->stats;
2050 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2051 struct rx_desc *rd = &(vptr->rx.ring[idx]);
2052 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2053 struct sk_buff *skb;
2054
2055 if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
2056 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
2057 netdev_err(vptr->netdev, "received frame spans multiple RDs\n");
2058 stats->rx_length_errors++;
2059 return -EINVAL;
2060 }
2061
2062 if (rd->rdesc0.RSR & RSR_MAR)
2063 stats->multicast++;
2064
2065 skb = rd_info->skb;
2066
2067 dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
2068 vptr->rx.buf_sz, DMA_FROM_DEVICE);
2069
2070 velocity_rx_csum(rd, skb);
2071
2072 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2073 velocity_iph_realign(vptr, skb, pkt_len);
2074 rd_info->skb = NULL;
2075 dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
2076 DMA_FROM_DEVICE);
2077 } else {
2078 dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
2079 vptr->rx.buf_sz, DMA_FROM_DEVICE);
2080 }
2081
2082 skb_put(skb, pkt_len - 4);
2083 skb->protocol = eth_type_trans(skb, vptr->netdev);
2084
2085 if (rd->rdesc0.RSR & RSR_DETAG) {
2086 u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG));
2087
2088 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
2089 }
2090 netif_receive_skb(skb);
2091
2092 stats->rx_bytes += pkt_len;
2093 stats->rx_packets++;
2094
2095 return 0;
2096 }
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107 static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2108 {
2109 struct net_device_stats *stats = &vptr->netdev->stats;
2110 int rd_curr = vptr->rx.curr;
2111 int works = 0;
2112
2113 while (works < budget_left) {
2114 struct rx_desc *rd = vptr->rx.ring + rd_curr;
2115
2116 if (!vptr->rx.info[rd_curr].skb)
2117 break;
2118
2119 if (rd->rdesc0.len & OWNED_BY_NIC)
2120 break;
2121
2122 rmb();
2123
2124
2125
2126
2127 if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2128 if (velocity_receive_frame(vptr, rd_curr) < 0)
2129 stats->rx_dropped++;
2130 } else {
2131 if (rd->rdesc0.RSR & RSR_CRC)
2132 stats->rx_crc_errors++;
2133 if (rd->rdesc0.RSR & RSR_FAE)
2134 stats->rx_frame_errors++;
2135
2136 stats->rx_dropped++;
2137 }
2138
2139 rd->size |= RX_INTEN;
2140
2141 rd_curr++;
2142 if (rd_curr >= vptr->options.numrx)
2143 rd_curr = 0;
2144 works++;
2145 }
2146
2147 vptr->rx.curr = rd_curr;
2148
2149 if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2150 velocity_give_many_rx_descs(vptr);
2151
2152 VAR_USED(stats);
2153 return works;
2154 }
2155
2156 static int velocity_poll(struct napi_struct *napi, int budget)
2157 {
2158 struct velocity_info *vptr = container_of(napi,
2159 struct velocity_info, napi);
2160 unsigned int rx_done;
2161 unsigned long flags;
2162
2163
2164
2165
2166
2167 rx_done = velocity_rx_srv(vptr, budget);
2168 spin_lock_irqsave(&vptr->lock, flags);
2169 velocity_tx_srv(vptr);
2170
2171 if (rx_done < budget) {
2172 napi_complete_done(napi, rx_done);
2173 mac_enable_int(vptr->mac_regs);
2174 }
2175 spin_unlock_irqrestore(&vptr->lock, flags);
2176
2177 return rx_done;
2178 }
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190 static irqreturn_t velocity_intr(int irq, void *dev_instance)
2191 {
2192 struct net_device *dev = dev_instance;
2193 struct velocity_info *vptr = netdev_priv(dev);
2194 u32 isr_status;
2195
2196 spin_lock(&vptr->lock);
2197 isr_status = mac_read_isr(vptr->mac_regs);
2198
2199
2200 if (isr_status == 0) {
2201 spin_unlock(&vptr->lock);
2202 return IRQ_NONE;
2203 }
2204
2205
2206 mac_write_isr(vptr->mac_regs, isr_status);
2207
2208 if (likely(napi_schedule_prep(&vptr->napi))) {
2209 mac_disable_int(vptr->mac_regs);
2210 __napi_schedule(&vptr->napi);
2211 }
2212
2213 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2214 velocity_error(vptr, isr_status);
2215
2216 spin_unlock(&vptr->lock);
2217
2218 return IRQ_HANDLED;
2219 }
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231 static int velocity_open(struct net_device *dev)
2232 {
2233 struct velocity_info *vptr = netdev_priv(dev);
2234 int ret;
2235
2236 ret = velocity_init_rings(vptr, dev->mtu);
2237 if (ret < 0)
2238 goto out;
2239
2240
2241 velocity_set_power_state(vptr, PCI_D0);
2242
2243 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2244
2245 ret = request_irq(dev->irq, velocity_intr, IRQF_SHARED,
2246 dev->name, dev);
2247 if (ret < 0) {
2248
2249 velocity_set_power_state(vptr, PCI_D3hot);
2250 velocity_free_rings(vptr);
2251 goto out;
2252 }
2253
2254 velocity_give_many_rx_descs(vptr);
2255
2256 mac_enable_int(vptr->mac_regs);
2257 netif_start_queue(dev);
2258 napi_enable(&vptr->napi);
2259 vptr->flags |= VELOCITY_FLAGS_OPENED;
2260 out:
2261 return ret;
2262 }
2263
2264
2265
2266
2267
2268
2269
2270
2271 static void velocity_shutdown(struct velocity_info *vptr)
2272 {
2273 struct mac_regs __iomem *regs = vptr->mac_regs;
2274 mac_disable_int(regs);
2275 writel(CR0_STOP, ®s->CR0Set);
2276 writew(0xFFFF, ®s->TDCSRClr);
2277 writeb(0xFF, ®s->RDCSRClr);
2278 safe_disable_mii_autopoll(regs);
2279 mac_clear_isr(regs);
2280 }
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291 static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2292 {
2293 struct velocity_info *vptr = netdev_priv(dev);
2294 int ret = 0;
2295
2296 if (!netif_running(dev)) {
2297 dev->mtu = new_mtu;
2298 goto out_0;
2299 }
2300
2301 if (dev->mtu != new_mtu) {
2302 struct velocity_info *tmp_vptr;
2303 unsigned long flags;
2304 struct rx_info rx;
2305 struct tx_info tx;
2306
2307 tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2308 if (!tmp_vptr) {
2309 ret = -ENOMEM;
2310 goto out_0;
2311 }
2312
2313 tmp_vptr->netdev = dev;
2314 tmp_vptr->pdev = vptr->pdev;
2315 tmp_vptr->dev = vptr->dev;
2316 tmp_vptr->options = vptr->options;
2317 tmp_vptr->tx.numq = vptr->tx.numq;
2318
2319 ret = velocity_init_rings(tmp_vptr, new_mtu);
2320 if (ret < 0)
2321 goto out_free_tmp_vptr_1;
2322
2323 napi_disable(&vptr->napi);
2324
2325 spin_lock_irqsave(&vptr->lock, flags);
2326
2327 netif_stop_queue(dev);
2328 velocity_shutdown(vptr);
2329
2330 rx = vptr->rx;
2331 tx = vptr->tx;
2332
2333 vptr->rx = tmp_vptr->rx;
2334 vptr->tx = tmp_vptr->tx;
2335
2336 tmp_vptr->rx = rx;
2337 tmp_vptr->tx = tx;
2338
2339 dev->mtu = new_mtu;
2340
2341 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2342
2343 velocity_give_many_rx_descs(vptr);
2344
2345 napi_enable(&vptr->napi);
2346
2347 mac_enable_int(vptr->mac_regs);
2348 netif_start_queue(dev);
2349
2350 spin_unlock_irqrestore(&vptr->lock, flags);
2351
2352 velocity_free_rings(tmp_vptr);
2353
2354 out_free_tmp_vptr_1:
2355 kfree(tmp_vptr);
2356 }
2357 out_0:
2358 return ret;
2359 }
2360
2361 #ifdef CONFIG_NET_POLL_CONTROLLER
2362
2363
2364
2365
2366
2367
2368
2369
2370 static void velocity_poll_controller(struct net_device *dev)
2371 {
2372 disable_irq(dev->irq);
2373 velocity_intr(dev->irq, dev);
2374 enable_irq(dev->irq);
2375 }
2376 #endif
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388 static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2389 {
2390 struct velocity_info *vptr = netdev_priv(dev);
2391 struct mac_regs __iomem *regs = vptr->mac_regs;
2392 unsigned long flags;
2393 struct mii_ioctl_data *miidata = if_mii(ifr);
2394 int err;
2395
2396 switch (cmd) {
2397 case SIOCGMIIPHY:
2398 miidata->phy_id = readb(®s->MIIADR) & 0x1f;
2399 break;
2400 case SIOCGMIIREG:
2401 if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2402 return -ETIMEDOUT;
2403 break;
2404 case SIOCSMIIREG:
2405 spin_lock_irqsave(&vptr->lock, flags);
2406 err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2407 spin_unlock_irqrestore(&vptr->lock, flags);
2408 check_connection_type(vptr->mac_regs);
2409 if (err)
2410 return err;
2411 break;
2412 default:
2413 return -EOPNOTSUPP;
2414 }
2415 return 0;
2416 }
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427 static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2428 {
2429 struct velocity_info *vptr = netdev_priv(dev);
2430 int ret;
2431
2432
2433
2434
2435 if (!netif_running(dev))
2436 velocity_set_power_state(vptr, PCI_D0);
2437
2438 switch (cmd) {
2439 case SIOCGMIIPHY:
2440 case SIOCGMIIREG:
2441 case SIOCSMIIREG:
2442 ret = velocity_mii_ioctl(dev, rq, cmd);
2443 break;
2444
2445 default:
2446 ret = -EOPNOTSUPP;
2447 }
2448 if (!netif_running(dev))
2449 velocity_set_power_state(vptr, PCI_D3hot);
2450
2451
2452 return ret;
2453 }
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465 static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2466 {
2467 struct velocity_info *vptr = netdev_priv(dev);
2468
2469
2470 if (!netif_running(dev))
2471 return &dev->stats;
2472
2473 spin_lock_irq(&vptr->lock);
2474 velocity_update_hw_mibs(vptr);
2475 spin_unlock_irq(&vptr->lock);
2476
2477 dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2478 dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2479 dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2480
2481
2482 dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2483
2484
2485
2486 dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2487
2488
2489
2490
2491
2492
2493
2494 return &dev->stats;
2495 }
2496
2497
2498
2499
2500
2501
2502
2503
2504 static int velocity_close(struct net_device *dev)
2505 {
2506 struct velocity_info *vptr = netdev_priv(dev);
2507
2508 napi_disable(&vptr->napi);
2509 netif_stop_queue(dev);
2510 velocity_shutdown(vptr);
2511
2512 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2513 velocity_get_ip(vptr);
2514
2515 free_irq(dev->irq, dev);
2516
2517 velocity_free_rings(vptr);
2518
2519 vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2520 return 0;
2521 }
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531 static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2532 struct net_device *dev)
2533 {
2534 struct velocity_info *vptr = netdev_priv(dev);
2535 int qnum = 0;
2536 struct tx_desc *td_ptr;
2537 struct velocity_td_info *tdinfo;
2538 unsigned long flags;
2539 int pktlen;
2540 int index, prev;
2541 int i = 0;
2542
2543 if (skb_padto(skb, ETH_ZLEN))
2544 goto out;
2545
2546
2547
2548 if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2549 dev_kfree_skb_any(skb);
2550 return NETDEV_TX_OK;
2551 }
2552
2553 pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2554 max_t(unsigned int, skb->len, ETH_ZLEN) :
2555 skb_headlen(skb);
2556
2557 spin_lock_irqsave(&vptr->lock, flags);
2558
2559 index = vptr->tx.curr[qnum];
2560 td_ptr = &(vptr->tx.rings[qnum][index]);
2561 tdinfo = &(vptr->tx.infos[qnum][index]);
2562
2563 td_ptr->tdesc1.TCR = TCR0_TIC;
2564 td_ptr->td_buf[0].size &= ~TD_QUEUE;
2565
2566
2567
2568
2569
2570 tdinfo->skb = skb;
2571 tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
2572 DMA_TO_DEVICE);
2573 td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2574 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2575 td_ptr->td_buf[0].pa_high = 0;
2576 td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2577
2578
2579 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2580 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2581
2582 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
2583 frag, 0,
2584 skb_frag_size(frag),
2585 DMA_TO_DEVICE);
2586
2587 td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2588 td_ptr->td_buf[i + 1].pa_high = 0;
2589 td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag));
2590 }
2591 tdinfo->nskb_dma = i + 1;
2592
2593 td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2594
2595 if (skb_vlan_tag_present(skb)) {
2596 td_ptr->tdesc1.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
2597 td_ptr->tdesc1.TCR |= TCR0_VETAG;
2598 }
2599
2600
2601
2602
2603 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2604 const struct iphdr *ip = ip_hdr(skb);
2605 if (ip->protocol == IPPROTO_TCP)
2606 td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2607 else if (ip->protocol == IPPROTO_UDP)
2608 td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2609 td_ptr->tdesc1.TCR |= TCR0_IPCK;
2610 }
2611
2612 prev = index - 1;
2613 if (prev < 0)
2614 prev = vptr->options.numtx - 1;
2615 td_ptr->tdesc0.len |= OWNED_BY_NIC;
2616 vptr->tx.used[qnum]++;
2617 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2618
2619 if (AVAIL_TD(vptr, qnum) < 1)
2620 netif_stop_queue(dev);
2621
2622 td_ptr = &(vptr->tx.rings[qnum][prev]);
2623 td_ptr->td_buf[0].size |= TD_QUEUE;
2624 mac_tx_queue_wake(vptr->mac_regs, qnum);
2625
2626 spin_unlock_irqrestore(&vptr->lock, flags);
2627 out:
2628 return NETDEV_TX_OK;
2629 }
2630
2631 static const struct net_device_ops velocity_netdev_ops = {
2632 .ndo_open = velocity_open,
2633 .ndo_stop = velocity_close,
2634 .ndo_start_xmit = velocity_xmit,
2635 .ndo_get_stats = velocity_get_stats,
2636 .ndo_validate_addr = eth_validate_addr,
2637 .ndo_set_mac_address = eth_mac_addr,
2638 .ndo_set_rx_mode = velocity_set_multi,
2639 .ndo_change_mtu = velocity_change_mtu,
2640 .ndo_eth_ioctl = velocity_ioctl,
2641 .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
2642 .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
2643 #ifdef CONFIG_NET_POLL_CONTROLLER
2644 .ndo_poll_controller = velocity_poll_controller,
2645 #endif
2646 };
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656 static void velocity_init_info(struct velocity_info *vptr,
2657 const struct velocity_info_tbl *info)
2658 {
2659 vptr->chip_id = info->chip_id;
2660 vptr->tx.numq = info->txqueue;
2661 vptr->multicast_limit = MCAM_SIZE;
2662 spin_lock_init(&vptr->lock);
2663 }
2664
2665
2666
2667
2668
2669
2670
2671
2672 static int velocity_get_pci_info(struct velocity_info *vptr)
2673 {
2674 struct pci_dev *pdev = vptr->pdev;
2675
2676 pci_set_master(pdev);
2677
2678 vptr->ioaddr = pci_resource_start(pdev, 0);
2679 vptr->memaddr = pci_resource_start(pdev, 1);
2680
2681 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2682 dev_err(&pdev->dev,
2683 "region #0 is not an I/O resource, aborting.\n");
2684 return -EINVAL;
2685 }
2686
2687 if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2688 dev_err(&pdev->dev,
2689 "region #1 is an I/O resource, aborting.\n");
2690 return -EINVAL;
2691 }
2692
2693 if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2694 dev_err(&pdev->dev, "region #1 is too small.\n");
2695 return -EINVAL;
2696 }
2697
2698 return 0;
2699 }
2700
2701
2702
2703
2704
2705
2706
2707 static int velocity_get_platform_info(struct velocity_info *vptr)
2708 {
2709 struct resource res;
2710 int ret;
2711
2712 if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL))
2713 vptr->no_eeprom = 1;
2714
2715 ret = of_address_to_resource(vptr->dev->of_node, 0, &res);
2716 if (ret) {
2717 dev_err(vptr->dev, "unable to find memory address\n");
2718 return ret;
2719 }
2720
2721 vptr->memaddr = res.start;
2722
2723 if (resource_size(&res) < VELOCITY_IO_SIZE) {
2724 dev_err(vptr->dev, "memory region is too small.\n");
2725 return -EINVAL;
2726 }
2727
2728 return 0;
2729 }
2730
2731
2732
2733
2734
2735
2736
2737
2738 static void velocity_print_info(struct velocity_info *vptr)
2739 {
2740 netdev_info(vptr->netdev, "%s - Ethernet Address: %pM\n",
2741 get_chip_name(vptr->chip_id), vptr->netdev->dev_addr);
2742 }
2743
2744 static u32 velocity_get_link(struct net_device *dev)
2745 {
2746 struct velocity_info *vptr = netdev_priv(dev);
2747 struct mac_regs __iomem *regs = vptr->mac_regs;
2748 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 1 : 0;
2749 }
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761 static int velocity_probe(struct device *dev, int irq,
2762 const struct velocity_info_tbl *info,
2763 enum velocity_bus_type bustype)
2764 {
2765 struct net_device *netdev;
2766 int i;
2767 struct velocity_info *vptr;
2768 struct mac_regs __iomem *regs;
2769 int ret = -ENOMEM;
2770 u8 addr[ETH_ALEN];
2771
2772
2773
2774
2775 if (velocity_nics >= MAX_UNITS) {
2776 dev_notice(dev, "already found %d NICs.\n", velocity_nics);
2777 return -ENODEV;
2778 }
2779
2780 netdev = alloc_etherdev(sizeof(struct velocity_info));
2781 if (!netdev)
2782 goto out;
2783
2784
2785
2786 SET_NETDEV_DEV(netdev, dev);
2787 vptr = netdev_priv(netdev);
2788
2789 pr_info_once("%s Ver. %s\n", VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2790 pr_info_once("Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2791 pr_info_once("Copyright (c) 2004 Red Hat Inc.\n");
2792
2793 netdev->irq = irq;
2794 vptr->netdev = netdev;
2795 vptr->dev = dev;
2796
2797 velocity_init_info(vptr, info);
2798
2799 if (bustype == BUS_PCI) {
2800 vptr->pdev = to_pci_dev(dev);
2801
2802 ret = velocity_get_pci_info(vptr);
2803 if (ret < 0)
2804 goto err_free_dev;
2805 } else {
2806 vptr->pdev = NULL;
2807 ret = velocity_get_platform_info(vptr);
2808 if (ret < 0)
2809 goto err_free_dev;
2810 }
2811
2812 regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2813 if (regs == NULL) {
2814 ret = -EIO;
2815 goto err_free_dev;
2816 }
2817
2818 vptr->mac_regs = regs;
2819 vptr->rev_id = readb(®s->rev_id);
2820
2821 mac_wol_reset(regs);
2822
2823 for (i = 0; i < 6; i++)
2824 addr[i] = readb(®s->PAR[i]);
2825 eth_hw_addr_set(netdev, addr);
2826
2827
2828 velocity_get_options(&vptr->options, velocity_nics);
2829
2830
2831
2832
2833
2834 vptr->options.flags &= info->flags;
2835
2836
2837
2838
2839
2840 vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2841
2842 vptr->wol_opts = vptr->options.wol_opts;
2843 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2844
2845 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2846
2847 netdev->netdev_ops = &velocity_netdev_ops;
2848 netdev->ethtool_ops = &velocity_ethtool_ops;
2849 netif_napi_add(netdev, &vptr->napi, velocity_poll, NAPI_POLL_WEIGHT);
2850
2851 netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
2852 NETIF_F_HW_VLAN_CTAG_TX;
2853 netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
2854 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX |
2855 NETIF_F_IP_CSUM;
2856
2857
2858 netdev->min_mtu = VELOCITY_MIN_MTU;
2859 netdev->max_mtu = VELOCITY_MAX_MTU;
2860
2861 ret = register_netdev(netdev);
2862 if (ret < 0)
2863 goto err_iounmap;
2864
2865 if (!velocity_get_link(netdev)) {
2866 netif_carrier_off(netdev);
2867 vptr->mii_status |= VELOCITY_LINK_FAIL;
2868 }
2869
2870 velocity_print_info(vptr);
2871 dev_set_drvdata(vptr->dev, netdev);
2872
2873
2874
2875 velocity_set_power_state(vptr, PCI_D3hot);
2876 velocity_nics++;
2877 out:
2878 return ret;
2879
2880 err_iounmap:
2881 netif_napi_del(&vptr->napi);
2882 iounmap(regs);
2883 err_free_dev:
2884 free_netdev(netdev);
2885 goto out;
2886 }
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896 static int velocity_remove(struct device *dev)
2897 {
2898 struct net_device *netdev = dev_get_drvdata(dev);
2899 struct velocity_info *vptr = netdev_priv(netdev);
2900
2901 unregister_netdev(netdev);
2902 netif_napi_del(&vptr->napi);
2903 iounmap(vptr->mac_regs);
2904 free_netdev(netdev);
2905 velocity_nics--;
2906
2907 return 0;
2908 }
2909
2910 static int velocity_pci_probe(struct pci_dev *pdev,
2911 const struct pci_device_id *ent)
2912 {
2913 const struct velocity_info_tbl *info =
2914 &chip_info_table[ent->driver_data];
2915 int ret;
2916
2917 ret = pci_enable_device(pdev);
2918 if (ret < 0)
2919 return ret;
2920
2921 ret = pci_request_regions(pdev, VELOCITY_NAME);
2922 if (ret < 0) {
2923 dev_err(&pdev->dev, "No PCI resources.\n");
2924 goto fail1;
2925 }
2926
2927 ret = velocity_probe(&pdev->dev, pdev->irq, info, BUS_PCI);
2928 if (ret == 0)
2929 return 0;
2930
2931 pci_release_regions(pdev);
2932 fail1:
2933 pci_disable_device(pdev);
2934 return ret;
2935 }
2936
2937 static void velocity_pci_remove(struct pci_dev *pdev)
2938 {
2939 velocity_remove(&pdev->dev);
2940
2941 pci_release_regions(pdev);
2942 pci_disable_device(pdev);
2943 }
2944
2945 static int velocity_platform_probe(struct platform_device *pdev)
2946 {
2947 const struct velocity_info_tbl *info;
2948 int irq;
2949
2950 info = of_device_get_match_data(&pdev->dev);
2951 if (!info)
2952 return -EINVAL;
2953
2954 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
2955 if (!irq)
2956 return -EINVAL;
2957
2958 return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
2959 }
2960
2961 static int velocity_platform_remove(struct platform_device *pdev)
2962 {
2963 velocity_remove(&pdev->dev);
2964
2965 return 0;
2966 }
2967
2968 #ifdef CONFIG_PM_SLEEP
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978 static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2979 {
2980 u16 crc = 0xFFFF;
2981 u8 mask;
2982 int i, j;
2983
2984 for (i = 0; i < size; i++) {
2985 mask = mask_pattern[i];
2986
2987
2988 if (mask == 0x00)
2989 continue;
2990
2991 for (j = 0; j < 8; j++) {
2992 if ((mask & 0x01) == 0) {
2993 mask >>= 1;
2994 continue;
2995 }
2996 mask >>= 1;
2997 crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2998 }
2999 }
3000
3001 crc = ~crc;
3002 return bitrev32(crc) >> 16;
3003 }
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014 static int velocity_set_wol(struct velocity_info *vptr)
3015 {
3016 struct mac_regs __iomem *regs = vptr->mac_regs;
3017 enum speed_opt spd_dpx = vptr->options.spd_dpx;
3018 static u8 buf[256];
3019 int i;
3020
3021 static u32 mask_pattern[2][4] = {
3022 {0x00203000, 0x000003C0, 0x00000000, 0x0000000},
3023 {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}
3024 };
3025
3026 writew(0xFFFF, ®s->WOLCRClr);
3027 writeb(WOLCFG_SAB | WOLCFG_SAM, ®s->WOLCFGSet);
3028 writew(WOLCR_MAGIC_EN, ®s->WOLCRSet);
3029
3030
3031
3032
3033
3034
3035 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3036 writew(WOLCR_UNICAST_EN, ®s->WOLCRSet);
3037
3038 if (vptr->wol_opts & VELOCITY_WOL_ARP) {
3039 struct arp_packet *arp = (struct arp_packet *) buf;
3040 u16 crc;
3041 memset(buf, 0, sizeof(struct arp_packet) + 7);
3042
3043 for (i = 0; i < 4; i++)
3044 writel(mask_pattern[0][i], ®s->ByteMask[0][i]);
3045
3046 arp->type = htons(ETH_P_ARP);
3047 arp->ar_op = htons(1);
3048
3049 memcpy(arp->ar_tip, vptr->ip_addr, 4);
3050
3051 crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
3052 (u8 *) & mask_pattern[0][0]);
3053
3054 writew(crc, ®s->PatternCRC[0]);
3055 writew(WOLCR_ARP_EN, ®s->WOLCRSet);
3056 }
3057
3058 BYTE_REG_BITS_ON(PWCFG_WOLTYPE, ®s->PWCFGSet);
3059 BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, ®s->PWCFGSet);
3060
3061 writew(0x0FFF, ®s->WOLSRClr);
3062
3063 if (spd_dpx == SPD_DPX_1000_FULL)
3064 goto mac_done;
3065
3066 if (spd_dpx != SPD_DPX_AUTO)
3067 goto advertise_done;
3068
3069 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
3070 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
3071 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
3072
3073 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
3074 }
3075
3076 if (vptr->mii_status & VELOCITY_SPEED_1000)
3077 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
3078
3079 advertise_done:
3080 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR);
3081
3082 {
3083 u8 GCR;
3084 GCR = readb(®s->CHIPGCR);
3085 GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
3086 writeb(GCR, ®s->CHIPGCR);
3087 }
3088
3089 mac_done:
3090 BYTE_REG_BITS_OFF(ISR_PWEI, ®s->ISR);
3091
3092 BYTE_REG_BITS_ON(STICKHW_SWPTAG, ®s->STICKHW);
3093
3094 BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW);
3095
3096 return 0;
3097 }
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109 static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
3110 {
3111 struct mac_regs __iomem *regs = vptr->mac_regs;
3112 u16 i;
3113 u8 __iomem *ptr = (u8 __iomem *)regs;
3114
3115 for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
3116 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3117
3118 for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
3119 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3120
3121 for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3122 *((u32 *) (context->mac_reg + i)) = readl(ptr + i);
3123
3124 }
3125
3126 static int velocity_suspend(struct device *dev)
3127 {
3128 struct net_device *netdev = dev_get_drvdata(dev);
3129 struct velocity_info *vptr = netdev_priv(netdev);
3130 unsigned long flags;
3131
3132 if (!netif_running(vptr->netdev))
3133 return 0;
3134
3135 netif_device_detach(vptr->netdev);
3136
3137 spin_lock_irqsave(&vptr->lock, flags);
3138 if (vptr->pdev)
3139 pci_save_state(vptr->pdev);
3140
3141 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3142 velocity_get_ip(vptr);
3143 velocity_save_context(vptr, &vptr->context);
3144 velocity_shutdown(vptr);
3145 velocity_set_wol(vptr);
3146 if (vptr->pdev)
3147 pci_enable_wake(vptr->pdev, PCI_D3hot, 1);
3148 velocity_set_power_state(vptr, PCI_D3hot);
3149 } else {
3150 velocity_save_context(vptr, &vptr->context);
3151 velocity_shutdown(vptr);
3152 if (vptr->pdev)
3153 pci_disable_device(vptr->pdev);
3154 velocity_set_power_state(vptr, PCI_D3hot);
3155 }
3156
3157 spin_unlock_irqrestore(&vptr->lock, flags);
3158 return 0;
3159 }
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169 static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3170 {
3171 struct mac_regs __iomem *regs = vptr->mac_regs;
3172 int i;
3173 u8 __iomem *ptr = (u8 __iomem *)regs;
3174
3175 for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3176 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3177
3178
3179 for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3180
3181 writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3182
3183 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3184 }
3185
3186 for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3187 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3188
3189 for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3190 writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3191
3192 for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3193 writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3194 }
3195
3196 static int velocity_resume(struct device *dev)
3197 {
3198 struct net_device *netdev = dev_get_drvdata(dev);
3199 struct velocity_info *vptr = netdev_priv(netdev);
3200 unsigned long flags;
3201 int i;
3202
3203 if (!netif_running(vptr->netdev))
3204 return 0;
3205
3206 velocity_set_power_state(vptr, PCI_D0);
3207
3208 if (vptr->pdev) {
3209 pci_enable_wake(vptr->pdev, PCI_D0, 0);
3210 pci_restore_state(vptr->pdev);
3211 }
3212
3213 mac_wol_reset(vptr->mac_regs);
3214
3215 spin_lock_irqsave(&vptr->lock, flags);
3216 velocity_restore_context(vptr, &vptr->context);
3217 velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3218 mac_disable_int(vptr->mac_regs);
3219
3220 velocity_tx_srv(vptr);
3221
3222 for (i = 0; i < vptr->tx.numq; i++) {
3223 if (vptr->tx.used[i])
3224 mac_tx_queue_wake(vptr->mac_regs, i);
3225 }
3226
3227 mac_enable_int(vptr->mac_regs);
3228 spin_unlock_irqrestore(&vptr->lock, flags);
3229 netif_device_attach(vptr->netdev);
3230
3231 return 0;
3232 }
3233 #endif
3234
3235 static SIMPLE_DEV_PM_OPS(velocity_pm_ops, velocity_suspend, velocity_resume);
3236
3237
3238
3239
3240
3241 static struct pci_driver velocity_pci_driver = {
3242 .name = VELOCITY_NAME,
3243 .id_table = velocity_pci_id_table,
3244 .probe = velocity_pci_probe,
3245 .remove = velocity_pci_remove,
3246 .driver = {
3247 .pm = &velocity_pm_ops,
3248 },
3249 };
3250
3251 static struct platform_driver velocity_platform_driver = {
3252 .probe = velocity_platform_probe,
3253 .remove = velocity_platform_remove,
3254 .driver = {
3255 .name = "via-velocity",
3256 .of_match_table = velocity_of_ids,
3257 .pm = &velocity_pm_ops,
3258 },
3259 };
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269 static int velocity_ethtool_up(struct net_device *dev)
3270 {
3271 struct velocity_info *vptr = netdev_priv(dev);
3272
3273 if (vptr->ethtool_ops_nesting == U32_MAX)
3274 return -EBUSY;
3275 if (!vptr->ethtool_ops_nesting++ && !netif_running(dev))
3276 velocity_set_power_state(vptr, PCI_D0);
3277 return 0;
3278 }
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288 static void velocity_ethtool_down(struct net_device *dev)
3289 {
3290 struct velocity_info *vptr = netdev_priv(dev);
3291
3292 if (!--vptr->ethtool_ops_nesting && !netif_running(dev))
3293 velocity_set_power_state(vptr, PCI_D3hot);
3294 }
3295
3296 static int velocity_get_link_ksettings(struct net_device *dev,
3297 struct ethtool_link_ksettings *cmd)
3298 {
3299 struct velocity_info *vptr = netdev_priv(dev);
3300 struct mac_regs __iomem *regs = vptr->mac_regs;
3301 u32 status;
3302 u32 supported, advertising;
3303
3304 status = check_connection_type(vptr->mac_regs);
3305
3306 supported = SUPPORTED_TP |
3307 SUPPORTED_Autoneg |
3308 SUPPORTED_10baseT_Half |
3309 SUPPORTED_10baseT_Full |
3310 SUPPORTED_100baseT_Half |
3311 SUPPORTED_100baseT_Full |
3312 SUPPORTED_1000baseT_Half |
3313 SUPPORTED_1000baseT_Full;
3314
3315 advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
3316 if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
3317 advertising |=
3318 ADVERTISED_10baseT_Half |
3319 ADVERTISED_10baseT_Full |
3320 ADVERTISED_100baseT_Half |
3321 ADVERTISED_100baseT_Full |
3322 ADVERTISED_1000baseT_Half |
3323 ADVERTISED_1000baseT_Full;
3324 } else {
3325 switch (vptr->options.spd_dpx) {
3326 case SPD_DPX_1000_FULL:
3327 advertising |= ADVERTISED_1000baseT_Full;
3328 break;
3329 case SPD_DPX_100_HALF:
3330 advertising |= ADVERTISED_100baseT_Half;
3331 break;
3332 case SPD_DPX_100_FULL:
3333 advertising |= ADVERTISED_100baseT_Full;
3334 break;
3335 case SPD_DPX_10_HALF:
3336 advertising |= ADVERTISED_10baseT_Half;
3337 break;
3338 case SPD_DPX_10_FULL:
3339 advertising |= ADVERTISED_10baseT_Full;
3340 break;
3341 default:
3342 break;
3343 }
3344 }
3345
3346 if (status & VELOCITY_SPEED_1000)
3347 cmd->base.speed = SPEED_1000;
3348 else if (status & VELOCITY_SPEED_100)
3349 cmd->base.speed = SPEED_100;
3350 else
3351 cmd->base.speed = SPEED_10;
3352
3353 cmd->base.autoneg = (status & VELOCITY_AUTONEG_ENABLE) ?
3354 AUTONEG_ENABLE : AUTONEG_DISABLE;
3355 cmd->base.port = PORT_TP;
3356 cmd->base.phy_address = readb(®s->MIIADR) & 0x1F;
3357
3358 if (status & VELOCITY_DUPLEX_FULL)
3359 cmd->base.duplex = DUPLEX_FULL;
3360 else
3361 cmd->base.duplex = DUPLEX_HALF;
3362
3363 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3364 supported);
3365 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3366 advertising);
3367
3368 return 0;
3369 }
3370
3371 static int velocity_set_link_ksettings(struct net_device *dev,
3372 const struct ethtool_link_ksettings *cmd)
3373 {
3374 struct velocity_info *vptr = netdev_priv(dev);
3375 u32 speed = cmd->base.speed;
3376 u32 curr_status;
3377 u32 new_status = 0;
3378 int ret = 0;
3379
3380 curr_status = check_connection_type(vptr->mac_regs);
3381 curr_status &= (~VELOCITY_LINK_FAIL);
3382
3383 new_status |= ((cmd->base.autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3384 new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
3385 new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3386 new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3387 new_status |= ((cmd->base.duplex == DUPLEX_FULL) ?
3388 VELOCITY_DUPLEX_FULL : 0);
3389
3390 if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
3391 (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) {
3392 ret = -EINVAL;
3393 } else {
3394 enum speed_opt spd_dpx;
3395
3396 if (new_status & VELOCITY_AUTONEG_ENABLE)
3397 spd_dpx = SPD_DPX_AUTO;
3398 else if ((new_status & VELOCITY_SPEED_1000) &&
3399 (new_status & VELOCITY_DUPLEX_FULL)) {
3400 spd_dpx = SPD_DPX_1000_FULL;
3401 } else if (new_status & VELOCITY_SPEED_100)
3402 spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3403 SPD_DPX_100_FULL : SPD_DPX_100_HALF;
3404 else if (new_status & VELOCITY_SPEED_10)
3405 spd_dpx = (new_status & VELOCITY_DUPLEX_FULL) ?
3406 SPD_DPX_10_FULL : SPD_DPX_10_HALF;
3407 else
3408 return -EOPNOTSUPP;
3409
3410 vptr->options.spd_dpx = spd_dpx;
3411
3412 velocity_set_media_mode(vptr, new_status);
3413 }
3414
3415 return ret;
3416 }
3417
3418 static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3419 {
3420 struct velocity_info *vptr = netdev_priv(dev);
3421
3422 strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
3423 strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
3424 if (vptr->pdev)
3425 strlcpy(info->bus_info, pci_name(vptr->pdev),
3426 sizeof(info->bus_info));
3427 else
3428 strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
3429 }
3430
3431 static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3432 {
3433 struct velocity_info *vptr = netdev_priv(dev);
3434 wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3435 wol->wolopts |= WAKE_MAGIC;
3436
3437
3438
3439
3440 if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3441 wol->wolopts |= WAKE_UCAST;
3442 if (vptr->wol_opts & VELOCITY_WOL_ARP)
3443 wol->wolopts |= WAKE_ARP;
3444 memcpy(&wol->sopass, vptr->wol_passwd, 6);
3445 }
3446
3447 static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3448 {
3449 struct velocity_info *vptr = netdev_priv(dev);
3450
3451 if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3452 return -EFAULT;
3453 vptr->wol_opts = VELOCITY_WOL_MAGIC;
3454
3455
3456
3457
3458
3459
3460
3461
3462 if (wol->wolopts & WAKE_MAGIC) {
3463 vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3464 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3465 }
3466 if (wol->wolopts & WAKE_UCAST) {
3467 vptr->wol_opts |= VELOCITY_WOL_UCAST;
3468 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3469 }
3470 if (wol->wolopts & WAKE_ARP) {
3471 vptr->wol_opts |= VELOCITY_WOL_ARP;
3472 vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3473 }
3474 memcpy(vptr->wol_passwd, wol->sopass, 6);
3475 return 0;
3476 }
3477
3478 static int get_pending_timer_val(int val)
3479 {
3480 int mult_bits = val >> 6;
3481 int mult = 1;
3482
3483 switch (mult_bits)
3484 {
3485 case 1:
3486 mult = 4; break;
3487 case 2:
3488 mult = 16; break;
3489 case 3:
3490 mult = 64; break;
3491 case 0:
3492 default:
3493 break;
3494 }
3495
3496 return (val & 0x3f) * mult;
3497 }
3498
3499 static void set_pending_timer_val(int *val, u32 us)
3500 {
3501 u8 mult = 0;
3502 u8 shift = 0;
3503
3504 if (us >= 0x3f) {
3505 mult = 1;
3506 shift = 2;
3507 }
3508 if (us >= 0x3f * 4) {
3509 mult = 2;
3510 shift = 4;
3511 }
3512 if (us >= 0x3f * 16) {
3513 mult = 3;
3514 shift = 6;
3515 }
3516
3517 *val = (mult << 6) | ((us >> shift) & 0x3f);
3518 }
3519
3520
3521 static int velocity_get_coalesce(struct net_device *dev,
3522 struct ethtool_coalesce *ecmd,
3523 struct kernel_ethtool_coalesce *kernel_coal,
3524 struct netlink_ext_ack *extack)
3525 {
3526 struct velocity_info *vptr = netdev_priv(dev);
3527
3528 ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3529 ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3530
3531 ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3532 ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3533
3534 return 0;
3535 }
3536
3537 static int velocity_set_coalesce(struct net_device *dev,
3538 struct ethtool_coalesce *ecmd,
3539 struct kernel_ethtool_coalesce *kernel_coal,
3540 struct netlink_ext_ack *extack)
3541 {
3542 struct velocity_info *vptr = netdev_priv(dev);
3543 int max_us = 0x3f * 64;
3544 unsigned long flags;
3545
3546
3547 if (ecmd->tx_coalesce_usecs > max_us)
3548 return -EINVAL;
3549 if (ecmd->rx_coalesce_usecs > max_us)
3550 return -EINVAL;
3551
3552 if (ecmd->tx_max_coalesced_frames > 0xff)
3553 return -EINVAL;
3554 if (ecmd->rx_max_coalesced_frames > 0xff)
3555 return -EINVAL;
3556
3557 vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3558 vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3559
3560 set_pending_timer_val(&vptr->options.rxqueue_timer,
3561 ecmd->rx_coalesce_usecs);
3562 set_pending_timer_val(&vptr->options.txqueue_timer,
3563 ecmd->tx_coalesce_usecs);
3564
3565
3566 spin_lock_irqsave(&vptr->lock, flags);
3567 mac_disable_int(vptr->mac_regs);
3568 setup_adaptive_interrupts(vptr);
3569 setup_queue_timers(vptr);
3570
3571 mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3572 mac_clear_isr(vptr->mac_regs);
3573 mac_enable_int(vptr->mac_regs);
3574 spin_unlock_irqrestore(&vptr->lock, flags);
3575
3576 return 0;
3577 }
3578
3579 static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
3580 "rx_all",
3581 "rx_ok",
3582 "tx_ok",
3583 "rx_error",
3584 "rx_runt_ok",
3585 "rx_runt_err",
3586 "rx_64",
3587 "tx_64",
3588 "rx_65_to_127",
3589 "tx_65_to_127",
3590 "rx_128_to_255",
3591 "tx_128_to_255",
3592 "rx_256_to_511",
3593 "tx_256_to_511",
3594 "rx_512_to_1023",
3595 "tx_512_to_1023",
3596 "rx_1024_to_1518",
3597 "tx_1024_to_1518",
3598 "tx_ether_collisions",
3599 "rx_crc_errors",
3600 "rx_jumbo",
3601 "tx_jumbo",
3602 "rx_mac_control_frames",
3603 "tx_mac_control_frames",
3604 "rx_frame_alignment_errors",
3605 "rx_long_ok",
3606 "rx_long_err",
3607 "tx_sqe_errors",
3608 "rx_no_buf",
3609 "rx_symbol_errors",
3610 "in_range_length_errors",
3611 "late_collisions"
3612 };
3613
3614 static void velocity_get_strings(struct net_device *dev, u32 sset, u8 *data)
3615 {
3616 switch (sset) {
3617 case ETH_SS_STATS:
3618 memcpy(data, *velocity_gstrings, sizeof(velocity_gstrings));
3619 break;
3620 }
3621 }
3622
3623 static int velocity_get_sset_count(struct net_device *dev, int sset)
3624 {
3625 switch (sset) {
3626 case ETH_SS_STATS:
3627 return ARRAY_SIZE(velocity_gstrings);
3628 default:
3629 return -EOPNOTSUPP;
3630 }
3631 }
3632
3633 static void velocity_get_ethtool_stats(struct net_device *dev,
3634 struct ethtool_stats *stats, u64 *data)
3635 {
3636 if (netif_running(dev)) {
3637 struct velocity_info *vptr = netdev_priv(dev);
3638 u32 *p = vptr->mib_counter;
3639 int i;
3640
3641 spin_lock_irq(&vptr->lock);
3642 velocity_update_hw_mibs(vptr);
3643 spin_unlock_irq(&vptr->lock);
3644
3645 for (i = 0; i < ARRAY_SIZE(velocity_gstrings); i++)
3646 *data++ = *p++;
3647 }
3648 }
3649
3650 static const struct ethtool_ops velocity_ethtool_ops = {
3651 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3652 ETHTOOL_COALESCE_MAX_FRAMES,
3653 .get_drvinfo = velocity_get_drvinfo,
3654 .get_wol = velocity_ethtool_get_wol,
3655 .set_wol = velocity_ethtool_set_wol,
3656 .get_link = velocity_get_link,
3657 .get_strings = velocity_get_strings,
3658 .get_sset_count = velocity_get_sset_count,
3659 .get_ethtool_stats = velocity_get_ethtool_stats,
3660 .get_coalesce = velocity_get_coalesce,
3661 .set_coalesce = velocity_set_coalesce,
3662 .begin = velocity_ethtool_up,
3663 .complete = velocity_ethtool_down,
3664 .get_link_ksettings = velocity_get_link_ksettings,
3665 .set_link_ksettings = velocity_set_link_ksettings,
3666 };
3667
3668 #if defined(CONFIG_PM) && defined(CONFIG_INET)
3669 static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3670 {
3671 struct in_ifaddr *ifa = ptr;
3672 struct net_device *dev = ifa->ifa_dev->dev;
3673
3674 if (dev_net(dev) == &init_net &&
3675 dev->netdev_ops == &velocity_netdev_ops)
3676 velocity_get_ip(netdev_priv(dev));
3677
3678 return NOTIFY_DONE;
3679 }
3680
3681 static struct notifier_block velocity_inetaddr_notifier = {
3682 .notifier_call = velocity_netdev_event,
3683 };
3684
3685 static void velocity_register_notifier(void)
3686 {
3687 register_inetaddr_notifier(&velocity_inetaddr_notifier);
3688 }
3689
3690 static void velocity_unregister_notifier(void)
3691 {
3692 unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3693 }
3694
3695 #else
3696
3697 #define velocity_register_notifier() do {} while (0)
3698 #define velocity_unregister_notifier() do {} while (0)
3699
3700 #endif
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710 static int __init velocity_init_module(void)
3711 {
3712 int ret_pci, ret_platform;
3713
3714 velocity_register_notifier();
3715
3716 ret_pci = pci_register_driver(&velocity_pci_driver);
3717 ret_platform = platform_driver_register(&velocity_platform_driver);
3718
3719
3720 if ((ret_pci < 0) && (ret_platform < 0)) {
3721 velocity_unregister_notifier();
3722 return ret_pci;
3723 }
3724
3725 return 0;
3726 }
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736 static void __exit velocity_cleanup_module(void)
3737 {
3738 velocity_unregister_notifier();
3739
3740 pci_unregister_driver(&velocity_pci_driver);
3741 platform_driver_unregister(&velocity_platform_driver);
3742 }
3743
3744 module_init(velocity_init_module);
3745 module_exit(velocity_cleanup_module);