0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122
0123
0124
0125 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0126
0127 #include <linux/hardirq.h>
0128 #include <linux/interrupt.h>
0129 #include <linux/module.h>
0130 #include <linux/moduleparam.h>
0131 #include <linux/kernel.h>
0132 #include <linux/types.h>
0133 #include <linux/sched.h>
0134 #include <linux/slab.h>
0135 #include <linux/delay.h>
0136 #include <linux/init.h>
0137 #include <linux/pci.h>
0138 #include <linux/dma-mapping.h>
0139 #include <linux/dmapool.h>
0140 #include <linux/netdevice.h>
0141 #include <linux/etherdevice.h>
0142 #include <linux/mii.h>
0143 #include <linux/if_vlan.h>
0144 #include <linux/skbuff.h>
0145 #include <linux/ethtool.h>
0146 #include <linux/string.h>
0147 #include <linux/firmware.h>
0148 #include <linux/rtnetlink.h>
0149 #include <asm/unaligned.h>
0150
0151
/* Driver identification strings */
#define DRV_NAME "e100"
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"

/* Watchdog period in jiffies; NAPI poll budget */
#define E100_WATCHDOG_PERIOD (2 * HZ)
#define E100_NAPI_WEIGHT 16

/* CPUSaver microcode images fetched via request_firmware() */
#define FIRMWARE_D101M "e100/d101m_ucode.bin"
#define FIRMWARE_D101S "e100/d101s_ucode.bin"
#define FIRMWARE_D102E "e100/d102e_ucode.bin"

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
MODULE_FIRMWARE(FIRMWARE_D102E);

/* Module parameters; permission 0 means not visible in sysfs */
static int debug = 3;				/* default message level */
static int eeprom_bad_csum_allow = 0;		/* tolerate bad EEPROM checksum */
static int use_io = 0;				/* force port I/O instead of MMIO */
module_param(debug, int, 0);
module_param(eeprom_bad_csum_allow, int, 0);
module_param(use_io, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
0179
/* Build a PCI match entry for an Intel 8255x part; the trailing field
 * (driver_data) records the ICH chipset generation, 0 for non-ICH parts.
 * Matching is by vendor/device plus the Ethernet class code.
 */
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
	PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
	PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
static const struct pci_device_id e100_id_table[] = {
	INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1032, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1033, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1034, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1038, 3),
	INTEL_8255X_ETHERNET_DEVICE(0x1039, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103A, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103B, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103C, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103D, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x103E, 4),
	INTEL_8255X_ETHERNET_DEVICE(0x1050, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1051, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1052, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1053, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1054, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1055, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1056, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1057, 5),
	INTEL_8255X_ETHERNET_DEVICE(0x1059, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1064, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1065, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1066, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1067, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1068, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
	INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7),
	INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
	INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
	INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
	{ 0, }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, e100_id_table);
0229
/* MAC revisions.  For non-ICH parts this value is taken straight from
 * the PCI revision ID (see e100_get_defaults()); ICH parts are forced
 * to mac_82559_D101M.
 */
enum mac {
	mac_82557_D100_A  = 0,
	mac_82557_D100_B  = 1,
	mac_82557_D100_C  = 2,
	mac_82558_D101_A4 = 4,
	mac_82558_D101_B0 = 5,
	mac_82559_D101M   = 8,
	mac_82559_D101S   = 9,
	mac_82550_D102    = 12,
	mac_82550_D102_C  = 13,
	mac_82551_E       = 14,
	mac_82551_F       = 15,
	mac_82551_10      = 16,
	mac_unknown       = 0xFF,
};

/* PHY IDs as read from MII_PHYSID2:MII_PHYSID1 (hi word : lo word) */
enum phy {
	phy_100a     = 0x000003E0,
	phy_100c     = 0x035002A8,
	phy_82555_tx = 0x015002A8,
	phy_nsc_tx   = 0x5C002000,
	phy_82562_et = 0x033002A8,
	phy_82562_em = 0x032002A8,
	phy_82562_ek = 0x031002A8,
	phy_82562_eh = 0x017002A8,
	phy_82552_v  = 0xd061004d,
	phy_unknown  = 0xFFFFFFFF,
};
0258
0259
/* Memory-mapped Control/Status Register layout, accessed through
 * nic->csr with ioread*/iowrite*.  The scb sub-struct is the System
 * Control Block used for all command issue and interrupt ack/mask.
 */
struct csr {
	struct {
		u8 status;
		u8 stat_ack;	/* write-1-to-ack interrupt sources */
		u8 cmd_lo;	/* CU/RU command opcode */
		u8 cmd_hi;	/* interrupt mask / software irq */
		u32 gen_ptr;	/* general pointer (DMA address operand) */
	} scb;
	u32 port;		/* reset/self-test port */
	u16 flash_ctrl;
	u8 eeprom_ctrl_lo;	/* bit-banged serial EEPROM interface */
	u8 eeprom_ctrl_hi;
	u32 mdi_ctrl;		/* MDIO management interface */
	u32 rx_dma_count;
};
0275
/* Receive Unit status bits within scb.status */
enum scb_status {
	rus_no_res       = 0x08,
	rus_ready        = 0x10,
	rus_mask         = 0x3C,
};

/* Software-tracked Receive Unit state */
enum ru_state {
	RU_SUSPENDED = 0,
	RU_RUNNING	 = 1,
	RU_UNINITIALIZED = -1,
};

/* Interrupt sources in scb.stat_ack; writing a bit back acknowledges it */
enum scb_stat_ack {
	stat_ack_not_ours    = 0x00,
	stat_ack_sw_gen      = 0x04,
	stat_ack_rnr         = 0x10,	/* RU left ready state */
	stat_ack_cu_idle     = 0x20,
	stat_ack_frame_rx    = 0x40,
	stat_ack_cu_cmd_done = 0x80,
	stat_ack_not_present = 0xFF,
	stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx),
	stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done),
};

/* scb.cmd_hi values: interrupt mask control / software-generated irq */
enum scb_cmd_hi {
	irq_mask_none = 0x00,
	irq_mask_all  = 0x01,
	irq_sw_gen    = 0x02,
};

/* scb.cmd_lo opcodes for the Command Unit (cuc_*) and Receive Unit (ruc_*) */
enum scb_cmd_lo {
	cuc_nop        = 0x00,
	ruc_start      = 0x01,
	ruc_load_base  = 0x06,
	cuc_start      = 0x10,
	cuc_resume     = 0x20,
	cuc_dump_addr  = 0x40,
	cuc_dump_stats = 0x50,
	cuc_load_base  = 0x60,
	cuc_dump_reset = 0x70,
};

/* Completion signatures written by the chip after a stats dump */
enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

/* Values written to csr->port */
enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
	selective_reset = 0x0002,
};

/* Bit-bang lines of the serial EEPROM interface (eeprom_ctrl_lo) */
enum eeprom_ctrl_lo {
	eesk = 0x01,	/* clock */
	eecs = 0x02,	/* chip select */
	eedi = 0x04,	/* data in (to EEPROM) */
	eedo = 0x08,	/* data out (from EEPROM) */
};

/* mdi_ctrl register bits */
enum mdi_ctrl {
	mdi_write = 0x04000000,
	mdi_read  = 0x08000000,
	mdi_ready = 0x10000000,
};

/* Serial EEPROM opcodes */
enum eeprom_op {
	op_write = 0x05,
	op_read  = 0x06,
	op_ewds  = 0x10,	/* write-disable */
	op_ewen  = 0x13,	/* write-enable */
};
0348
/* Word offsets of interest within the EEPROM image */
enum eeprom_offsets {
	eeprom_cnfg_mdix  = 0x03,
	eeprom_phy_iface  = 0x06,
	eeprom_id         = 0x0A,
	eeprom_config_asf = 0x0D,
	eeprom_smbus_addr = 0x90,
};

enum eeprom_cnfg_mdix {
	eeprom_mdix_enabled = 0x0080,
};

/* PHY interface type encoded in the eeprom_phy_iface word */
enum eeprom_phy_iface {
	NoSuchPhy = 0,
	I82553AB,
	I82553C,
	I82503,
	DP83840,
	S80C240,
	S80C24,
	I82555,
	DP83840A = 10,
};

enum eeprom_id {
	eeprom_id_wol = 0x0020,	/* wake-on-LAN capable */
};

enum eeprom_config_asf {
	eeprom_asf = 0x8000,
	eeprom_gcl = 0x4000,
};

/* Command Block completion status bits */
enum cb_status {
	cb_complete = 0x8000,
	cb_ok       = 0x2000,
};
0386
0387
0388
0389
0390
/* Command Block command word: opcode in the low bits, control flags
 * (interrupt, suspend, end-of-list, CNA interrupt delay) in the high bits.
 */
enum cb_command {
	cb_nop    = 0x0000,
	cb_iaaddr = 0x0001,
	cb_config = 0x0002,
	cb_multi  = 0x0003,
	cb_tx     = 0x0004,
	cb_ucode  = 0x0005,
	cb_dump   = 0x0006,
	cb_tx_sf  = 0x0008,	/* simplified-mode flag for tx */
	cb_tx_nc  = 0x0010,
	cb_cid    = 0x1f00,	/* CNA interrupt delay */
	cb_i      = 0x2000,	/* interrupt on completion */
	cb_s      = 0x4000,	/* suspend after this CB */
	cb_el     = 0x8000,	/* end of list */
};

/* Receive Frame Descriptor as laid out in memory for the device
 * (little-endian fields).
 */
struct rfd {
	__le16 status;
	__le16 command;
	__le32 link;		/* DMA address of next RFD */
	__le32 rbd;
	__le16 actual_size;	/* bytes actually received */
	__le16 size;		/* buffer size */
};

/* Host-side bookkeeping for one receive buffer in the circular list */
struct rx {
	struct rx *next, *prev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
0421
/* X() reverses the declaration order of adjacent bitfields on
 * big-endian-bitfield targets so that struct config matches the byte
 * layout the device expects regardless of host endianness.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
#define X(a,b)	b,a
#else
#define X(a,b)	a,b
#endif
/* Device configuration block sent with the cb_config command; byte and
 * bit positions are fixed by the hardware.  See e100_configure() for
 * the values programmed.
 */
struct config {
/*0*/	u8 X(byte_count:6, pad0:2);
/*1*/	u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1);
/*2*/	u8 adaptive_ifs;
/*3*/	u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1),
	   term_write_cache_line:1), pad3:4);
/*4*/	u8 X(rx_dma_max_count:7, pad4:1);
/*5*/	u8 X(tx_dma_max_count:7, dma_max_count_enable:1);
/*6*/	u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1),
	   tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1),
	   rx_save_overruns : 1), rx_save_bad_frames : 1);
/*7*/	u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2),
	   pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1),
	   tx_dynamic_tbd:1);
/*8*/	u8 X(X(mii_mode:1, pad8:6), csma_disabled:1);
/*9*/	u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1),
	   link_status_wake:1), arp_wake:1), mcmatch_wake:1);
/*10*/	u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2),
	   loopback:2);
/*11*/	u8 X(linear_priority:3, pad11:5);
/*12*/	u8 X(X(linear_priority_mode:1, pad12:3), ifs:4);
/*13*/	u8 ip_addr_lo;
/*14*/	u8 ip_addr_hi;
/*15*/	u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1),
	   wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1),
	   pad15_2:1), crs_or_cdt:1);
/*16*/	u8 fc_delay_lo;
/*17*/	u8 fc_delay_hi;
/*18*/	u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1),
	   rx_long_ok:1), fc_priority_threshold:3), pad18:1);
/*19*/	u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1),
	   fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1),
	   full_duplex_force:1), full_duplex_pin:1);
/*20*/	u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1);
/*21*/	u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4);
/*22*/	u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6);
	u8 pad_d102[9];
};
0465
#define E100_MAX_MULTICAST_ADDRS	64
/* Payload of the cb_multi command: count followed by packed MAC addresses */
struct multi {
	__le16 count;
	u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2];
};

/* Microcode image length in 32-bit words */
#define UCODE_SIZE			134
/* Command Block: the hardware-visible header (status/command/link) and
 * per-opcode payload union, followed by host-only list bookkeeping.
 */
struct cb {
	__le16 status;
	__le16 command;
	__le32 link;		/* DMA address of next CB */
	union {
		u8 iaaddr[ETH_ALEN];		/* cb_iaaddr */
		__le32 ucode[UCODE_SIZE];	/* cb_ucode */
		struct config config;		/* cb_config */
		struct multi multi;		/* cb_multi */
		struct {			/* cb_tx */
			u32 tbd_array;
			u16 tcb_byte_count;
			u8 threshold;
			u8 tbd_count;
			struct {
				__le32 buf_addr;
				__le16 size;
				u16 eol;
			} tbd;
		} tcb;
		__le32 dump_buffer_addr;	/* cb_dump */
	} u;
	/* host-only fields; not read by the device */
	struct cb *next, *prev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
};

enum loopback {
	lb_none = 0, lb_mac = 1, lb_phy = 3,
};

/* Statistics dump area filled by the cuc_dump_* commands */
struct stats {
	__le32 tx_good_frames, tx_max_collisions, tx_late_collisions,
		tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions,
		tx_multiple_collisions, tx_total_collisions;
	__le32 rx_good_frames, rx_crc_errors, rx_alignment_errors,
		rx_resource_errors, rx_overrun_errors, rx_cdt_errors,
		rx_short_frame_errors;
	__le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported;
	__le16 xmt_tco_frames, rcv_tco_frames;
	__le32 complete;	/* completion signature (see enum cuc_dump) */
};

/* DMA-coherent scratch memory shared with the device */
struct mem {
	struct {
		u32 signature;	/* written non-zero by a passing self-test */
		u32 result;	/* zero on success */
	} selftest;
	struct stats stats;
	u8 dump_buf[596];
};

/* min/max/default triple for a tunable ring size */
struct param_range {
	u32 min;
	u32 max;
	u32 count;
};

struct params {
	struct param_range rfds;	/* receive frame descriptors */
	struct param_range cbs;		/* command blocks */
};
0536
/* Per-adapter private state, hung off the net_device */
struct nic {
	/* Begin: frequently used values: keep adjacent for cache effect */
	u32 msg_enable				____cacheline_aligned;
	struct net_device *netdev;
	struct pci_dev *pdev;
	/* PHY access method; swapped for emulated/82552 variants */
	u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data);

	/* Receive ring: circular list of struct rx */
	struct rx *rxs				____cacheline_aligned;
	struct rx *rx_to_use;
	struct rx *rx_to_clean;
	struct rfd blank_rfd;	/* template copied into fresh RFDs */
	enum ru_state ru_running;

	/* Command ring and the locks protecting it / the SCB */
	spinlock_t cb_lock			____cacheline_aligned;
	spinlock_t cmd_lock;
	struct csr __iomem *csr;	/* mapped device registers */
	enum scb_cmd_lo cuc_cmd;	/* cuc_start or cuc_resume */
	unsigned int cbs_avail;
	struct napi_struct napi;
	struct cb *cbs;
	struct cb *cb_to_use;
	struct cb *cb_to_send;
	struct cb *cb_to_clean;
	__le16 tx_command;	/* precomputed command word for tx CBs */
	/* End: frequently used values */

	enum {
		ich                = (1 << 0),
		promiscuous        = (1 << 1),
		multicast_all      = (1 << 2),
		wol_magic          = (1 << 3),
		ich_10h_workaround = (1 << 4),
	} flags					____cacheline_aligned;

	enum mac mac;
	enum phy phy;
	struct params params;
	struct timer_list watchdog;
	struct mii_if_info mii;
	struct work_struct tx_timeout_task;
	enum loopback loopback;

	struct mem *mem;	/* DMA-coherent selftest/stats/dump area */
	dma_addr_t dma_addr;

	struct dma_pool *cbs_pool;
	dma_addr_t cbs_dma_addr;
	u8 adaptive_ifs;
	u8 tx_threshold;
	/* software statistics accumulators */
	u32 tx_frames;
	u32 tx_collisions;
	u32 tx_deferred;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_fc_pause;
	u32 tx_tco_frames;

	u32 rx_fc_pause;
	u32 rx_fc_unsupported;
	u32 rx_tco_frames;
	u32 rx_short_frame_errors;
	u32 rx_over_length_errors;

	u16 eeprom_wc;		/* EEPROM size in 16-bit words */
	__le16 eeprom[256];	/* cached EEPROM contents */
	spinlock_t mdio_lock;
	const struct firmware *fw;	/* cached microcode image */
};
0605
static inline void e100_write_flush(struct nic *nic)
{
	/* Flush previous PCI writes to the device by forcing a read;
	 * the result itself is not needed.
	 */
	(void)ioread8(&nic->csr->scb.status);
}
0612
/* Unmask all device interrupts via the SCB command-hi register */
static void e100_enable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);	/* post the write before releasing the lock */
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
0622
/* Mask all device interrupts via the SCB command-hi register */
static void e100_disable_irq(struct nic *nic)
{
	unsigned long flags;

	spin_lock_irqsave(&nic->cmd_lock, flags);
	iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);	/* post the write before releasing the lock */
	spin_unlock_irqrestore(&nic->cmd_lock, flags);
}
0632
/* Reset the adapter: selective reset first, then a full software
 * reset, each followed by a flush and settle delay.  Interrupts are
 * masked afterwards since the reset re-enables them.
 */
static void e100_hw_reset(struct nic *nic)
{
	/* Put CU and RU into a quiescent state before the full reset */
	iowrite32(selective_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	iowrite32(software_reset, &nic->csr->port);
	e100_write_flush(nic); udelay(20);

	/* Mask off our interrupt line - it's unmasked after reset */
	e100_disable_irq(nic);
}
0647
/* Run the chip's built-in self-test.  The device DMA-writes its result
 * into nic->mem->selftest; pre-seeded values let us distinguish
 * "never ran" (signature still 0) from "ran and failed" (result != 0).
 * Returns 0 on pass, -ETIMEDOUT otherwise.
 */
static int e100_self_test(struct nic *nic)
{
	/* NOTE(review): dma_addr_t truncated to u32 here - the port
	 * register is 32 bits wide; assumes the selftest area was
	 * allocated in 32-bit-addressable memory.
	 */
	u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest);

	nic->mem->selftest.signature = 0;
	nic->mem->selftest.result = 0xFFFFFFFF;

	iowrite32(selftest | dma_addr, &nic->csr->port);
	e100_write_flush(nic);
	/* Wait 10 msec for self-test to complete */
	msleep(10);

	/* Interrupts are enabled after self-test, mask them again */
	e100_disable_irq(nic);

	/* Check results of self-test */
	if (nic->mem->selftest.result != 0) {
		netif_err(nic, hw, nic->netdev,
			  "Self-test failed: result=0x%08X\n",
			  nic->mem->selftest.result);
		return -ETIMEDOUT;
	}
	if (nic->mem->selftest.signature == 0) {
		netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}
0680
0681 static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data)
0682 {
0683 u32 cmd_addr_data[3];
0684 u8 ctrl;
0685 int i, j;
0686
0687
0688 cmd_addr_data[0] = op_ewen << (addr_len - 2);
0689 cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) |
0690 le16_to_cpu(data);
0691 cmd_addr_data[2] = op_ewds << (addr_len - 2);
0692
0693
0694 for (j = 0; j < 3; j++) {
0695
0696
0697 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
0698 e100_write_flush(nic); udelay(4);
0699
0700 for (i = 31; i >= 0; i--) {
0701 ctrl = (cmd_addr_data[j] & (1 << i)) ?
0702 eecs | eedi : eecs;
0703 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
0704 e100_write_flush(nic); udelay(4);
0705
0706 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
0707 e100_write_flush(nic); udelay(4);
0708 }
0709
0710 msleep(10);
0711
0712
0713 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
0714 e100_write_flush(nic); udelay(4);
0715 }
0716 };
0717
0718
0719 static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
0720 {
0721 u32 cmd_addr_data;
0722 u16 data = 0;
0723 u8 ctrl;
0724 int i;
0725
0726 cmd_addr_data = ((op_read << *addr_len) | addr) << 16;
0727
0728
0729 iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo);
0730 e100_write_flush(nic); udelay(4);
0731
0732
0733 for (i = 31; i >= 0; i--) {
0734 ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
0735 iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo);
0736 e100_write_flush(nic); udelay(4);
0737
0738 iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
0739 e100_write_flush(nic); udelay(4);
0740
0741
0742
0743 ctrl = ioread8(&nic->csr->eeprom_ctrl_lo);
0744 if (!(ctrl & eedo) && i > 16) {
0745 *addr_len -= (i - 16);
0746 i = 17;
0747 }
0748
0749 data = (data << 1) | (ctrl & eedo ? 1 : 0);
0750 }
0751
0752
0753 iowrite8(0, &nic->csr->eeprom_ctrl_lo);
0754 e100_write_flush(nic); udelay(4);
0755
0756 return cpu_to_le16(data);
0757 };
0758
0759
/* Read the whole EEPROM into nic->eeprom and validate its checksum.
 * The sum of all words including the stored checksum must equal
 * 0xBABA.  Returns 0 on success, -EAGAIN on a bad checksum unless the
 * eeprom_bad_csum_allow module parameter is set.
 */
static int e100_eeprom_load(struct nic *nic)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	for (addr = 0; addr < nic->eeprom_wc; addr++) {
		nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
		if (addr < nic->eeprom_wc - 1)
			checksum += le16_to_cpu(nic->eeprom[addr]);
	}

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA
	 */
	if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
		netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
		if (!eeprom_bad_csum_allow)
			return -EAGAIN;
	}

	return 0;
}
0784
0785
/* Write back a range of cached EEPROM words from nic->eeprom, then
 * recompute and store the checksum word so the device sum remains
 * 0xBABA.  Returns 0 on success or -EINVAL if the range would reach
 * past the checksum word.
 */
static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
{
	u16 addr, addr_len = 8, checksum = 0;

	/* Try reading with an 8-bit addr len to discover actual addr len */
	e100_eeprom_read(nic, &addr_len, 0);
	nic->eeprom_wc = 1 << addr_len;

	if (start + count >= nic->eeprom_wc)
		return -EINVAL;

	for (addr = start; addr < start + count; addr++)
		e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);

	/* The checksum, stored in the last word, is calculated such that
	 * the sum of words should be 0xBABA
	 */
	for (addr = 0; addr < nic->eeprom_wc - 1; addr++)
		checksum += le16_to_cpu(nic->eeprom[addr]);
	nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum);
	e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
		nic->eeprom[nic->eeprom_wc - 1]);

	return 0;
}
0810
#define E100_WAIT_SCB_TIMEOUT	20000	/* poll iterations before giving up */
#define E100_WAIT_SCB_FAST	20	/* iterations before inserting delays */
/* Issue one command through the SCB.  Waits for the previous command
 * to be accepted (cmd_lo reads back zero), loads the general pointer
 * operand (skipped for cuc_resume, which takes none), then writes the
 * opcode.  Returns 0 or -EAGAIN if the SCB never became free.
 */
static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
	unsigned long flags;
	unsigned int i;
	int err = 0;

	spin_lock_irqsave(&nic->cmd_lock, flags);

	/* Previous command is accepted when SCB clears */
	for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if (likely(!ioread8(&nic->csr->scb.cmd_lo)))
			break;
		cpu_relax();
		if (unlikely(i > E100_WAIT_SCB_FAST))
			udelay(5);
	}
	if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(cmd != cuc_resume))
		iowrite32(dma_addr, &nic->csr->scb.gen_ptr);
	iowrite8(cmd, &nic->csr->scb.cmd_lo);

err_unlock:
	spin_unlock_irqrestore(&nic->cmd_lock, flags);

	return err;
}
0843
/* Take the next free Command Block, let cb_prepare fill it in, and
 * hand pending CBs to the device.  The suspend (cb_s) bit is set on
 * the new CB and then cleared on its predecessor so the CU never runs
 * off the end of the list; dma_wmb() orders those two stores as seen
 * by the device.  Returns 0, -ENOMEM when no CB is free, or -ENOSPC
 * when this CB consumed the last free slot.
 */
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
	int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
	struct cb *cb;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&nic->cb_lock, flags);

	if (unlikely(!nic->cbs_avail)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	cb = nic->cb_to_use;
	nic->cb_to_use = cb->next;
	nic->cbs_avail--;
	cb->skb = skb;

	err = cb_prepare(nic, cb, skb);
	if (err)
		goto err_unlock;

	if (unlikely(!nic->cbs_avail))
		err = -ENOSPC;

	/* Order the suspend-bit handoff: set S on the new tail before
	 * clearing it on the previous tail.
	 */
	cb->command |= cpu_to_le16(cb_s);
	dma_wmb();
	cb->prev->command &= cpu_to_le16(~cb_s);

	while (nic->cb_to_send != nic->cb_to_use) {
		if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd,
			nic->cb_to_send->dma_addr))) {
			/* Command failed; leave cb_to_send where it is so
			 * the retry path can resubmit.  If the ring is now
			 * full, kick the tx-timeout worker to recover.
			 */
			if (err == -ENOSPC) {
				/* will reset and try again */
				schedule_work(&nic->tx_timeout_task);
			}
			break;
		} else {
			nic->cuc_cmd = cuc_resume;
			nic->cb_to_send = nic->cb_to_send->next;
		}
	}

err_unlock:
	spin_unlock_irqrestore(&nic->cb_lock, flags);

	return err;
}
0901
0902 static int mdio_read(struct net_device *netdev, int addr, int reg)
0903 {
0904 struct nic *nic = netdev_priv(netdev);
0905 return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0);
0906 }
0907
0908 static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
0909 {
0910 struct nic *nic = netdev_priv(netdev);
0911
0912 nic->mdio_ctrl(nic, addr, mdi_write, reg, data);
0913 }
0914
0915
/* Perform one MDIO transaction through the mdi_ctrl register.
 * @dir is mdi_read or mdi_write; @data is ignored for reads.
 * Waits for mdi_ready before starting and again for completion
 * (up to ~2 ms each), under mdio_lock.  Returns the data word read,
 * or 0 if the interface never became ready.
 */
static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
{
	u32 data_out = 0;
	unsigned int i;
	unsigned long flags;

	/* Wait for any previous transaction to finish */
	spin_lock_irqsave(&nic->mdio_lock, flags);
	for (i = 100; i; --i) {
		if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready)
			break;
		udelay(20);
	}
	if (unlikely(!i)) {
		netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
		spin_unlock_irqrestore(&nic->mdio_lock, flags);
		return 0;		/* No way to indicate timeout error */
	}
	/* Kick off the transaction: reg, phy addr, direction, write data */
	iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);

	for (i = 0; i < 100; i++) {
		udelay(20);
		if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready)
			break;
	}
	spin_unlock_irqrestore(&nic->mdio_lock, flags);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
		     dir == mdi_read ? "READ" : "WRITE",
		     addr, reg, data, data_out);
	return (u16)data_out;
}
0954
0955
/* MDIO access method for the 82552 PHY.  Intercepts BMCR writes that
 * enable/restart autonegotiation and folds the currently advertised
 * 100 Mb modes into the speed/duplex bits before passing the write to
 * the hardware; everything else goes straight through.
 */
static u16 mdio_ctrl_phy_82552_v(struct nic *nic,
				 u32 addr,
				 u32 dir,
				 u32 reg,
				 u16 data)
{
	if ((reg == MII_BMCR) && (dir == mdi_write)) {
		if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) {
			u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
							MII_ADVERTISE);

			/* Workaround: mirror the advertised 100 Mb modes
			 * into BMCR speed/duplex when (re)starting
			 * autonegotiation.
			 */
			if (advert & ADVERTISE_100FULL)
				data |= BMCR_SPEED100 | BMCR_FULLDPLX;
			else if (advert & ADVERTISE_100HALF)
				data |= BMCR_SPEED100;
		}
	}
	return mdio_ctrl_hw(nic, addr, dir, reg, data);
}
0979
0980
0981
0982
0983
0984
0985
/* MDIO access method for MII-less PHYs (i82503, 80c24, ...): no real
 * registers exist, so emulate the minimum the mii library needs.
 * Reads report a fixed link-up, autoneg-complete, 10 Mb profile;
 * writes are accepted and discarded.  Unhandled registers return
 * 0xFFFF and log a debug message.
 */
static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
				      u32 addr,
				      u32 dir,
				      u32 reg,
				      u16 data)
{
	if (dir == mdi_read) {
		switch (reg) {
		case MII_BMCR:
			/* Auto-negotiation, full-duplex */
			return BMCR_ANENABLE |
				BMCR_FULLDPLX;
		case MII_BMSR:
			/* Link up, autoneg capable, 10 Mb full-duplex */
			return BMSR_LSTATUS |
				BMSR_ANEGCAPABLE |
				BMSR_10FULL;
		case MII_ADVERTISE:
			/* 10/half, 10/full */
			return ADVERTISE_10HALF |
				ADVERTISE_10FULL;
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	} else {
		switch (reg) {
		default:
			netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
				     "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
				     dir == mdi_read ? "READ" : "WRITE",
				     addr, reg, data);
			return 0xFFFF;
		}
	}
}
1028 static inline int e100_phy_supports_mii(struct nic *nic)
1029 {
1030
1031
1032
1033 return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated);
1034 }
1035
/* Fill in driver defaults after probe: MAC revision, ring-size
 * parameter ranges, tx threshold/command word, the blank RFD template,
 * and the mii library callbacks.
 */
static void e100_get_defaults(struct nic *nic)
{
	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };

	/* MAC type is encoded in the PCI revision ID, except on ICH parts */
	nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision;
	if (nic->mac == mac_unknown)
		nic->mac = mac_82557_D100_A;

	nic->params.rfds = rfds;
	nic->params.cbs = cbs;

	/* Quadwords to DMA into FIFO before starting frame transmit */
	nic->tx_threshold = 0xE0;

	/* Newer MACs get the CNA interrupt delay; older ones interrupt
	 * on every tx completion.
	 */
	nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
		((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

	/* Template for how new RFDs are initialized */
	nic->blank_rfd.command = 0;
	nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF);
	nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);

	/* MII setup for the generic mii library */
	nic->mii.phy_id_mask = 0x1F;
	nic->mii.reg_num_mask = 0x1F;
	nic->mii.dev = nic->netdev;
	nic->mii.mdio_read = mdio_read;
	nic->mii.mdio_write = mdio_write;
}
1068
/* cb_prepare callback: fill a Command Block with the device
 * configuration bytes.  Base values are set first, then adjusted for
 * PHY type, promiscuous/loopback mode, RXFCS/RXALL features, WoL, and
 * MAC revision.  Always returns 0.
 */
static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	struct config *config = &cb->u.config;
	u8 *c = (u8 *)config;
	struct net_device *netdev = nic->netdev;

	cb->command = cpu_to_le16(cb_config);

	memset(config, 0, sizeof(struct config));

	/* Baseline configuration (byte offsets per struct config) */
	config->byte_count = 0x16;		/* bytes in this struct */
	config->rx_fifo_limit = 0x8;		/* bytes in FIFO before DMA */
	config->direct_rx_dma = 0x1;		/* reserved */
	config->standard_tcb = 0x1;		/* 1=standard, 0=extended */
	config->standard_stat_counter = 0x1;	/* 1=standard, 0=extended */
	config->rx_discard_short_frames = 0x1;	/* 1=discard, 0=save */
	config->tx_underrun_retry = 0x3;	/* retries on underrun */
	if (e100_phy_supports_mii(nic))
		config->mii_mode = 1;		/* 1=MII mode, 0=503 mode */
	config->pad10 = 0x6;
	config->no_source_addr_insertion = 0x1;	/* 1=no, 0=yes */
	config->preamble_length = 0x2;		/* 0=1, 1=3, 2=7, 3=15 bytes */
	config->ifs = 0x6;			/* x16 = inter frame spacing */
	config->ip_addr_hi = 0xF2;		/* ARP IP filter - not used */
	config->pad15_1 = 0x1;
	config->pad15_2 = 0x1;
	config->crs_or_cdt = 0x0;		/* 0=CRS only, 1=CRS or CDT */
	config->fc_delay_hi = 0x40;		/* time delay for fc frame */
	config->tx_padding = 0x1;		/* 1=pad short frames */
	config->fc_priority_threshold = 0x7;	/* 7=priority fc disabled */
	config->pad18 = 0x1;
	config->full_duplex_pin = 0x1;		/* 1=examine FDX# pin */
	config->pad20_1 = 0x1F;
	config->fc_priority_location = 0x1;	/* 1=byte#31, 0=byte#19 */
	config->pad21_1 = 0x5;

	config->adaptive_ifs = nic->adaptive_ifs;
	config->loopback = nic->loopback;

	if (nic->mii.force_media && nic->mii.full_duplex)
		config->full_duplex_force = 0x1;	/* 1=force, 0=auto */

	if (nic->flags & promiscuous || nic->loopback) {
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
		config->promiscuous_mode = 0x1;		/* 1=on, 0=off */
	}

	if (unlikely(netdev->features & NETIF_F_RXFCS))
		config->rx_crc_transfer = 0x1;	/* 1=save, 0=discard */

	if (nic->flags & multicast_all)
		config->multicast_all = 0x1;	/* 1=accept, 0=no */

	/* disable WoL when up */
	if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
		config->magic_packet_disable = 0x1;	/* 1=off, 0=on */

	if (nic->mac >= mac_82558_D101_A4) {
		config->fc_disable = 0x1;	/* 1=Tx fc off, 0=Tx fc on */
		config->mwi_enable = 0x1;	/* 1=enable, 0=disable */
		config->standard_tcb = 0x0;	/* 1=standard, 0=extended */
		config->rx_long_ok = 0x1;	/* 1=VLANs ok, 0=standard */
		if (nic->mac >= mac_82559_D101M) {
			config->tno_intr = 0x1;		/* TCO stats enable */
			/* Enable TCO in extended config */
			if (nic->mac >= mac_82551_10) {
				config->byte_count = 0x20; /* extended bytes */
				config->rx_d102_mode = 0x1; /* GMRC for TCO */
			}
		} else {
			config->standard_stat_counter = 0x0;
		}
	}

	if (netdev->features & NETIF_F_RXALL) {
		config->rx_save_overruns = 0x1; /* 1=save, 0=discard */
		config->rx_save_bad_frames = 0x1;	/* 1=save, 0=discard */
		config->rx_discard_short_frames = 0x0;	/* 1=discard, 0=save */
	}

	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
		     c + 0);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
		     c + 8);
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
		     c + 16);
	return 0;
}
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
/* CPUSaver microcode tuning parameters patched into the image by
 * e100_setup_ucode(): bundle small frames, max frames per bundle,
 * interrupt delay.
 */
#define BUNDLESMALL 1
#define BUNDLEMAX (u16)6
#define INTDELAY (u16)1536

/* Select, fetch, and validate the CPUSaver microcode image for this
 * MAC revision.  ICH parts and MACs with no image return NULL (no
 * microcode needed); a missing image is fatal only for 82551F/10
 * (required=true), otherwise CPUSaver is simply disabled.  A cached
 * image in nic->fw is reused across resets.  Returns the firmware,
 * NULL, or ERR_PTR on failure.
 */
static const struct firmware *e100_request_firmware(struct nic *nic)
{
	const char *fw_name;
	const struct firmware *fw = nic->fw;
	u8 timer, bundle, min_size;
	int err = 0;
	bool required = false;

	/* do not load u-code for ICH devices */
	if (nic->flags & ich)
		return NULL;

	/* Select the microcode image by MAC revision */
	if (nic->mac == mac_82559_D101M) {
		fw_name = FIRMWARE_D101M;
	} else if (nic->mac == mac_82559_D101S) {
		fw_name = FIRMWARE_D101S;
	} else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
		fw_name = FIRMWARE_D102E;
		required = true;
	} else {
		return NULL;
	}

	/* Reuse the image cached from a previous load, if any */
	if (!fw)
		err = request_firmware(&fw, fw_name, &nic->pdev->dev);

	if (err) {
		if (required) {
			netif_err(nic, probe, nic->netdev,
				  "Failed to load firmware \"%s\": %d\n",
				  fw_name, err);
			return ERR_PTR(err);
		} else {
			netif_info(nic, probe, nic->netdev,
				   "CPUSaver disabled. Needs \"%s\": %d\n",
				   fw_name, err);
			return NULL;
		}
	}

	/* Image must be exactly UCODE_SIZE words plus three trailing
	 * patch-offset bytes (timer, bundle, min_size).
	 */
	if (fw->size != UCODE_SIZE * 4 + 3) {
		netif_err(nic, probe, nic->netdev,
			  "Firmware \"%s\" has wrong size %zu\n",
			  fw_name, fw->size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
	    min_size >= UCODE_SIZE) {
		netif_err(nic, probe, nic->netdev,
			  "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
			  fw_name, timer, bundle, min_size);
		release_firmware(fw);
		return ERR_PTR(-EINVAL);
	}

	/* Cache for subsequent loads (e.g. after reset) */
	nic->fw = fw;
	return fw;
}
1306
/* cb_prepare callback for microcode load.  The skb argument actually
 * smuggles the firmware pointer (see e100_load_ucode_wait), so it is
 * cleared from the CB before use.  Copies the image into the CB and
 * patches the low 16 bits of three words - interrupt delay, bundle
 * max, and bundle-small threshold - at the offsets stored at the end
 * of the firmware blob.
 */
static int e100_setup_ucode(struct nic *nic, struct cb *cb,
			    struct sk_buff *skb)
{
	const struct firmware *fw = (void *)skb;
	u8 timer, bundle, min_size;

	/* It's not a real skb; we just abused the fact that e100_exec_cb
	 * will pass it through to here.
	 */
	cb->skb = NULL;

	/* firmware is stored as little endian already */
	memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4);

	/* Read timer, bundle and min_size from end of firmware blob */
	timer = fw->data[UCODE_SIZE * 4];
	bundle = fw->data[UCODE_SIZE * 4 + 1];
	min_size = fw->data[UCODE_SIZE * 4 + 2];

	/* Insert user-tunable settings in the low half of each word */
	cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[timer] |= cpu_to_le32(INTDELAY);
	cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX);
	cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000);
	cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);

	cb->command = cpu_to_le16(cb_ucode | cb_el);
	return 0;
}
1336
/* Load CPUSaver microcode and busy-wait (up to ~500 ms) for the CB to
 * complete with OK status.  A NULL/absent image is not an error.
 * Forces the next CU command to be cuc_start since the ucode CB ends
 * the list (cb_el).  Returns 0 on success or a negative errno.
 */
static inline int e100_load_ucode_wait(struct nic *nic)
{
	const struct firmware *fw;
	int err = 0, counter = 50;
	struct cb *cb = nic->cb_to_clean;

	fw = e100_request_firmware(nic);
	/* If it's NULL, then no ucode is required */
	if (IS_ERR_OR_NULL(fw))
		return PTR_ERR_OR_ZERO(fw);

	/* The firmware pointer rides in the skb argument; see
	 * e100_setup_ucode().
	 */
	if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
		netif_err(nic, probe, nic->netdev,
			  "ucode cmd failed with error %d\n", err);

	/* must restart cuc */
	nic->cuc_cmd = cuc_start;

	/* wait for completion */
	e100_write_flush(nic);
	udelay(10);

	/* wait for possibly (ouch) 500ms */
	while (!(cb->status & cpu_to_le16(cb_complete))) {
		msleep(10);
		if (!--counter) break;
	}

	/* ack any interrupts, something could have been set */
	iowrite8(~0, &nic->csr->scb.stat_ack);

	/* if the command failed, or is not OK, notify and return */
	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
		netif_err(nic, probe, nic->netdev, "ucode load failed\n");
		err = -EPERM;
	}

	return err;
}
1376
1377 static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
1378 struct sk_buff *skb)
1379 {
1380 cb->command = cpu_to_le16(cb_iaaddr);
1381 memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
1382 return 0;
1383 }
1384
/* cb_prepare callback: request a register dump into the dump_buf area
 * of the DMA-coherent nic->mem block.  Always returns 0.
 */
static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
	cb->command = cpu_to_le16(cb_dump);
	cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
		offsetof(struct mem, dump_buf));
	return 0;
}
1392
1393 static int e100_phy_check_without_mii(struct nic *nic)
1394 {
1395 u8 phy_type;
1396 int without_mii;
1397
1398 phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f;
1399
1400 switch (phy_type) {
1401 case NoSuchPhy:
1402 case I82503:
1403 case S80C24:
1404
1405
1406
1407
1408
1409
1410 netif_info(nic, probe, nic->netdev,
1411 "found MII-less i82503 or 80c24 or other PHY\n");
1412
1413 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1414 nic->mii.phy_id = 0;
1415
1416
1417
1418
1419
1420 without_mii = 1;
1421 break;
1422 default:
1423 without_mii = 0;
1424 break;
1425 }
1426 return without_mii;
1427 }
1428
#define NCONFIG_AUTO_SWITCH	0x0080
#define MII_NSC_CONG		MII_RESV1
#define NSC_CONG_ENABLE		0x0100
#define NSC_CONG_TXREADY	0x0400
/* Locate and initialize the PHY: discover its MDIO address, isolate all
 * other addresses, and apply per-PHY quirks (NSC congestion control,
 * 82552 flow-control advertisement, MDI/MDI-X auto switching). */
static int e100_phy_init(struct nic *nic)
{
	struct net_device *netdev = nic->netdev;
	u32 addr;
	u16 bmcr, stat, id_lo, id_hi, cong;

	/* Discover phy addr by searching addrs in order {1,0,2,...,31} */
	for (addr = 0; addr < 32; addr++) {
		nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		/* BMSR read twice: link bits latch, so the second read
		 * reflects the current state */
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
			break;
	}
	if (addr == 32) {
		/* uhoh, no PHY detected: check whether we seem to be some
		 * 80c24 or i82503 SEEM-less board in MII mode */
		if (e100_phy_check_without_mii(nic))
			return 0; /* simply return and hope for the best */
		else {
			/* for unknown cases log a fatal error */
			netif_err(nic, hw, nic->netdev,
				  "Failed to locate any known PHY, aborting\n");
			return -EAGAIN;
		}
	} else
		netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
			     "phy_addr = %d\n", nic->mii.phy_id);

	/* Get phy ID */
	id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
	id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
	nic->phy = (u32)id_hi << 16 | (u32)id_lo;
	netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
		     "phy ID = 0x%08X\n", nic->phy);

	/* Select the discovered phy and isolate all the others */
	for (addr = 0; addr < 32; addr++) {
		if (addr != nic->mii.phy_id) {
			mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE);
		} else if (nic->phy != phy_82552_v) {
			bmcr = mdio_read(netdev, addr, MII_BMCR);
			mdio_write(netdev, addr, MII_BMCR,
				bmcr & ~BMCR_ISOLATE);
		}
	}
	/* Workaround for 82552: clear ISOLATE on the selected phy_id last.
	 * Note: bmcr here still holds the value read for the selected PHY
	 * in the discovery loop above (the isolate loop skipped re-reading
	 * it for the 82552 case). */
	if (nic->phy == phy_82552_v)
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
			bmcr & ~BMCR_ISOLATE);

	/* Handle National tx phys: disable congestion control */
#define NCS_PHY_MODEL_MASK	0xFFF0FFFF
	if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) {
		cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
		cong |= NSC_CONG_TXREADY;
		cong &= ~NSC_CONG_ENABLE;
		mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
	}

	if (nic->phy == phy_82552_v) {
		u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);

		/* assign special tweaked mdio_ctrl() function */
		nic->mdio_ctrl = mdio_ctrl_phy_82552_v;

		/* Workaround Si not advertising flow-control during autoneg */
		advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);

		/* Reset for the above changes to take effect */
		bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
		bmcr |= BMCR_RESET;
		mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
	} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
	   (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
		/* enable/disable MDI/MDI-X auto-switching */
		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
	}

	return 0;
}
1525
/* Full hardware bring-up: reset, self-test, PHY init, microcode load,
 * configuration, address setup and statistics-dump setup.  Any failure
 * aborts the sequence and the error is returned to the caller. */
static int e100_hw_init(struct nic *nic)
{
	int err = 0;

	e100_hw_reset(nic);

	/* NOTE(review): a function-entry trace logged at err severity is
	 * unusual; consider netif_printk(KERN_DEBUG) — confirm intent. */
	netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
	if ((err = e100_self_test(nic)))
		return err;

	if ((err = e100_phy_init(nic)))
		return err;
	/* give CU and RU their base addresses before issuing commands */
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	/* point the hardware statistics dump at our shared-memory area */
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
		return err;

	/* interrupts stay off until e100_up() enables them */
	e100_disable_irq(nic);

	return 0;
}
1558
1559 static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1560 {
1561 struct net_device *netdev = nic->netdev;
1562 struct netdev_hw_addr *ha;
1563 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
1564
1565 cb->command = cpu_to_le16(cb_multi);
1566 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1567 i = 0;
1568 netdev_for_each_mc_addr(ha, netdev) {
1569 if (i == count)
1570 break;
1571 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
1572 ETH_ALEN);
1573 }
1574 return 0;
1575 }
1576
1577 static void e100_set_multicast_list(struct net_device *netdev)
1578 {
1579 struct nic *nic = netdev_priv(netdev);
1580
1581 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1582 "mc_count=%d, flags=0x%04X\n",
1583 netdev_mc_count(netdev), netdev->flags);
1584
1585 if (netdev->flags & IFF_PROMISC)
1586 nic->flags |= promiscuous;
1587 else
1588 nic->flags &= ~promiscuous;
1589
1590 if (netdev->flags & IFF_ALLMULTI ||
1591 netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
1592 nic->flags |= multicast_all;
1593 else
1594 nic->flags &= ~multicast_all;
1595
1596 e100_exec_cb(nic, NULL, e100_configure);
1597 e100_exec_cb(nic, NULL, e100_multi);
1598 }
1599
/* Harvest the hardware statistics dump (written by the previous
 * cuc_dump_reset command) into netdev and driver counters, then kick off
 * the next dump.  The "complete" marker location depends on MAC revision
 * because newer MACs append extra counters to the dump layout. */
static void e100_update_stats(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct net_device_stats *ns = &dev->stats;
	struct stats *s = &nic->mem->stats;
	__le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause :
		(nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames :
		&s->complete;

	/* Device's stats reporting may take several microseconds to
	 * complete, so we're always waiting for results of the
	 * previous command. */

	if (*complete == cpu_to_le32(cuc_dump_reset_complete)) {
		*complete = 0;
		nic->tx_frames = le32_to_cpu(s->tx_good_frames);
		nic->tx_collisions = le32_to_cpu(s->tx_total_collisions);
		ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions);
		ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions);
		ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs);
		ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns);
		ns->collisions += nic->tx_collisions;
		ns->tx_errors += le32_to_cpu(s->tx_max_collisions) +
			le32_to_cpu(s->tx_lost_crs);
		nic->rx_short_frame_errors +=
			le32_to_cpu(s->rx_short_frame_errors);
		/* rx_length_errors is recomputed (=), not accumulated (+=),
		 * from the driver's own running totals */
		ns->rx_length_errors = nic->rx_short_frame_errors +
			nic->rx_over_length_errors;
		ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors);
		ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors);
		ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors);
		ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors);
		ns->rx_errors += le32_to_cpu(s->rx_crc_errors) +
			le32_to_cpu(s->rx_alignment_errors) +
			le32_to_cpu(s->rx_short_frame_errors) +
			le32_to_cpu(s->rx_cdt_errors);
		nic->tx_deferred += le32_to_cpu(s->tx_deferred);
		nic->tx_single_collisions +=
			le32_to_cpu(s->tx_single_collisions);
		nic->tx_multiple_collisions +=
			le32_to_cpu(s->tx_multiple_collisions);
		/* flow-control counters exist on 82558 and newer only */
		if (nic->mac >= mac_82558_D101_A4) {
			nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause);
			nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause);
			nic->rx_fc_unsupported +=
				le32_to_cpu(s->fc_rcv_unsupported);
			/* TCO counters exist on 82559 and newer only */
			if (nic->mac >= mac_82559_D101M) {
				nic->tx_tco_frames +=
					le16_to_cpu(s->xmt_tco_frames);
				nic->rx_tco_frames +=
					le16_to_cpu(s->rcv_tco_frames);
			}
		}
	}

	/* start the next dump; results are picked up on a later call */
	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
}
1661
1662 static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
1663 {
1664
1665
1666
1667 if (duplex == DUPLEX_HALF) {
1668 u32 prev = nic->adaptive_ifs;
1669 u32 min_frames = (speed == SPEED_100) ? 1000 : 100;
1670
1671 if ((nic->tx_frames / 32 < nic->tx_collisions) &&
1672 (nic->tx_frames > min_frames)) {
1673 if (nic->adaptive_ifs < 60)
1674 nic->adaptive_ifs += 5;
1675 } else if (nic->tx_frames < min_frames) {
1676 if (nic->adaptive_ifs >= 5)
1677 nic->adaptive_ifs -= 5;
1678 }
1679 if (nic->adaptive_ifs != prev)
1680 e100_exec_cb(nic, NULL, e100_configure);
1681 }
1682 }
1683
/* Periodic (2s) watchdog: maintain link state via the mii library, fire a
 * software interrupt to recover from rare Rx allocation failures, refresh
 * statistics, tune adaptive IFS, and apply per-chip workarounds. */
static void e100_watchdog(struct timer_list *t)
{
	struct nic *nic = from_timer(nic, t, watchdog);
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	u32 speed;

	netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
		     "right now = %ld\n", jiffies);

	/* mii library handles link maintenance tasks */

	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Int generation bit */
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);

	e100_update_stats(nic);
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);

	if (nic->mac <= mac_82557_D100_C)
		/* Issue a multicast command to workaround a 557 lock up */
		e100_set_multicast_list(nic->netdev);

	if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
		/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang */
		nic->flags |= ich_10h_workaround;
	else
		nic->flags &= ~ich_10h_workaround;

	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
1734
1735 static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
1736 struct sk_buff *skb)
1737 {
1738 dma_addr_t dma_addr;
1739 cb->command = nic->tx_command;
1740
1741 dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len,
1742 DMA_TO_DEVICE);
1743
1744 if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
1745 dev_kfree_skb_any(skb);
1746 skb = NULL;
1747 return -ENOMEM;
1748 }
1749
1750
1751
1752
1753
1754 if (unlikely(skb->no_fcs))
1755 cb->command |= cpu_to_le16(cb_tx_nc);
1756 else
1757 cb->command &= ~cpu_to_le16(cb_tx_nc);
1758
1759
1760 if ((nic->cbs_avail & ~15) == nic->cbs_avail)
1761 cb->command |= cpu_to_le16(cb_i);
1762 cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
1763 cb->u.tcb.tcb_byte_count = 0;
1764 cb->u.tcb.threshold = nic->tx_threshold;
1765 cb->u.tcb.tbd_count = 1;
1766 cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
1767 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1768 skb_tx_timestamp(skb);
1769 return 0;
1770 }
1771
/* ndo_start_xmit handler: queue the skb on a command block.  On -ENOSPC
 * the skb WAS queued but the ring is now full; on -ENOMEM nothing was
 * queued and NETDEV_TX_BUSY asks the stack to retry the same skb. */
static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
				   struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);
	int err;

	if (nic->flags & ich_10h_workaround) {
		/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
		 * Issue a NOP command followed by a 1us delay before
		 * issuing the Tx command. */
		if (e100_exec_cmd(nic, cuc_nop, 0))
			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
				     "exec cuc_nop failed\n");
		udelay(1);
	}

	err = e100_exec_cb(nic, skb, e100_xmit_prepare);

	switch (err) {
	case -ENOSPC:
		/* We queued the skb, but now we're out of space. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "No space for CB\n");
		netif_stop_queue(netdev);
		break;
	case -ENOMEM:
		/* This is a hard error - log it. */
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "Out of Tx resources, returning skb\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
1807
/* Reclaim completed transmit command blocks: unmap and free their skbs,
 * update counters, and wake the queue if it was stopped for lack of CBs.
 * Returns nonzero when at least one skb was reclaimed. */
static int e100_tx_clean(struct nic *nic)
{
	struct net_device *dev = nic->netdev;
	struct cb *cb;
	int tx_cleaned = 0;

	spin_lock(&nic->cb_lock);

	/* Clean CBs marked complete */
	for (cb = nic->cb_to_clean;
	    cb->status & cpu_to_le16(cb_complete);
	    cb = nic->cb_to_clean = cb->next) {
		dma_rmb(); /* read skb after status */
		netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
			     "cb[%d]->status = 0x%04X\n",
			     (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
			     cb->status);

		/* only transmit CBs carry an skb; control CBs have none */
		if (likely(cb->skb != NULL)) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += cb->skb->len;

			dma_unmap_single(&nic->pdev->dev,
					 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
					 le16_to_cpu(cb->u.tcb.tbd.size),
					 DMA_TO_DEVICE);
			dev_kfree_skb_any(cb->skb);
			cb->skb = NULL;
			tx_cleaned = 1;
		}
		cb->status = 0;
		nic->cbs_avail++;
	}

	spin_unlock(&nic->cb_lock);

	/* Recover from running out of Tx resources in xmit_frame */
	if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev)))
		netif_wake_queue(nic->netdev);

	return tx_cleaned;
}
1850
/* Release the command-block ring: unmap and free any skbs still attached
 * to unreclaimed CBs, return the ring memory to the DMA pool, and reset
 * the CB bookkeeping pointers. */
static void e100_clean_cbs(struct nic *nic)
{
	if (nic->cbs) {
		/* walk forward until every CB is accounted for */
		while (nic->cbs_avail != nic->params.cbs.count) {
			struct cb *cb = nic->cb_to_clean;
			if (cb->skb) {
				dma_unmap_single(&nic->pdev->dev,
						 le32_to_cpu(cb->u.tcb.tbd.buf_addr),
						 le16_to_cpu(cb->u.tcb.tbd.size),
						 DMA_TO_DEVICE);
				dev_kfree_skb(cb->skb);
			}
			nic->cb_to_clean = nic->cb_to_clean->next;
			nic->cbs_avail++;
		}
		dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
		nic->cbs = NULL;
		nic->cbs_avail = 0;
	}
	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean =
		nic->cbs;
}
1874
/* Allocate and wire up the circular command-block ring from the DMA pool.
 * Each CB gets CPU-side next/prev links plus a little-endian bus-address
 * link for the hardware.  Returns 0 or -ENOMEM. */
static int e100_alloc_cbs(struct nic *nic)
{
	struct cb *cb;
	unsigned int i, count = nic->params.cbs.count;

	nic->cuc_cmd = cuc_start;
	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
	nic->cbs_avail = 0;

	/* zalloc: CBs must start out with clear status/command fields */
	nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL,
				   &nic->cbs_dma_addr);
	if (!nic->cbs)
		return -ENOMEM;

	for (cb = nic->cbs, i = 0; i < count; cb++, i++) {
		cb->next = (i + 1 < count) ? cb + 1 : nic->cbs;
		cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1;

		cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb);
		/* hardware follows this link to the next CB in the ring */
		cb->link = cpu_to_le32(nic->cbs_dma_addr +
			((i+1) % count) * sizeof(struct cb));
	}

	nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs;
	nic->cbs_avail = count;

	return 0;
}
1903
1904 static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1905 {
1906 if (!nic->rxs) return;
1907 if (RU_SUSPENDED != nic->ru_running) return;
1908
1909
1910 if (!rx) rx = nic->rxs;
1911
1912
1913 if (rx->skb) {
1914 e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1915 nic->ru_running = RU_RUNNING;
1916 }
1917 }
1918
#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
/* Allocate and DMA-map a receive buffer for one RFD slot, seed it with
 * the blank RFD template, and chain it to the previous RFD so the
 * hardware can follow the list.  Returns 0 or -ENOMEM. */
static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
{
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
		return -ENOMEM;

	/* Init, and map the RFD. */
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
	rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data,
				      RFD_BUF_LEN, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
		rx->skb = NULL;
		rx->dma_addr = 0;
		return -ENOMEM;
	}

	/* Link the RFD to end of RFA by linking previous RFD to
	 * this one.  We are safe to touch the previous RFD because
	 * it is protected by the before last buffer's el bit being set */
	if (rx->prev->skb) {
		struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		dma_sync_single_for_device(&nic->pdev->dev,
					   rx->prev->dma_addr,
					   sizeof(struct rfd),
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}
1951
/* Indicate one received frame up the stack if its RFD is complete.
 * Returns 0 when a frame was consumed, -EAGAIN when the NAPI quota is
 * exhausted, -ENODATA when the RFD has no data yet. */
static int e100_rx_indicate(struct nic *nic, struct rx *rx,
			    unsigned int *work_done, unsigned int work_to_do)
{
	struct net_device *dev = nic->netdev;
	struct sk_buff *skb = rx->skb;
	struct rfd *rfd = (struct rfd *)skb->data;
	u16 rfd_status, actual_size;
	u16 fcs_pad = 0;

	if (unlikely(work_done && *work_done >= work_to_do))
		return -EAGAIN;

	/* Need to sync before taking a peek at cb_complete bit */
	dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr,
				sizeof(struct rfd), DMA_BIDIRECTIONAL);
	rfd_status = le16_to_cpu(rfd->status);

	netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
		     "status=0x%04X\n", rfd_status);
	dma_rmb(); /* read size after status bit */

	/* If data isn't ready, nothing to indicate */
	if (unlikely(!(rfd_status & cb_complete))) {
		/* If this RFD is the el-bit marker but we believe the RU is
		 * still running, peek at the status register: the RU may
		 * have stopped (no resources) while interrupts were off.
		 * This allows a fast restart without re-enabling irqs. */
		if ((le16_to_cpu(rfd->command) & cb_el) &&
		    (RU_RUNNING == nic->ru_running))
			/* the el-bit stop really happened */
			if (ioread8(&nic->csr->scb.status) & rus_no_res)
				nic->ru_running = RU_SUSPENDED;
		dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
					   sizeof(struct rfd),
					   DMA_FROM_DEVICE);
		return -ENODATA;
	}

	/* Get actual data size, clamped to the buffer we mapped */
	if (unlikely(dev->features & NETIF_F_RXFCS))
		fcs_pad = 4;
	actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
	if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd)))
		actual_size = RFD_BUF_LEN - sizeof(struct rfd);

	/* Get data */
	dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN,
			 DMA_BIDIRECTIONAL);

	/* Same RU-stopped check as above, but for the case where the
	 * completed frame itself carried the el bit. */
	if ((le16_to_cpu(rfd->command) & cb_el) &&
	    (RU_RUNNING == nic->ru_running)) {

		if (ioread8(&nic->csr->scb.status) & rus_no_res)
			nic->ru_running = RU_SUSPENDED;
	}

	/* Pull off the RFD and put the actual data (minus eth hdr) */
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);

	/* RXALL: deliver even bad/oversized frames; note the goto jumps
	 * into the else branch below — legal C, intentionally reusing the
	 * normal delivery path. */
	if (unlikely(dev->features & NETIF_F_RXALL)) {
		if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad)
			/* still count the oversize even though we keep it */
			nic->rx_over_length_errors++;
		goto process_skb;
	}

	if (unlikely(!(rfd_status & cb_ok))) {
		/* Don't indicate if hardware indicates errors */
		dev_kfree_skb_any(skb);
	} else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) {
		/* Don't indicate oversized frames */
		nic->rx_over_length_errors++;
		dev_kfree_skb_any(skb);
	} else {
process_skb:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (actual_size - fcs_pad);
		netif_receive_skb(skb);
		if (work_done)
			(*work_done)++;
	}

	rx->skb = NULL;

	return 0;
}
2050
/* Indicate received frames, refill the RFD ring, move the el-bit "stop
 * marker" forward to the new before-last buffer, and restart the receive
 * unit if it suspended while we worked. */
static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
			  unsigned int work_to_do)
{
	struct rx *rx;
	int restart_required = 0, err = 0;
	struct rx *old_before_last_rx, *new_before_last_rx;
	struct rfd *old_before_last_rfd, *new_before_last_rfd;

	/* Indicate newly arrived packets */
	for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
		err = e100_rx_indicate(nic, rx, work_done, work_to_do);
		/* Hit quota or no more to clean */
		if (-EAGAIN == err || -ENODATA == err)
			break;
	}

	/* On EAGAIN, hit quota so have more work to do, restart once
	 * cleanup is complete.
	 * Else, are we restarting the receiver after a suspend?  Only
	 * restart when the RU is known-suspended. */
	if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running)
		restart_required = 1;

	old_before_last_rx = nic->rx_to_use->prev->prev;
	old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data;

	/* Alloc new skbs to refill list */
	for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
		if (unlikely(e100_rx_alloc_skb(nic, rx)))
			break; /* Better luck next time (see watchdog) */
	}

	new_before_last_rx = nic->rx_to_use->prev->prev;
	if (new_before_last_rx != old_before_last_rx) {
		/* Set the el-bit on the buffer that is before the last
		 * buffer.  This lets us update the next pointer on the last
		 * buffer without worrying about hardware touching it.
		 * We set the size to 0 to prevent hardware from touching
		 * this buffer: when it hits an el-bit RFD of size 0 it
		 * raises RNR and the RU enters the No Resources state
		 * without completing or writing to the buffer. */
		new_before_last_rfd =
			(struct rfd *)new_before_last_rx->skb->data;
		new_before_last_rfd->size = 0;
		new_before_last_rfd->command |= cpu_to_le16(cb_el);
		dma_sync_single_for_device(&nic->pdev->dev,
					   new_before_last_rx->dma_addr,
					   sizeof(struct rfd),
					   DMA_BIDIRECTIONAL);

		/* Now that we have a new stop marker, clear the old one:
		 * first drop its el bit, then restore its full size.
		 * Each step is synced to the device separately. */
		old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
		dma_sync_single_for_device(&nic->pdev->dev,
					   old_before_last_rx->dma_addr,
					   sizeof(struct rfd),
					   DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		dma_sync_single_for_device(&nic->pdev->dev,
					   old_before_last_rx->dma_addr,
					   sizeof(struct rfd),
					   DMA_BIDIRECTIONAL);
	}

	if (restart_required) {
		/* ack the rnr before restarting so it doesn't re-fire */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
		if (work_done)
			(*work_done)++;
	}
}
2130
2131 static void e100_rx_clean_list(struct nic *nic)
2132 {
2133 struct rx *rx;
2134 unsigned int i, count = nic->params.rfds.count;
2135
2136 nic->ru_running = RU_UNINITIALIZED;
2137
2138 if (nic->rxs) {
2139 for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
2140 if (rx->skb) {
2141 dma_unmap_single(&nic->pdev->dev,
2142 rx->dma_addr, RFD_BUF_LEN,
2143 DMA_BIDIRECTIONAL);
2144 dev_kfree_skb(rx->skb);
2145 }
2146 }
2147 kfree(nic->rxs);
2148 nic->rxs = NULL;
2149 }
2150
2151 nic->rx_to_use = nic->rx_to_clean = NULL;
2152 }
2153
/* Build the circular RFD ring: allocate the rx descriptors, give each a
 * mapped skb, and plant the el-bit "stop marker" on the before-last
 * buffer.  Returns 0 or -ENOMEM (with everything cleaned up). */
static int e100_rx_alloc_list(struct nic *nic)
{
	struct rx *rx;
	unsigned int i, count = nic->params.rfds.count;
	struct rfd *before_last;

	nic->rx_to_use = nic->rx_to_clean = NULL;
	nic->ru_running = RU_UNINITIALIZED;

	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL)))
		return -ENOMEM;

	/* link the ring and attach a mapped skb to each slot */
	for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
		rx->next = (i + 1 < count) ? rx + 1 : nic->rxs;
		rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1;
		if (e100_rx_alloc_skb(nic, rx)) {
			e100_rx_clean_list(nic);
			return -ENOMEM;
		}
	}

	/* Set the el-bit on the buffer that is before the last buffer.
	 * This lets us update the next pointer on the last buffer without
	 * worrying about hardware touching it.
	 * Size 0 stops the hardware at this RFD (RNR / No Resources)
	 * without it completing or writing to the buffer. */
	rx = nic->rxs->prev->prev;
	before_last = (struct rfd *)rx->skb->data;
	before_last->command |= cpu_to_le16(cb_el);
	before_last->size = 0;
	dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
				   sizeof(struct rfd), DMA_BIDIRECTIONAL);

	nic->rx_to_use = nic->rx_to_clean = nic->rxs;
	/* RU is idle until e100_start_receiver() issues ruc_start */
	nic->ru_running = RU_SUSPENDED;

	return 0;
}
2193
/* Interrupt handler (IRQF_SHARED): ack the controller's event bits, note a
 * suspended receive unit on RNR, and hand the real work to NAPI. */
static irqreturn_t e100_intr(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct nic *nic = netdev_priv(netdev);
	u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);

	netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
		     "stat_ack = 0x%02X\n", stat_ack);

	if (stat_ack == stat_ack_not_ours ||	/* Not our interrupt */
	    stat_ack == stat_ack_not_present)	/* Hardware is ejected */
		return IRQ_NONE;

	/* Ack interrupt(s) */
	iowrite8(stat_ack, &nic->csr->scb.stat_ack);

	/* We hit Receive No Resource (RNR); restart RU after cleaning */
	if (stat_ack & stat_ack_rnr)
		nic->ru_running = RU_SUSPENDED;

	/* mask irqs until the poll routine has run and re-enabled them */
	if (likely(napi_schedule_prep(&nic->napi))) {
		e100_disable_irq(nic);
		__napi_schedule(&nic->napi);
	}

	return IRQ_HANDLED;
}
2221
2222 static int e100_poll(struct napi_struct *napi, int budget)
2223 {
2224 struct nic *nic = container_of(napi, struct nic, napi);
2225 unsigned int work_done = 0;
2226
2227 e100_rx_clean(nic, &work_done, budget);
2228 e100_tx_clean(nic);
2229
2230
2231 if (work_done == budget)
2232 return budget;
2233
2234
2235 if (likely(napi_complete_done(napi, work_done)))
2236 e100_enable_irq(nic);
2237
2238 return work_done;
2239 }
2240
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' for netconsole/netpoll: with irqs masked, run the
 * interrupt handler and tx reclaim by hand, then unmask again. */
static void e100_netpoll(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_disable_irq(nic);
	e100_intr(nic->pdev->irq, netdev);
	e100_tx_clean(nic);
	e100_enable_irq(nic);
}
#endif
2252
2253 static int e100_set_mac_address(struct net_device *netdev, void *p)
2254 {
2255 struct nic *nic = netdev_priv(netdev);
2256 struct sockaddr *addr = p;
2257
2258 if (!is_valid_ether_addr(addr->sa_data))
2259 return -EADDRNOTAVAIL;
2260
2261 eth_hw_addr_set(netdev, addr->sa_data);
2262 e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2263
2264 return 0;
2265 }
2266
2267 static int e100_asf(struct nic *nic)
2268 {
2269
2270 return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
2271 (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) &&
2272 !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) &&
2273 ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE);
2274 }
2275
/* Bring the interface up: allocate rings, initialize the hardware, start
 * the receiver and watchdog, request the irq, then enable NAPI and
 * interrupts.  Unwinds in reverse order via gotos on failure. */
static int e100_up(struct nic *nic)
{
	int err;

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_rx_clean_list;
	if ((err = e100_hw_init(nic)))
		goto err_clean_cbs;
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
			       nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
	/* enable ints _after_ enabling poll, preventing a race between
	 * disable ints+schedule */
	e100_enable_irq(nic);
	return 0;

err_no_irq:
	del_timer_sync(&nic->watchdog);
err_clean_cbs:
	e100_clean_cbs(nic);
err_rx_clean_list:
	e100_rx_clean_list(nic);
	return err;
}
2307
/* Take the interface down; mirror of e100_up().  Order matters: quiesce
 * NAPI and the queue, reset the hardware, then release irq/timer/rings. */
static void e100_down(struct nic *nic)
{
	/* wait for poll routine to complete before disabling hardware */
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
	e100_clean_cbs(nic);
	e100_rx_clean_list(nic);
}
2320
/* ndo_tx_timeout handler: defer recovery to process context. */
static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
2329
/* Deferred Tx-timeout recovery: log controller status and, if the device
 * is still up, recover with a full down/up cycle under RTNL. */
static void e100_tx_timeout_task(struct work_struct *work)
{
	struct nic *nic = container_of(work, struct nic, tx_timeout_task);
	struct net_device *netdev = nic->netdev;

	netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
		     "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));

	/* rtnl keeps open/close from racing with the reset cycle */
	rtnl_lock();
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
	}
	rtnl_unlock();
}
2345
/* Internal MAC or PHY loopback test: transmit one all-0xFF frame in
 * loopback mode and compare it byte-for-byte against what is received.
 * Returns 0 on pass, -EAGAIN on data mismatch, other negatives on setup
 * failure.  Caller is expected to restore normal operation afterwards. */
static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
{
	int err;
	struct sk_buff *skb;

	/* Use driver resources to perform internal MAC or PHY
	 * loopback test.  A single packet is prepared and transmitted
	 * in loopback mode, and the test passes if the received
	 * packet compares byte-for-byte to the transmitted packet. */

	if ((err = e100_rx_alloc_list(nic)))
		return err;
	if ((err = e100_alloc_cbs(nic)))
		goto err_clean_rx;

	/* ICH PHY loopback is broken so do MAC loopback instead */
	if (nic->flags & ich && loopback_mode == lb_phy)
		loopback_mode = lb_mac;

	nic->loopback = loopback_mode;
	if ((err = e100_hw_init(nic)))
		goto err_loopback_none;

	if (loopback_mode == lb_phy)
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
			   BMCR_LOOPBACK);

	e100_start_receiver(nic, NULL);

	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
		err = -ENOMEM;
		goto err_loopback_none;
	}
	skb_put(skb, ETH_DATA_LEN);
	memset(skb->data, 0xFF, ETH_DATA_LEN);
	e100_xmit_frame(skb, nic->netdev);

	/* give the frame time to loop back around */
	msleep(10);

	dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr,
				RFD_BUF_LEN, DMA_BIDIRECTIONAL);

	/* received payload starts after the RFD header */
	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
		   skb->data, ETH_DATA_LEN))
		err = -EAGAIN;

err_loopback_none:
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
	nic->loopback = lb_none;
	e100_clean_cbs(nic);
	e100_hw_reset(nic);
err_clean_rx:
	e100_rx_clean_list(nic);
	return err;
}
2401
2402 #define MII_LED_CONTROL 0x1B
2403 #define E100_82552_LED_OVERRIDE 0x19
2404 #define E100_82552_LED_ON 0x000F
2405 #define E100_82552_LED_OFF 0x000A
2406
2407 static int e100_get_link_ksettings(struct net_device *netdev,
2408 struct ethtool_link_ksettings *cmd)
2409 {
2410 struct nic *nic = netdev_priv(netdev);
2411
2412 mii_ethtool_get_link_ksettings(&nic->mii, cmd);
2413
2414 return 0;
2415 }
2416
2417 static int e100_set_link_ksettings(struct net_device *netdev,
2418 const struct ethtool_link_ksettings *cmd)
2419 {
2420 struct nic *nic = netdev_priv(netdev);
2421 int err;
2422
2423 mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
2424 err = mii_ethtool_set_link_ksettings(&nic->mii, cmd);
2425 e100_exec_cb(nic, NULL, e100_configure);
2426
2427 return err;
2428 }
2429
2430 static void e100_get_drvinfo(struct net_device *netdev,
2431 struct ethtool_drvinfo *info)
2432 {
2433 struct nic *nic = netdev_priv(netdev);
2434 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2435 strlcpy(info->bus_info, pci_name(nic->pdev),
2436 sizeof(info->bus_info));
2437 }
2438
2439 #define E100_PHY_REGS 0x1D
2440 static int e100_get_regs_len(struct net_device *netdev)
2441 {
2442 struct nic *nic = netdev_priv(netdev);
2443
2444
2445
2446
2447 return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
2448 }
2449
/* ethtool get_regs: word 0 packs SCB cmd_hi/cmd_lo/status; the next
 * E100_PHY_REGS words are the PHY registers in reverse order; the
 * remainder is the controller's internal dump buffer. */
static void e100_get_regs(struct net_device *netdev,
			  struct ethtool_regs *regs, void *p)
{
	struct nic *nic = netdev_priv(netdev);
	u32 *buff = p;
	int i;

	regs->version = (1 << 24) | nic->pdev->revision;
	buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
		ioread8(&nic->csr->scb.cmd_lo) << 16 |
		ioread16(&nic->csr->scb.status);
	for (i = 0; i < E100_PHY_REGS; i++)
		/* PHY regs are stored highest-register-first */
		buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
					E100_PHY_REGS - 1 - i);
	/* the dump command completes asynchronously; the sleep gives the
	 * controller time to fill dump_buf before we copy it out */
	memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
	memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
	       sizeof(nic->mem->dump_buf));
}
2474
2475 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2476 {
2477 struct nic *nic = netdev_priv(netdev);
2478 wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0;
2479 wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0;
2480 }
2481
2482 static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2483 {
2484 struct nic *nic = netdev_priv(netdev);
2485
2486 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2487 !device_can_wakeup(&nic->pdev->dev))
2488 return -EOPNOTSUPP;
2489
2490 if (wol->wolopts)
2491 nic->flags |= wol_magic;
2492 else
2493 nic->flags &= ~wol_magic;
2494
2495 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2496
2497 e100_exec_cb(nic, NULL, e100_configure);
2498
2499 return 0;
2500 }
2501
2502 static u32 e100_get_msglevel(struct net_device *netdev)
2503 {
2504 struct nic *nic = netdev_priv(netdev);
2505 return nic->msg_enable;
2506 }
2507
2508 static void e100_set_msglevel(struct net_device *netdev, u32 value)
2509 {
2510 struct nic *nic = netdev_priv(netdev);
2511 nic->msg_enable = value;
2512 }
2513
2514 static int e100_nway_reset(struct net_device *netdev)
2515 {
2516 struct nic *nic = netdev_priv(netdev);
2517 return mii_nway_restart(&nic->mii);
2518 }
2519
2520 static u32 e100_get_link(struct net_device *netdev)
2521 {
2522 struct nic *nic = netdev_priv(netdev);
2523 return mii_link_ok(&nic->mii);
2524 }
2525
2526 static int e100_get_eeprom_len(struct net_device *netdev)
2527 {
2528 struct nic *nic = netdev_priv(netdev);
2529 return nic->eeprom_wc << 1;
2530 }
2531
2532 #define E100_EEPROM_MAGIC 0x1234
2533 static int e100_get_eeprom(struct net_device *netdev,
2534 struct ethtool_eeprom *eeprom, u8 *bytes)
2535 {
2536 struct nic *nic = netdev_priv(netdev);
2537
2538 eeprom->magic = E100_EEPROM_MAGIC;
2539 memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len);
2540
2541 return 0;
2542 }
2543
/* ethtool set_eeprom: update the shadow copy, then flush the touched
 * span to the physical EEPROM. */
static int e100_set_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct nic *nic = netdev_priv(netdev);

	/* refuse writes that don't carry the driver's magic token */
	if (eeprom->magic != E100_EEPROM_MAGIC)
		return -EINVAL;

	memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len);

	/* offset/len are bytes but eeprom_save works in 16-bit words, so
	 * the span is widened by one word to cover a straddled last byte */
	return e100_eeprom_save(nic, eeprom->offset >> 1,
				(eeprom->len >> 1) + 1);
}
2557
2558 static void e100_get_ringparam(struct net_device *netdev,
2559 struct ethtool_ringparam *ring,
2560 struct kernel_ethtool_ringparam *kernel_ring,
2561 struct netlink_ext_ack *extack)
2562 {
2563 struct nic *nic = netdev_priv(netdev);
2564 struct param_range *rfds = &nic->params.rfds;
2565 struct param_range *cbs = &nic->params.cbs;
2566
2567 ring->rx_max_pending = rfds->max;
2568 ring->tx_max_pending = cbs->max;
2569 ring->rx_pending = rfds->count;
2570 ring->tx_pending = cbs->count;
2571 }
2572
2573 static int e100_set_ringparam(struct net_device *netdev,
2574 struct ethtool_ringparam *ring,
2575 struct kernel_ethtool_ringparam *kernel_ring,
2576 struct netlink_ext_ack *extack)
2577 {
2578 struct nic *nic = netdev_priv(netdev);
2579 struct param_range *rfds = &nic->params.rfds;
2580 struct param_range *cbs = &nic->params.cbs;
2581
2582 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2583 return -EINVAL;
2584
2585 if (netif_running(netdev))
2586 e100_down(nic);
2587 rfds->count = max(ring->rx_pending, rfds->min);
2588 rfds->count = min(rfds->count, rfds->max);
2589 cbs->count = max(ring->tx_pending, cbs->min);
2590 cbs->count = min(cbs->count, cbs->max);
2591 netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
2592 rfds->count, cbs->count);
2593 if (netif_running(netdev))
2594 e100_up(nic);
2595
2596 return 0;
2597 }
2598
/* Test names reported for ETH_SS_TEST; order must match the data[] slots
 * filled in by e100_diag_test().
 */
static const char e100_gstrings_test[][ETH_GSTRING_LEN] = {
	"Link test     (on/offline)",
	"Eeprom test   (on/offline)",
	"Self test        (offline)",
	"Mac loopback     (offline)",
	"Phy loopback     (offline)",
};
#define E100_TEST_LEN	ARRAY_SIZE(e100_gstrings_test)
2607
/* ethtool self_test: run the diagnostics named in e100_gstrings_test.
 * A nonzero data[] entry marks the corresponding test as failed.
 */
static void e100_diag_test(struct net_device *netdev,
			   struct ethtool_test *test, u64 *data)
{
	struct ethtool_cmd cmd;
	struct nic *nic = netdev_priv(netdev);
	int i;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);	/* link test */
	data[1] = e100_eeprom_load(nic);	/* EEPROM checksum test */
	if (test->flags & ETH_TEST_FL_OFFLINE) {

		/* save speed, duplex & autoneg settings */
		mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
	}
	for (i = 0; i < E100_TEST_LEN; i++)
		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;

	/* presumably gives the link time to settle after the loopback
	 * tests before control returns to userspace -- TODO confirm */
	msleep_interruptible(4 * 1000);
}
2640
/* ethtool set_phys_id: blink the port LED so the adapter can be located.
 * Returning 2 from ETHTOOL_ID_ACTIVE asks the ethtool core to call back
 * with ID_ON/ID_OFF twice per second.
 */
static int e100_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	struct nic *nic = netdev_priv(netdev);
	enum led_state {
		led_on     = 0x01,
		led_off    = 0x04,
		led_on_559 = 0x05,
		led_on_557 = 0x07,
	};
	/* the 82552 PHY uses a dedicated LED-override register instead of
	 * the standard MII LED control register */
	u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
		MII_LED_CONTROL;
	u16 leds = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;

	case ETHTOOL_ID_ON:
		/* the "on" code differs per MAC/PHY generation */
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
		       (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
		break;

	case ETHTOOL_ID_OFF:
		leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
		break;

	case ETHTOOL_ID_INACTIVE:
		/* leds stays 0 -- presumably restores hardware-controlled
		 * LED behavior; TODO confirm against PHY datasheet */
		break;
	}

	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
	return 0;
}
2675
/* Statistic names reported for ETH_SS_STATS.  The first group mirrors the
 * fields of struct net_device_stats in declaration order; the rest are
 * driver-private counters appended by e100_get_ethtool_stats().
 */
static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_deferred", "tx_single_collisions", "tx_multi_collisions",
	"tx_flow_control_pause", "rx_flow_control_pause",
	"rx_flow_control_unsupported", "tx_tco_packets", "rx_tco_packets",
	"rx_short_frame_errors", "rx_over_length_errors",
};
/* number of names above that come straight from netdev->stats; must match
 * the copy loop in e100_get_ethtool_stats() */
#define E100_NET_STATS_LEN	21
#define E100_STATS_LEN	ARRAY_SIZE(e100_gstrings_stats)
2691
2692 static int e100_get_sset_count(struct net_device *netdev, int sset)
2693 {
2694 switch (sset) {
2695 case ETH_SS_TEST:
2696 return E100_TEST_LEN;
2697 case ETH_SS_STATS:
2698 return E100_STATS_LEN;
2699 default:
2700 return -EOPNOTSUPP;
2701 }
2702 }
2703
/* ethtool get_ethtool_stats: fill @data in e100_gstrings_stats order.
 * The first E100_NET_STATS_LEN entries are lifted from netdev->stats by
 * treating struct net_device_stats as an array of unsigned long counters
 * in declaration order; the remainder are driver-private counters.
 */
static void e100_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct nic *nic = netdev_priv(netdev);
	int i;

	for (i = 0; i < E100_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = nic->tx_deferred;
	data[i++] = nic->tx_single_collisions;
	data[i++] = nic->tx_multiple_collisions;
	data[i++] = nic->tx_fc_pause;
	data[i++] = nic->rx_fc_pause;
	data[i++] = nic->rx_fc_unsupported;
	data[i++] = nic->tx_tco_frames;
	data[i++] = nic->rx_tco_frames;
	data[i++] = nic->rx_short_frame_errors;
	data[i++] = nic->rx_over_length_errors;
}
2724
2725 static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2726 {
2727 switch (stringset) {
2728 case ETH_SS_TEST:
2729 memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test));
2730 break;
2731 case ETH_SS_STATS:
2732 memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats));
2733 break;
2734 }
2735 }
2736
/* ethtool operations exported via netdev->ethtool_ops */
static const struct ethtool_ops e100_ethtool_ops = {
	.get_drvinfo = e100_get_drvinfo,
	.get_regs_len = e100_get_regs_len,
	.get_regs = e100_get_regs,
	.get_wol = e100_get_wol,
	.set_wol = e100_set_wol,
	.get_msglevel = e100_get_msglevel,
	.set_msglevel = e100_set_msglevel,
	.nway_reset = e100_nway_reset,
	.get_link = e100_get_link,
	.get_eeprom_len = e100_get_eeprom_len,
	.get_eeprom = e100_get_eeprom,
	.set_eeprom = e100_set_eeprom,
	.get_ringparam = e100_get_ringparam,
	.set_ringparam = e100_set_ringparam,
	.self_test = e100_diag_test,
	.get_strings = e100_get_strings,
	.set_phys_id = e100_set_phys_id,
	.get_ethtool_stats = e100_get_ethtool_stats,
	.get_sset_count = e100_get_sset_count,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = e100_get_link_ksettings,
	.set_link_ksettings = e100_set_link_ksettings,
};
2761
2762 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2763 {
2764 struct nic *nic = netdev_priv(netdev);
2765
2766 return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2767 }
2768
2769 static int e100_alloc(struct nic *nic)
2770 {
2771 nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem),
2772 &nic->dma_addr, GFP_KERNEL);
2773 return nic->mem ? 0 : -ENOMEM;
2774 }
2775
2776 static void e100_free(struct nic *nic)
2777 {
2778 if (nic->mem) {
2779 dma_free_coherent(&nic->pdev->dev, sizeof(struct mem),
2780 nic->mem, nic->dma_addr);
2781 nic->mem = NULL;
2782 }
2783 }
2784
2785 static int e100_open(struct net_device *netdev)
2786 {
2787 struct nic *nic = netdev_priv(netdev);
2788 int err = 0;
2789
2790 netif_carrier_off(netdev);
2791 if ((err = e100_up(nic)))
2792 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2793 return err;
2794 }
2795
/* ndo_stop: shut the interface down. */
static int e100_close(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	e100_down(nic);
	return 0;
}
2801
/* ndo_set_features: push RXFCS/RXALL changes down to the hardware by
 * re-running the configure command block.
 * NOTE(review): returns 1 (not 0) after a change; the netdev core treats
 * a positive return as "driver adjusted features, re-read them".  Looks
 * intentional -- confirm before changing to 0.
 */
static int e100_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct nic *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* only RXFCS/RXALL affect the hardware configuration */
	if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	e100_exec_cb(nic, NULL, e100_configure);
	return 1;
}
2815
/* net_device operations exported via netdev->netdev_ops */
static const struct net_device_ops e100_netdev_ops = {
	.ndo_open		= e100_open,
	.ndo_stop		= e100_close,
	.ndo_start_xmit		= e100_xmit_frame,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= e100_set_multicast_list,
	.ndo_set_mac_address	= e100_set_mac_address,
	.ndo_eth_ioctl		= e100_do_ioctl,
	.ndo_tx_timeout		= e100_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e100_netpoll,
#endif
	.ndo_set_features	= e100_set_features,
};
2830
/* PCI probe: allocate and initialize the netdev/nic pair, map the device
 * registers, reset the MAC, read the MAC address from EEPROM and register
 * the network interface.  The error labels unwind in reverse order of
 * resource acquisition.
 */
static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct nic *nic;
	int err;

	if (!(netdev = alloc_etherdev(sizeof(struct nic))))
		return -ENOMEM;

	/* optional receive features: FCS delivery, no-FCS tx, bad-frame rx */
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	netdev->netdev_ops = &e100_netdev_ops;
	netdev->ethtool_ops = &e100_ethtool_ops;
	netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
	/* temporary name for early log output; replaced by "eth%d" before
	 * register_netdev() below */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	nic = netdev_priv(netdev);
	netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);

	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_dev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, DRV_NAME))) {
		netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	if (use_io)
		netif_info(nic, probe, nic->netdev, "using i/o access mode\n");

	/* BAR 0 is MMIO and BAR 1 is I/O-port access to the same CSRs */
	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	/* driver_data in the id table flags ICH-family parts */
	if (ent->driver_data)
		nic->flags |= ich;
	else
		nic->flags &= ~ich;

	e100_get_defaults(nic);

	/* D100 MAC can't handle VLAN-oversized frames */
	if (nic->mac < mac_82558_D101_A4)
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	/* locks must be initialized before calling hw_reset */
	spin_lock_init(&nic->cb_lock);
	spin_lock_init(&nic->cmd_lock);
	spin_lock_init(&nic->mdio_lock);

	/* Reset the device before pci_set_master() in case the device is
	 * in some funky state and has an interrupt pending */
	e100_hw_reset(nic);

	pci_set_master(pdev);

	timer_setup(&nic->watchdog, e100_watchdog, 0);

	INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);

	if ((err = e100_alloc(nic))) {
		netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
		goto err_out_iounmap;
	}

	if ((err = e100_eeprom_load(nic)))
		goto err_out_free;

	e100_phy_init(nic);

	/* the start of the EEPROM image holds the MAC address */
	eth_hw_addr_set(netdev, (u8 *)nic->eeprom);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
		}
	}

	/* Wol magic packet can be enabled from eeprom */
	if ((nic->mac >= mac_82558_D101_A4) &&
	    (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) {
		nic->flags |= wol_magic;
		device_set_wakeup_enable(&pdev->dev, true);
	}

	/* ack any pending wake events, disable PME */
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
		goto err_out_free;
	}
	nic->cbs_pool = dma_pool_create(netdev->name,
					&nic->pdev->dev,
					nic->params.cbs.max * sizeof(struct cb),
					sizeof(u32),
					0);
	if (!nic->cbs_pool) {
		netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
		err = -ENOMEM;
		goto err_out_pool;
	}
	netif_info(nic, probe, nic->netdev,
		   "addr 0x%llx, irq %d, MAC addr %pM\n",
		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
		   pdev->irq, netdev->dev_addr);

	return 0;

	/* error unwind: reverse order of acquisition */
err_out_pool:
	unregister_netdev(netdev);
err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	free_netdev(netdev);
	return err;
}
2984
/* PCI remove: tear the device down in roughly reverse order of probe. */
static void e100_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct nic *nic = netdev_priv(netdev);
		/* stop all traffic/ioctl paths before freeing resources */
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		/* dma_pool_destroy() accepts NULL if pool creation failed */
		dma_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
3000
#define E100_82552_SMARTSPEED   0x14   /* Hardware control register */
#define E100_82552_REV_ANEG     0x0200 /* Reverse auto-negotiation */
#define E100_82552_ANEG_NOW     0x0400 /* Auto-negotiate now */
/* Common suspend/shutdown path: quiesce the NIC and report through
 * @enable_wake whether wake-up (WoL magic packet or ASF) should stay armed.
 */
static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* detach device; this also stops the watchdog from rescheduling */
	netif_device_detach(netdev);

	if (netif_running(netdev))
		e100_down(nic);

	/* NOTE(review): bitwise '|' (not '||') evaluates e100_asf()
	 * unconditionally; both operands are zero/nonzero flags so the
	 * result is the same -- appears intentional. */
	if ((nic->flags & wol_magic) | e100_asf(nic)) {
		/* enable reverse auto-negotiation on the 82552 PHY so it
		 * can drop the link speed for low-power wake */
		if (nic->phy == phy_82552_v) {
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
						   E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
				   E100_82552_SMARTSPEED, smartspeed |
				   E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
		}
		*enable_wake = true;
	} else {
		*enable_wake = false;
	}

	pci_disable_device(pdev);
}
3031
3032 static int __e100_power_off(struct pci_dev *pdev, bool wake)
3033 {
3034 if (wake)
3035 return pci_prepare_to_sleep(pdev);
3036
3037 pci_wake_from_d3(pdev, false);
3038 pci_set_power_state(pdev, PCI_D3hot);
3039
3040 return 0;
3041 }
3042
3043 static int __maybe_unused e100_suspend(struct device *dev_d)
3044 {
3045 bool wake;
3046
3047 __e100_shutdown(to_pci_dev(dev_d), &wake);
3048
3049 return 0;
3050 }
3051
/* PM resume hook: re-enable the PCI device, undo the low-power PHY
 * settings applied at suspend and bring the interface back up.
 */
static int __maybe_unused e100_resume(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct nic *nic = netdev_priv(netdev);
	int err;

	err = pci_enable_device(to_pci_dev(dev_d));
	if (err) {
		netdev_err(netdev, "Resume cannot enable PCI device, aborting\n");
		return err;
	}
	pci_set_master(to_pci_dev(dev_d));

	/* disable reverse auto-negotiation (was enabled at suspend) */
	if (nic->phy == phy_82552_v) {
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
					   E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
			   E100_82552_SMARTSPEED,
			   smartspeed & ~(E100_82552_REV_ANEG));
	}

	if (netif_running(netdev))
		e100_up(nic);

	netif_device_attach(netdev);

	return 0;
}
3082
/* .shutdown: quiesce the NIC; only transition to a low-power state when
 * the system is actually powering off (presumably so reboot/kexec paths
 * keep the device accessible -- TODO confirm).
 */
static void e100_shutdown(struct pci_dev *pdev)
{
	bool wake;
	__e100_shutdown(pdev, &wake);
	if (system_state == SYSTEM_POWER_OFF)
		__e100_power_off(pdev, wake);
}
3090
3091
3092
3093
3094
3095
3096
/**
 * e100_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI channel state
 *
 * Detach and quiesce the device, then ask the PCI core for a slot reset
 * (or disconnect if the failure is permanent).
 */
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
3114
3115
3116
3117
3118
3119
3120
/**
 * e100_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Re-enable the device and restart it from scratch, as if from a
 * cold boot.
 */
static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* only one function per card may perform the hardware reset */
	if (0 != PCI_FUNC(pdev->devfn))
		return PCI_ERS_RESULT_RECOVERED;
	e100_hw_reset(nic);
	e100_phy_init(nic);

	return PCI_ERS_RESULT_RECOVERED;
}
3140
3141
3142
3143
3144
3145
3146
3147
/**
 * e100_io_resume - resume normal operations
 * @pdev: Pointer to PCI device
 *
 * Resume normal operations after an error recovery sequence has
 * completed: reattach the netdev and, if it was running, reopen it and
 * restart the watchdog.
 */
static void e100_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
	}
}
3162
/* PCI error-recovery (AER) callbacks */
static const struct pci_error_handlers e100_err_handler = {
	.error_detected = e100_io_error_detected,
	.slot_reset = e100_io_slot_reset,
	.resume = e100_io_resume,
};

static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);

/* PCI driver registration for all e100_id_table devices */
static struct pci_driver e100_driver = {
	.name =         DRV_NAME,
	.id_table =     e100_id_table,
	.probe =        e100_probe,
	.remove =       e100_remove,

	/* Power Management hooks */
	.driver.pm =	&e100_pm_ops,

	.shutdown =     e100_shutdown,
	.err_handler = &e100_err_handler,
};
3183
3184 static int __init e100_init_module(void)
3185 {
3186 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3187 pr_info("%s\n", DRV_DESCRIPTION);
3188 pr_info("%s\n", DRV_COPYRIGHT);
3189 }
3190 return pci_register_driver(&e100_driver);
3191 }
3192
/* Module exit: unregister the driver from the PCI core. */
static void __exit e100_cleanup_module(void)
{
	pci_unregister_driver(&e100_driver);
}

module_init(e100_init_module);
module_exit(e100_cleanup_module);