// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */

0004 #include "e1000.h"
0005 #include <net/ip6_checksum.h>
0006 #include <linux/io.h>
0007 #include <linux/prefetch.h>
0008 #include <linux/bitops.h>
0009 #include <linux/if_vlan.h>
0010
0011 char e1000_driver_name[] = "e1000";
0012 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
0013 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
0014
0015
0016
0017
0018
0019
0020
0021
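/* e1000_pci_tbl - PCI Device ID Table
 *
 * Each entry is expanded from the INTEL_E1000_ETHERNET_DEVICE() macro
 * (defined in e1000.h); the last entry must be all 0s.
 */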
0022 static const struct pci_device_id e1000_pci_tbl[] = {
0023 INTEL_E1000_ETHERNET_DEVICE(0x1000),
0024 INTEL_E1000_ETHERNET_DEVICE(0x1001),
0025 INTEL_E1000_ETHERNET_DEVICE(0x1004),
0026 INTEL_E1000_ETHERNET_DEVICE(0x1008),
0027 INTEL_E1000_ETHERNET_DEVICE(0x1009),
0028 INTEL_E1000_ETHERNET_DEVICE(0x100C),
0029 INTEL_E1000_ETHERNET_DEVICE(0x100D),
0030 INTEL_E1000_ETHERNET_DEVICE(0x100E),
0031 INTEL_E1000_ETHERNET_DEVICE(0x100F),
0032 INTEL_E1000_ETHERNET_DEVICE(0x1010),
0033 INTEL_E1000_ETHERNET_DEVICE(0x1011),
0034 INTEL_E1000_ETHERNET_DEVICE(0x1012),
0035 INTEL_E1000_ETHERNET_DEVICE(0x1013),
0036 INTEL_E1000_ETHERNET_DEVICE(0x1014),
0037 INTEL_E1000_ETHERNET_DEVICE(0x1015),
0038 INTEL_E1000_ETHERNET_DEVICE(0x1016),
0039 INTEL_E1000_ETHERNET_DEVICE(0x1017),
0040 INTEL_E1000_ETHERNET_DEVICE(0x1018),
0041 INTEL_E1000_ETHERNET_DEVICE(0x1019),
0042 INTEL_E1000_ETHERNET_DEVICE(0x101A),
0043 INTEL_E1000_ETHERNET_DEVICE(0x101D),
0044 INTEL_E1000_ETHERNET_DEVICE(0x101E),
0045 INTEL_E1000_ETHERNET_DEVICE(0x1026),
0046 INTEL_E1000_ETHERNET_DEVICE(0x1027),
0047 INTEL_E1000_ETHERNET_DEVICE(0x1028),
0048 INTEL_E1000_ETHERNET_DEVICE(0x1075),
0049 INTEL_E1000_ETHERNET_DEVICE(0x1076),
0050 INTEL_E1000_ETHERNET_DEVICE(0x1077),
0051 INTEL_E1000_ETHERNET_DEVICE(0x1078),
0052 INTEL_E1000_ETHERNET_DEVICE(0x1079),
0053 INTEL_E1000_ETHERNET_DEVICE(0x107A),
0054 INTEL_E1000_ETHERNET_DEVICE(0x107B),
0055 INTEL_E1000_ETHERNET_DEVICE(0x107C),
0056 INTEL_E1000_ETHERNET_DEVICE(0x108A),
0057 INTEL_E1000_ETHERNET_DEVICE(0x1099),
0058 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
0059 INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
0060
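/* required last entry */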
0061 {0,}
0062 };
0063
0064 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
0065
0066 int e1000_up(struct e1000_adapter *adapter);
0067 void e1000_down(struct e1000_adapter *adapter);
0068 void e1000_reinit_locked(struct e1000_adapter *adapter);
0069 void e1000_reset(struct e1000_adapter *adapter);
0070 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
0071 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
0072 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
0073 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
0074 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
0075 struct e1000_tx_ring *txdr);
0076 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
0077 struct e1000_rx_ring *rxdr);
0078 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
0079 struct e1000_tx_ring *tx_ring);
0080 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
0081 struct e1000_rx_ring *rx_ring);
0082 void e1000_update_stats(struct e1000_adapter *adapter);
0083
0084 static int e1000_init_module(void);
0085 static void e1000_exit_module(void);
0086 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
0087 static void e1000_remove(struct pci_dev *pdev);
0088 static int e1000_alloc_queues(struct e1000_adapter *adapter);
0089 static int e1000_sw_init(struct e1000_adapter *adapter);
0090 int e1000_open(struct net_device *netdev);
0091 int e1000_close(struct net_device *netdev);
0092 static void e1000_configure_tx(struct e1000_adapter *adapter);
0093 static void e1000_configure_rx(struct e1000_adapter *adapter);
0094 static void e1000_setup_rctl(struct e1000_adapter *adapter);
0095 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
0096 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
0097 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
0098 struct e1000_tx_ring *tx_ring);
0099 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
0100 struct e1000_rx_ring *rx_ring);
0101 static void e1000_set_rx_mode(struct net_device *netdev);
0102 static void e1000_update_phy_info_task(struct work_struct *work);
0103 static void e1000_watchdog(struct work_struct *work);
0104 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
0105 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
0106 struct net_device *netdev);
0107 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
0108 static int e1000_set_mac(struct net_device *netdev, void *p);
0109 static irqreturn_t e1000_intr(int irq, void *data);
0110 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
0111 struct e1000_tx_ring *tx_ring);
0112 static int e1000_clean(struct napi_struct *napi, int budget);
0113 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
0114 struct e1000_rx_ring *rx_ring,
0115 int *work_done, int work_to_do);
0116 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
0117 struct e1000_rx_ring *rx_ring,
0118 int *work_done, int work_to_do);
0119 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
0120 struct e1000_rx_ring *rx_ring,
0121 int cleaned_count)
0122 {
0123 }
0124 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
0125 struct e1000_rx_ring *rx_ring,
0126 int cleaned_count);
0127 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
0128 struct e1000_rx_ring *rx_ring,
0129 int cleaned_count);
0130 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
0131 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
0132 int cmd);
0133 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
0134 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
0135 static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
0136 static void e1000_reset_task(struct work_struct *work);
0137 static void e1000_smartspeed(struct e1000_adapter *adapter);
0138 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
0139 struct sk_buff *skb);
0140
0141 static bool e1000_vlan_used(struct e1000_adapter *adapter);
0142 static void e1000_vlan_mode(struct net_device *netdev,
0143 netdev_features_t features);
0144 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
0145 bool filter_on);
0146 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
0147 __be16 proto, u16 vid);
0148 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
0149 __be16 proto, u16 vid);
0150 static void e1000_restore_vlan(struct e1000_adapter *adapter);
0151
0152 static int __maybe_unused e1000_suspend(struct device *dev);
0153 static int __maybe_unused e1000_resume(struct device *dev);
0154 static void e1000_shutdown(struct pci_dev *pdev);
0155
0156 #ifdef CONFIG_NET_POLL_CONTROLLER
0157
static void e1000_netpoll(struct net_device *netdev);
0159 #endif
0160
0161 #define COPYBREAK_DEFAULT 256
0162 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
0163 module_param(copybreak, uint, 0644);
0164 MODULE_PARM_DESC(copybreak,
0165 "Maximum size of packet that is copied to a new buffer on receive");
0166
0167 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
0168 pci_channel_state_t state);
0169 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
0170 static void e1000_io_resume(struct pci_dev *pdev);
0171
0172 static const struct pci_error_handlers e1000_err_handler = {
0173 .error_detected = e1000_io_error_detected,
0174 .slot_reset = e1000_io_slot_reset,
0175 .resume = e1000_io_resume,
0176 };
0177
0178 static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
0179
0180 static struct pci_driver e1000_driver = {
0181 .name = e1000_driver_name,
0182 .id_table = e1000_pci_tbl,
0183 .probe = e1000_probe,
0184 .remove = e1000_remove,
0185 .driver = {
0186 .pm = &e1000_pm_ops,
0187 },
0188 .shutdown = e1000_shutdown,
0189 .err_handler = &e1000_err_handler
0190 };
0191
0192 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
0193 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
0194 MODULE_LICENSE("GPL v2");
0195
0196 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
0197 static int debug = -1;
0198 module_param(debug, int, 0);
0199 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
0200
0201
0202
0203
0204
0205
0206
0207
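/**
 * e1000_get_hw_dev - look up the net_device that owns an e1000_hw
 * @hw: pointer to the HW struct
 *
 * Returns the net_device backing the adapter that contains @hw.
 */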
0208 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
0209 {
0210 struct e1000_adapter *adapter = hw->back;
0211 return adapter->netdev;
0212 }
0213
0214
0215
0216
0217
0218
0219
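/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. It registers the driver with the PCI subsystem and reports
 * any non-default copybreak setting.
 */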
0220 static int __init e1000_init_module(void)
0221 {
0222 int ret;
0223 pr_info("%s\n", e1000_driver_string);
0224
0225 pr_info("%s\n", e1000_copyright);
0226
0227 ret = pci_register_driver(&e1000_driver);
0228 if (copybreak != COPYBREAK_DEFAULT) {
0229 if (copybreak == 0)
0230 pr_info("copybreak disabled\n");
0231 else
0232 pr_info("copybreak enabled for "
0233 "packets <= %u bytes\n", copybreak);
0234 }
0235 return ret;
0236 }
0237
0238 module_init(e1000_init_module);
0239
0240
0241
0242
0243
0244
0245
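/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * Called just before the driver is removed from memory; unregisters
 * the PCI driver.
 */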
0246 static void __exit e1000_exit_module(void)
0247 {
0248 pci_unregister_driver(&e1000_driver);
0249 }
0250
0251 module_exit(e1000_exit_module);
0252
0253 static int e1000_request_irq(struct e1000_adapter *adapter)
0254 {
0255 struct net_device *netdev = adapter->netdev;
0256 irq_handler_t handler = e1000_intr;
0257 int irq_flags = IRQF_SHARED;
0258 int err;
0259
0260 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
0261 netdev);
0262 if (err) {
0263 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
0264 }
0265
0266 return err;
0267 }
0268
0269 static void e1000_free_irq(struct e1000_adapter *adapter)
0270 {
0271 struct net_device *netdev = adapter->netdev;
0272
0273 free_irq(adapter->pdev->irq, netdev);
0274 }
0275
0276
0277
0278
0279
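/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 */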
0280 static void e1000_irq_disable(struct e1000_adapter *adapter)
0281 {
0282 struct e1000_hw *hw = &adapter->hw;
0283
0284 ew32(IMC, ~0);
0285 E1000_WRITE_FLUSH();
0286 synchronize_irq(adapter->pdev->irq);
0287 }
0288
0289
0290
0291
0292
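/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 */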
0293 static void e1000_irq_enable(struct e1000_adapter *adapter)
0294 {
0295 struct e1000_hw *hw = &adapter->hw;
0296
0297 ew32(IMS, IMS_ENABLE_MASK);
0298 E1000_WRITE_FLUSH();
0299 }
0300
0301 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
0302 {
0303 struct e1000_hw *hw = &adapter->hw;
0304 struct net_device *netdev = adapter->netdev;
0305 u16 vid = hw->mng_cookie.vlan_id;
0306 u16 old_vid = adapter->mng_vlan_id;
0307
0308 if (!e1000_vlan_used(adapter))
0309 return;
0310
0311 if (!test_bit(vid, adapter->active_vlans)) {
0312 if (hw->mng_cookie.status &
0313 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
0314 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
0315 adapter->mng_vlan_id = vid;
0316 } else {
0317 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
0318 }
0319 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
0320 (vid != old_vid) &&
0321 !test_bit(old_vid, adapter->active_vlans))
0322 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
0323 old_vid);
0324 } else {
0325 adapter->mng_vlan_id = vid;
0326 }
0327 }
0328
0329 static void e1000_init_manageability(struct e1000_adapter *adapter)
0330 {
0331 struct e1000_hw *hw = &adapter->hw;
0332
0333 if (adapter->en_mng_pt) {
0334 u32 manc = er32(MANC);
0335
0336
0337 manc &= ~(E1000_MANC_ARP_EN);
0338
0339 ew32(MANC, manc);
0340 }
0341 }
0342
0343 static void e1000_release_manageability(struct e1000_adapter *adapter)
0344 {
0345 struct e1000_hw *hw = &adapter->hw;
0346
0347 if (adapter->en_mng_pt) {
0348 u32 manc = er32(MANC);
0349
0350
0351 manc |= E1000_MANC_ARP_EN;
0352
0353 ew32(MANC, manc);
0354 }
0355 }
0356
0357
0358
0359
0360
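/**
 * e1000_configure - configure the hardware for Rx and Tx
 * @adapter: board private structure
 *
 * Programs the Rx/Tx units after reset and refills the Rx descriptor rings.
 */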
0361 static void e1000_configure(struct e1000_adapter *adapter)
0362 {
0363 struct net_device *netdev = adapter->netdev;
0364 int i;
0365
0366 e1000_set_rx_mode(netdev);
0367
0368 e1000_restore_vlan(adapter);
0369 e1000_init_manageability(adapter);
0370
0371 e1000_configure_tx(adapter);
0372 e1000_setup_rctl(adapter);
0373 e1000_configure_rx(adapter);
0374
0375
0376
0377
0378 for (i = 0; i < adapter->num_rx_queues; i++) {
0379 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
0380 adapter->alloc_rx_buf(adapter, ring,
0381 E1000_DESC_UNUSED(ring));
0382 }
0383 }
0384
0385 int e1000_up(struct e1000_adapter *adapter)
0386 {
0387 struct e1000_hw *hw = &adapter->hw;
0388
0389
0390 e1000_configure(adapter);
0391
0392 clear_bit(__E1000_DOWN, &adapter->flags);
0393
0394 napi_enable(&adapter->napi);
0395
0396 e1000_irq_enable(adapter);
0397
0398 netif_wake_queue(adapter->netdev);
0399
0400
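/* fire a link change interrupt to start the watchdog */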
0401 ew32(ICS, E1000_ICS_LSC);
0402 return 0;
0403 }
0404
0405
0406
0407
0408
0409
0410
0411
0412
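/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The PHY may have been powered down to save power while the interface
 * was down; clear the power-down bit so link can come back up.
 */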
0413 void e1000_power_up_phy(struct e1000_adapter *adapter)
0414 {
0415 struct e1000_hw *hw = &adapter->hw;
0416 u16 mii_reg = 0;
0417
0418
0419 if (hw->media_type == e1000_media_type_copper) {
0420
0421
0422
0423 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
0424 mii_reg &= ~MII_CR_POWER_DOWN;
0425 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
0426 }
0427 }
0428
0429 static void e1000_power_down_phy(struct e1000_adapter *adapter)
0430 {
0431 struct e1000_hw *hw = &adapter->hw;
0432
0433
0434
0435
0436
0437
0438
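/* Power down the PHY so no link is implied when the interface is down.
 * The PHY is left powered up if Wake-on-LAN is enabled or if the
 * manageability firmware is using the PHY over SMBus.
 */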
0439 if (!adapter->wol && hw->mac_type >= e1000_82540 &&
0440 hw->media_type == e1000_media_type_copper) {
0441 u16 mii_reg = 0;
0442
0443 switch (hw->mac_type) {
0444 case e1000_82540:
0445 case e1000_82545:
0446 case e1000_82545_rev_3:
0447 case e1000_82546:
0448 case e1000_ce4100:
0449 case e1000_82546_rev_3:
0450 case e1000_82541:
0451 case e1000_82541_rev_2:
0452 case e1000_82547:
0453 case e1000_82547_rev_2:
0454 if (er32(MANC) & E1000_MANC_SMBUS_EN)
0455 goto out;
0456 break;
0457 default:
0458 goto out;
0459 }
0460 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
0461 mii_reg |= MII_CR_POWER_DOWN;
0462 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
0463 msleep(1);
0464 }
0465 out:
0466 return;
0467 }
0468
0469 static void e1000_down_and_stop(struct e1000_adapter *adapter)
0470 {
0471 set_bit(__E1000_DOWN, &adapter->flags);
0472
0473 cancel_delayed_work_sync(&adapter->watchdog_task);
0474
0475
0476
0477
0478
0479
0480
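/* The watchdog task is cancelled first (above) because it can
 * reschedule the deferred PHY info work; only then is it safe to
 * cancel the remaining delayed work items.
 */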
0481 cancel_delayed_work_sync(&adapter->phy_info_task);
0482 cancel_delayed_work_sync(&adapter->fifo_stall_task);
0483
0484
0485 if (!test_bit(__E1000_RESETTING, &adapter->flags))
0486 cancel_work_sync(&adapter->reset_task);
0487 }
0488
0489 void e1000_down(struct e1000_adapter *adapter)
0490 {
0491 struct e1000_hw *hw = &adapter->hw;
0492 struct net_device *netdev = adapter->netdev;
0493 u32 rctl, tctl;
0494
0495
0496 rctl = er32(RCTL);
0497 ew32(RCTL, rctl & ~E1000_RCTL_EN);
0498
0499
0500 netif_tx_disable(netdev);
0501
0502
0503 tctl = er32(TCTL);
0504 tctl &= ~E1000_TCTL_EN;
0505 ew32(TCTL, tctl);
0506
0507 E1000_WRITE_FLUSH();
0508 msleep(10);
0509
0510
0511
0512
0513
0514
0515
0516
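/* Set the carrier off only after transmits have been disabled in the
 * hardware, to avoid racing with the watchdog task and triggering
 * spurious Tx hang reports.
 */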
0517 netif_carrier_off(netdev);
0518
0519 napi_disable(&adapter->napi);
0520
0521 e1000_irq_disable(adapter);
0522
0523
0524
0525
0526
0527 e1000_down_and_stop(adapter);
0528
0529 adapter->link_speed = 0;
0530 adapter->link_duplex = 0;
0531
0532 e1000_reset(adapter);
0533 e1000_clean_all_tx_rings(adapter);
0534 e1000_clean_all_rx_rings(adapter);
0535 }
0536
0537 void e1000_reinit_locked(struct e1000_adapter *adapter)
0538 {
0539 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
0540 msleep(1);
0541
0542
0543 if (!test_bit(__E1000_DOWN, &adapter->flags)) {
0544 e1000_down(adapter);
0545 e1000_up(adapter);
0546 }
0547
0548 clear_bit(__E1000_RESETTING, &adapter->flags);
0549 }
0550
0551 void e1000_reset(struct e1000_adapter *adapter)
0552 {
0553 struct e1000_hw *hw = &adapter->hw;
0554 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
0555 bool legacy_pba_adjust = false;
0556 u16 hwm;
0557
0558
0559
0560
0561
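/* Repartition the packet buffer allocation (PBA) between the Rx and
 * Tx FIFOs according to MAC type and maximum frame size.
 */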
0562 switch (hw->mac_type) {
0563 case e1000_82542_rev2_0:
0564 case e1000_82542_rev2_1:
0565 case e1000_82543:
0566 case e1000_82544:
0567 case e1000_82540:
0568 case e1000_82541:
0569 case e1000_82541_rev_2:
0570 legacy_pba_adjust = true;
0571 pba = E1000_PBA_48K;
0572 break;
0573 case e1000_82545:
0574 case e1000_82545_rev_3:
0575 case e1000_82546:
0576 case e1000_ce4100:
0577 case e1000_82546_rev_3:
0578 pba = E1000_PBA_48K;
0579 break;
0580 case e1000_82547:
0581 case e1000_82547_rev_2:
0582 legacy_pba_adjust = true;
0583 pba = E1000_PBA_30K;
0584 break;
0585 case e1000_undefined:
0586 case e1000_num_macs:
0587 break;
0588 }
0589
0590 if (legacy_pba_adjust) {
0591 if (hw->max_frame_size > E1000_RXBUFFER_8192)
0592 pba -= 8;
0593
0594 if (hw->mac_type == e1000_82547) {
0595 adapter->tx_fifo_head = 0;
0596 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
0597 adapter->tx_fifo_size =
0598 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
0599 atomic_set(&adapter->tx_fifo_stall, 0);
0600 }
0601 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
0602
0603 ew32(PBA, pba);
0604
0605
0606
0607
0608
0609
0610
0611
0612 pba = er32(PBA);
0613
0614 tx_space = pba >> 16;
0615
0616 pba &= 0xffff;
0617
0618
0619
0620 min_tx_space = (hw->max_frame_size +
0621 sizeof(struct e1000_tx_desc) -
0622 ETH_FCS_LEN) * 2;
0623 min_tx_space = ALIGN(min_tx_space, 1024);
0624 min_tx_space >>= 10;
0625
0626 min_rx_space = hw->max_frame_size;
0627 min_rx_space = ALIGN(min_rx_space, 1024);
0628 min_rx_space >>= 10;
0629
0630
0631
0632
0633
0634 if (tx_space < min_tx_space &&
0635 ((min_tx_space - tx_space) < pba)) {
0636 pba = pba - (min_tx_space - tx_space);
0637
0638
0639 switch (hw->mac_type) {
0640 case e1000_82545 ... e1000_82546_rev_3:
0641 pba &= ~(E1000_PBA_8K - 1);
0642 break;
0643 default:
0644 break;
0645 }
0646
0647
0648
0649
0650 if (pba < min_rx_space)
0651 pba = min_rx_space;
0652 }
0653 }
0654
0655 ew32(PBA, pba);
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
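/* Flow control: the high water mark must leave room for at least one
 * full frame above it in the Rx FIFO, so use the lower of 90% of the
 * FIFO size and (FIFO size - max frame size), rounded down to a
 * multiple of 8 bytes.
 */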
0666 hwm = min(((pba << 10) * 9 / 10),
0667 ((pba << 10) - hw->max_frame_size));
0668
0669 hw->fc_high_water = hwm & 0xFFF8;
0670 hw->fc_low_water = hw->fc_high_water - 8;
0671 hw->fc_pause_time = E1000_FC_PAUSE_TIME;
0672 hw->fc_send_xon = 1;
0673 hw->fc = hw->original_fc;
0674
0675
0676 e1000_reset_hw(hw);
0677 if (hw->mac_type >= e1000_82544)
0678 ew32(WUC, 0);
0679
0680 if (e1000_init_hw(hw))
0681 e_dev_err("Hardware Error\n");
0682 e1000_update_mng_vlan(adapter);
0683
0684
0685 if (hw->mac_type >= e1000_82544 &&
0686 hw->autoneg == 1 &&
0687 hw->autoneg_advertised == ADVERTISE_1000_FULL) {
0688 u32 ctrl = er32(CTRL);
0689
0690
0691
0692
0693 ctrl &= ~E1000_CTRL_SWDPIN3;
0694 ew32(CTRL, ctrl);
0695 }
0696
0697
0698 ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
0699
0700 e1000_reset_adaptive(hw);
0701 e1000_phy_get_info(hw, &adapter->phy_info);
0702
0703 e1000_release_manageability(adapter);
0704 }
0705
0706
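/**
 * e1000_dump_eeprom - dump the start of the EEPROM contents to the log
 * @adapter: board private structure
 *
 * Called when the EEPROM checksum does not validate; prints the stored
 * and calculated checksums plus a hex dump to aid support diagnostics.
 */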
0707 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
0708 {
0709 struct net_device *netdev = adapter->netdev;
0710 struct ethtool_eeprom eeprom;
0711 const struct ethtool_ops *ops = netdev->ethtool_ops;
0712 u8 *data;
0713 int i;
0714 u16 csum_old, csum_new = 0;
0715
0716 eeprom.len = ops->get_eeprom_len(netdev);
0717 eeprom.offset = 0;
0718
0719 data = kmalloc(eeprom.len, GFP_KERNEL);
0720 if (!data)
0721 return;
0722
0723 ops->get_eeprom(netdev, &eeprom, data);
0724
0725 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
0726 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
0727 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
0728 csum_new += data[i] + (data[i + 1] << 8);
0729 csum_new = EEPROM_SUM - csum_new;
0730
0731 pr_err("/*********************/\n");
0732 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
pr_err("Calculated              : 0x%04x\n", csum_new);
0734
pr_err("Offset    Values\n");
pr_err("========  ======\n");
0737 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
0738
0739 pr_err("Include this output when contacting your support provider.\n");
0740 pr_err("This is not a software error! Something bad happened to\n");
0741 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
0742 pr_err("result in further problems, possibly loss of data,\n");
0743 pr_err("corruption or system hangs!\n");
0744 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
0745 pr_err("which is invalid and requires you to set the proper MAC\n");
0746 pr_err("address manually before continuing to enable this network\n");
0747 pr_err("device. Please inspect the EEPROM dump and report the\n");
0748 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
0749 pr_err("/*********************/\n");
0750
0751 kfree(data);
0752 }
0753
0754
0755
0756
0757
0758
0759
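/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources
 * @pdev: PCI device information struct
 *
 * Returns true for the older MACs (82540/82541/82544/82545/82546
 * variants) that require I/O port BARs in addition to memory BARs.
 */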
0760 static int e1000_is_need_ioport(struct pci_dev *pdev)
0761 {
0762 switch (pdev->device) {
0763 case E1000_DEV_ID_82540EM:
0764 case E1000_DEV_ID_82540EM_LOM:
0765 case E1000_DEV_ID_82540EP:
0766 case E1000_DEV_ID_82540EP_LOM:
0767 case E1000_DEV_ID_82540EP_LP:
0768 case E1000_DEV_ID_82541EI:
0769 case E1000_DEV_ID_82541EI_MOBILE:
0770 case E1000_DEV_ID_82541ER:
0771 case E1000_DEV_ID_82541ER_LOM:
0772 case E1000_DEV_ID_82541GI:
0773 case E1000_DEV_ID_82541GI_LF:
0774 case E1000_DEV_ID_82541GI_MOBILE:
0775 case E1000_DEV_ID_82544EI_COPPER:
0776 case E1000_DEV_ID_82544EI_FIBER:
0777 case E1000_DEV_ID_82544GC_COPPER:
0778 case E1000_DEV_ID_82544GC_LOM:
0779 case E1000_DEV_ID_82545EM_COPPER:
0780 case E1000_DEV_ID_82545EM_FIBER:
0781 case E1000_DEV_ID_82546EB_COPPER:
0782 case E1000_DEV_ID_82546EB_FIBER:
0783 case E1000_DEV_ID_82546EB_QUAD_COPPER:
0784 return true;
0785 default:
0786 return false;
0787 }
0788 }
0789
0790 static netdev_features_t e1000_fix_features(struct net_device *netdev,
0791 netdev_features_t features)
0792 {
0793
0794
0795
0796 if (features & NETIF_F_HW_VLAN_CTAG_RX)
0797 features |= NETIF_F_HW_VLAN_CTAG_TX;
0798 else
0799 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
0800
0801 return features;
0802 }
0803
0804 static int e1000_set_features(struct net_device *netdev,
0805 netdev_features_t features)
0806 {
0807 struct e1000_adapter *adapter = netdev_priv(netdev);
0808 netdev_features_t changed = features ^ netdev->features;
0809
0810 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
0811 e1000_vlan_mode(netdev, features);
0812
0813 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
0814 return 0;
0815
0816 netdev->features = features;
0817 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
0818
0819 if (netif_running(netdev))
0820 e1000_reinit_locked(adapter);
0821 else
0822 e1000_reset(adapter);
0823
0824 return 1;
0825 }
0826
0827 static const struct net_device_ops e1000_netdev_ops = {
0828 .ndo_open = e1000_open,
0829 .ndo_stop = e1000_close,
0830 .ndo_start_xmit = e1000_xmit_frame,
0831 .ndo_set_rx_mode = e1000_set_rx_mode,
0832 .ndo_set_mac_address = e1000_set_mac,
0833 .ndo_tx_timeout = e1000_tx_timeout,
0834 .ndo_change_mtu = e1000_change_mtu,
0835 .ndo_eth_ioctl = e1000_ioctl,
0836 .ndo_validate_addr = eth_validate_addr,
0837 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
0838 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
0839 #ifdef CONFIG_NET_POLL_CONTROLLER
0840 .ndo_poll_controller = e1000_netpoll,
0841 #endif
0842 .ndo_fix_features = e1000_fix_features,
0843 .ndo_set_features = e1000_set_features,
0844 };
0845
0846
0847
0848
0849
0850
0851
0852
0853
0854
0855
0856
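/**
 * e1000_init_hw_struct - initialize members of the hw struct from PCI config
 * @adapter: board private structure
 * @hw: structure used by e1000_hw.c
 *
 * Factored out so the hw struct can be set up early in probe, before most
 * of the software state exists. Returns negative on an unknown MAC type,
 * 0 otherwise.
 */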
0857 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
0858 struct e1000_hw *hw)
0859 {
0860 struct pci_dev *pdev = adapter->pdev;
0861
0862
0863 hw->vendor_id = pdev->vendor;
0864 hw->device_id = pdev->device;
0865 hw->subsystem_vendor_id = pdev->subsystem_vendor;
0866 hw->subsystem_id = pdev->subsystem_device;
0867 hw->revision_id = pdev->revision;
0868
0869 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
0870
0871 hw->max_frame_size = adapter->netdev->mtu +
0872 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
0873 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
0874
0875
0876 if (e1000_set_mac_type(hw)) {
0877 e_err(probe, "Unknown MAC Type\n");
0878 return -EIO;
0879 }
0880
0881 switch (hw->mac_type) {
0882 default:
0883 break;
0884 case e1000_82541:
0885 case e1000_82547:
0886 case e1000_82541_rev_2:
0887 case e1000_82547_rev_2:
0888 hw->phy_init_script = 1;
0889 break;
0890 }
0891
0892 e1000_set_media_type(hw);
0893 e1000_get_bus_info(hw);
0894
0895 hw->wait_autoneg_complete = false;
0896 hw->tbi_compatibility_en = true;
0897 hw->adaptive_ifs = true;
0898
0899
0900
0901 if (hw->media_type == e1000_media_type_copper) {
0902 hw->mdix = AUTO_ALL_MODES;
0903 hw->disable_polarity_correction = false;
0904 hw->master_slave = E1000_MASTER_SLAVE;
0905 }
0906
0907 return 0;
0908 }
0909
0910
0911
0912
0913
0914
0915
0916
0917
0918
0919
0920
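/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure.
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur here.
 */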
0921 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
0922 {
0923 struct net_device *netdev;
0924 struct e1000_adapter *adapter = NULL;
0925 struct e1000_hw *hw;
0926
0927 static int cards_found;
0928 static int global_quad_port_a;
0929 int i, err, pci_using_dac;
0930 u16 eeprom_data = 0;
0931 u16 tmp = 0;
0932 u16 eeprom_apme_mask = E1000_EEPROM_APME;
0933 int bars, need_ioport;
0934 bool disable_dev = false;
0935
0936
0937 need_ioport = e1000_is_need_ioport(pdev);
0938 if (need_ioport) {
0939 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
0940 err = pci_enable_device(pdev);
0941 } else {
0942 bars = pci_select_bars(pdev, IORESOURCE_MEM);
0943 err = pci_enable_device_mem(pdev);
0944 }
0945 if (err)
0946 return err;
0947
0948 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
0949 if (err)
0950 goto err_pci_reg;
0951
0952 pci_set_master(pdev);
0953 err = pci_save_state(pdev);
0954 if (err)
0955 goto err_alloc_etherdev;
0956
0957 err = -ENOMEM;
0958 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
0959 if (!netdev)
0960 goto err_alloc_etherdev;
0961
0962 SET_NETDEV_DEV(netdev, &pdev->dev);
0963
0964 pci_set_drvdata(pdev, netdev);
0965 adapter = netdev_priv(netdev);
0966 adapter->netdev = netdev;
0967 adapter->pdev = pdev;
0968 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
0969 adapter->bars = bars;
0970 adapter->need_ioport = need_ioport;
0971
0972 hw = &adapter->hw;
0973 hw->back = adapter;
0974
0975 err = -EIO;
0976 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
0977 if (!hw->hw_addr)
0978 goto err_ioremap;
0979
0980 if (adapter->need_ioport) {
0981 for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
0982 if (pci_resource_len(pdev, i) == 0)
0983 continue;
0984 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
0985 hw->io_base = pci_resource_start(pdev, i);
0986 break;
0987 }
0988 }
0989 }
0990
0991
0992 err = e1000_init_hw_struct(adapter, hw);
0993 if (err)
0994 goto err_sw_init;
0995
0996
0997
0998
0999
1000 pci_using_dac = 0;
1001 if ((hw->bus_type == e1000_bus_type_pcix) &&
1002 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1003 pci_using_dac = 1;
1004 } else {
1005 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1006 if (err) {
1007 pr_err("No usable DMA config, aborting\n");
1008 goto err_dma;
1009 }
1010 }
1011
1012 netdev->netdev_ops = &e1000_netdev_ops;
1013 e1000_set_ethtool_ops(netdev);
1014 netdev->watchdog_timeo = 5 * HZ;
1015 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1016
1017 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1018
1019 adapter->bd_number = cards_found;
1020
1021
1022
1023 err = e1000_sw_init(adapter);
1024 if (err)
1025 goto err_sw_init;
1026
1027 err = -EIO;
1028 if (hw->mac_type == e1000_ce4100) {
1029 hw->ce4100_gbe_mdio_base_virt =
1030 ioremap(pci_resource_start(pdev, BAR_1),
1031 pci_resource_len(pdev, BAR_1));
1032
1033 if (!hw->ce4100_gbe_mdio_base_virt)
1034 goto err_mdio_ioremap;
1035 }
1036
1037 if (hw->mac_type >= e1000_82543) {
1038 netdev->hw_features = NETIF_F_SG |
1039 NETIF_F_HW_CSUM |
1040 NETIF_F_HW_VLAN_CTAG_RX;
1041 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1042 NETIF_F_HW_VLAN_CTAG_FILTER;
1043 }
1044
1045 if ((hw->mac_type >= e1000_82544) &&
1046 (hw->mac_type != e1000_82547))
1047 netdev->hw_features |= NETIF_F_TSO;
1048
1049 netdev->priv_flags |= IFF_SUPP_NOFCS;
1050
1051 netdev->features |= netdev->hw_features;
1052 netdev->hw_features |= (NETIF_F_RXCSUM |
1053 NETIF_F_RXALL |
1054 NETIF_F_RXFCS);
1055
1056 if (pci_using_dac) {
1057 netdev->features |= NETIF_F_HIGHDMA;
1058 netdev->vlan_features |= NETIF_F_HIGHDMA;
1059 }
1060
1061 netdev->vlan_features |= (NETIF_F_TSO |
1062 NETIF_F_HW_CSUM |
1063 NETIF_F_SG);
1064
1065
1066 if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1067 hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1068 netdev->priv_flags |= IFF_UNICAST_FLT;
1069
1070
1071 netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1072 netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
1073
1074 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1075
1076
1077 if (e1000_init_eeprom_params(hw)) {
1078 e_err(probe, "EEPROM initialization failed\n");
1079 goto err_eeprom;
1080 }
1081
1082
1083
1084
1085
1086 e1000_reset_hw(hw);
1087
1088
1089 if (e1000_validate_eeprom_checksum(hw) < 0) {
1090 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1091 e1000_dump_eeprom(adapter);
1092
1093
1094
1095
1096
1097
1098
1099 memset(hw->mac_addr, 0, netdev->addr_len);
1100 } else {
1101
1102 if (e1000_read_mac_addr(hw))
1103 e_err(probe, "EEPROM Read Error\n");
1104 }
1105
1106 eth_hw_addr_set(netdev, hw->mac_addr);
1107
1108 if (!is_valid_ether_addr(netdev->dev_addr))
1109 e_err(probe, "Invalid MAC Address\n");
1110
1111
1112 INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1113 INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1114 e1000_82547_tx_fifo_stall_task);
1115 INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1116 INIT_WORK(&adapter->reset_task, e1000_reset_task);
1117
1118 e1000_check_options(adapter);
1119
1120
1121
1122
1123
1124
1125 switch (hw->mac_type) {
1126 case e1000_82542_rev2_0:
1127 case e1000_82542_rev2_1:
1128 case e1000_82543:
1129 break;
1130 case e1000_82544:
1131 e1000_read_eeprom(hw,
1132 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1133 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1134 break;
1135 case e1000_82546:
1136 case e1000_82546_rev_3:
1137 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1138 e1000_read_eeprom(hw,
1139 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1140 break;
1141 }
1142 fallthrough;
1143 default:
1144 e1000_read_eeprom(hw,
1145 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1146 break;
1147 }
1148 if (eeprom_data & eeprom_apme_mask)
1149 adapter->eeprom_wol |= E1000_WUFC_MAG;
1150
1151
1152
1153
1154
1155 switch (pdev->device) {
1156 case E1000_DEV_ID_82546GB_PCIE:
1157 adapter->eeprom_wol = 0;
1158 break;
1159 case E1000_DEV_ID_82546EB_FIBER:
1160 case E1000_DEV_ID_82546GB_FIBER:
1161
1162
1163
1164 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1165 adapter->eeprom_wol = 0;
1166 break;
1167 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1168
1169 if (global_quad_port_a != 0)
1170 adapter->eeprom_wol = 0;
1171 else
1172 adapter->quad_port_a = true;
1173
1174 if (++global_quad_port_a == 4)
1175 global_quad_port_a = 0;
1176 break;
1177 }
1178
1179
1180 adapter->wol = adapter->eeprom_wol;
1181 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1182
1183
1184 if (hw->mac_type == e1000_ce4100) {
1185 for (i = 0; i < 32; i++) {
1186 hw->phy_addr = i;
1187 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1188
1189 if (tmp != 0 && tmp != 0xFF)
1190 break;
1191 }
1192
1193 if (i >= 32)
1194 goto err_eeprom;
1195 }
1196
1197
1198 e1000_reset(adapter);
1199
1200 strcpy(netdev->name, "eth%d");
1201 err = register_netdev(netdev);
1202 if (err)
1203 goto err_register;
1204
1205 e1000_vlan_filter_on_off(adapter, false);
1206
1207
1208 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1209 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1210 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1211 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1212 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1213 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1214 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1215 netdev->dev_addr);
1216
1217
1218 netif_carrier_off(netdev);
1219
1220 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1221
1222 cards_found++;
1223 return 0;
1224
1225 err_register:
1226 err_eeprom:
1227 e1000_phy_hw_reset(hw);
1228
1229 if (hw->flash_address)
1230 iounmap(hw->flash_address);
1231 kfree(adapter->tx_ring);
1232 kfree(adapter->rx_ring);
1233 err_dma:
1234 err_sw_init:
1235 err_mdio_ioremap:
1236 iounmap(hw->ce4100_gbe_mdio_base_virt);
1237 iounmap(hw->hw_addr);
1238 err_ioremap:
1239 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1240 free_netdev(netdev);
1241 err_alloc_etherdev:
1242 pci_release_selected_regions(pdev, bars);
1243 err_pci_reg:
1244 if (!adapter || disable_dev)
1245 pci_disable_device(pdev);
1246 return err;
1247 }
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
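/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * Called by the PCI subsystem to alert the driver that it should release
 * a PCI device, either because of a hot-plug event or because the driver
 * is being removed from memory.
 */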
1258 static void e1000_remove(struct pci_dev *pdev)
1259 {
1260 struct net_device *netdev = pci_get_drvdata(pdev);
1261 struct e1000_adapter *adapter = netdev_priv(netdev);
1262 struct e1000_hw *hw = &adapter->hw;
1263 bool disable_dev;
1264
1265 e1000_down_and_stop(adapter);
1266 e1000_release_manageability(adapter);
1267
1268 unregister_netdev(netdev);
1269
1270 e1000_phy_hw_reset(hw);
1271
1272 kfree(adapter->tx_ring);
1273 kfree(adapter->rx_ring);
1274
1275 if (hw->mac_type == e1000_ce4100)
1276 iounmap(hw->ce4100_gbe_mdio_base_virt);
1277 iounmap(hw->hw_addr);
1278 if (hw->flash_address)
1279 iounmap(hw->flash_address);
1280 pci_release_selected_regions(pdev, adapter->bars);
1281
1282 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1283 free_netdev(netdev);
1284
1285 if (disable_dev)
1286 pci_disable_device(pdev);
1287 }
1288
1289
1290
1291
1292
1293
1294
1295
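/**
 * e1000_sw_init - initialize general software structures
 * @adapter: board private structure to initialize
 *
 * Sets the default buffer size and queue counts, allocates the queue
 * structures, and leaves the adapter in the DOWN state with interrupts
 * masked.
 */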
1296 static int e1000_sw_init(struct e1000_adapter *adapter)
1297 {
1298 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1299
1300 adapter->num_tx_queues = 1;
1301 adapter->num_rx_queues = 1;
1302
1303 if (e1000_alloc_queues(adapter)) {
1304 e_err(probe, "Unable to allocate memory for queues\n");
1305 return -ENOMEM;
1306 }
1307
1308
1309 e1000_irq_disable(adapter);
1310
1311 spin_lock_init(&adapter->stats_lock);
1312
1313 set_bit(__E1000_DOWN, &adapter->flags);
1314
1315 return 0;
1316 }
1317
1318
1319
1320
1321
1322
1323
1324
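/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * One ring per queue is allocated at run-time since the number of
 * queues is not known at compile-time.
 */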
1325 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1326 {
1327 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1328 sizeof(struct e1000_tx_ring), GFP_KERNEL);
1329 if (!adapter->tx_ring)
1330 return -ENOMEM;
1331
1332 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1333 sizeof(struct e1000_rx_ring), GFP_KERNEL);
1334 if (!adapter->rx_ring) {
1335 kfree(adapter->tx_ring);
1336 return -ENOMEM;
1337 }
1338
1339 return E1000_SUCCESS;
1340 }
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
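/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure.
 *
 * The open entry point is called when an interface is made active by the
 * system (IFF_UP). All resources needed for transmit and receive are
 * allocated, the interrupt handler is registered, and a link change
 * interrupt is fired to kick off the watchdog.
 */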
1354 int e1000_open(struct net_device *netdev)
1355 {
1356 struct e1000_adapter *adapter = netdev_priv(netdev);
1357 struct e1000_hw *hw = &adapter->hw;
1358 int err;
1359
1360
1361 if (test_bit(__E1000_TESTING, &adapter->flags))
1362 return -EBUSY;
1363
1364 netif_carrier_off(netdev);
1365
1366
1367 err = e1000_setup_all_tx_resources(adapter);
1368 if (err)
1369 goto err_setup_tx;
1370
1371
1372 err = e1000_setup_all_rx_resources(adapter);
1373 if (err)
1374 goto err_setup_rx;
1375
1376 e1000_power_up_phy(adapter);
1377
1378 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1379 if ((hw->mng_cookie.status &
1380 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1381 e1000_update_mng_vlan(adapter);
1382 }
1383
1384
1385
1386
1387
1388
1389 e1000_configure(adapter);
1390
1391 err = e1000_request_irq(adapter);
1392 if (err)
1393 goto err_req_irq;
1394
1395
1396 clear_bit(__E1000_DOWN, &adapter->flags);
1397
1398 napi_enable(&adapter->napi);
1399
1400 e1000_irq_enable(adapter);
1401
1402 netif_start_queue(netdev);
1403
1404
1405 ew32(ICS, E1000_ICS_LSC);
1406
1407 return E1000_SUCCESS;
1408
1409 err_req_irq:
1410 e1000_power_down_phy(adapter);
1411 e1000_free_all_rx_resources(adapter);
1412 err_setup_rx:
1413 e1000_free_all_tx_resources(adapter);
1414 err_setup_tx:
1415 e1000_reset(adapter);
1416
1417 return err;
1418 }
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
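/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail.
 *
 * Called when an interface is de-activated by the OS. The hardware is
 * stopped and all transmit and receive resources are freed.
 */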
1431 int e1000_close(struct net_device *netdev)
1432 {
1433 struct e1000_adapter *adapter = netdev_priv(netdev);
1434 struct e1000_hw *hw = &adapter->hw;
1435 int count = E1000_CHECK_RESET_COUNT;
1436
1437 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1438 usleep_range(10000, 20000);
1439
1440 WARN_ON(count < 0);
1441
1442
1443 set_bit(__E1000_DOWN, &adapter->flags);
1444 clear_bit(__E1000_RESETTING, &adapter->flags);
1445
1446 e1000_down(adapter);
1447 e1000_power_down_phy(adapter);
1448 e1000_free_irq(adapter);
1449
1450 e1000_free_all_tx_resources(adapter);
1451 e1000_free_all_rx_resources(adapter);
1452
1453
1454
1455
1456 if ((hw->mng_cookie.status &
1457 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1458 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1459 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1460 adapter->mng_vlan_id);
1461 }
1462
1463 return 0;
1464 }
1465
1466
1467
1468
1469
1470
1471
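/**
 * e1000_check_64k_bound - check that memory doesn't cross a 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 *
 * Some MAC types (82545, 82546, CE4100) must not have descriptor memory
 * spanning a 64kB boundary; for those parts this returns false if the
 * range crosses one, true otherwise.
 */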
1472 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1473 unsigned long len)
1474 {
1475 struct e1000_hw *hw = &adapter->hw;
1476 unsigned long begin = (unsigned long)start;
1477 unsigned long end = begin + len;
1478
1479
1480
1481
1482 if (hw->mac_type == e1000_82545 ||
1483 hw->mac_type == e1000_ce4100 ||
1484 hw->mac_type == e1000_82546) {
1485 return ((begin ^ (end - 1)) >> 16) == 0;
1486 }
1487
1488 return true;
1489 }
1490
1491
1492
1493
1494
1495
1496
1497
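/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure.
 */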
1498 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1499 struct e1000_tx_ring *txdr)
1500 {
1501 struct pci_dev *pdev = adapter->pdev;
1502 int size;
1503
1504 size = sizeof(struct e1000_tx_buffer) * txdr->count;
1505 txdr->buffer_info = vzalloc(size);
1506 if (!txdr->buffer_info)
1507 return -ENOMEM;
1508
1509
1510
1511 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1512 txdr->size = ALIGN(txdr->size, 4096);
1513
1514 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1515 GFP_KERNEL);
1516 if (!txdr->desc) {
1517 setup_tx_desc_die:
1518 vfree(txdr->buffer_info);
1519 return -ENOMEM;
1520 }
1521
1522
1523 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1524 void *olddesc = txdr->desc;
1525 dma_addr_t olddma = txdr->dma;
1526 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1527 txdr->size, txdr->desc);
1528
1529 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1530 &txdr->dma, GFP_KERNEL);
1531
1532 if (!txdr->desc) {
1533 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1534 olddma);
1535 goto setup_tx_desc_die;
1536 }
1537
1538 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1539
1540 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1541 txdr->dma);
1542 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1543 olddma);
1544 e_err(probe, "Unable to allocate aligned memory "
1545 "for the transmit descriptor ring\n");
1546 vfree(txdr->buffer_info);
1547 return -ENOMEM;
1548 } else {
1549
1550 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1551 olddma);
1552 }
1553 }
1554 memset(txdr->desc, 0, txdr->size);
1555
1556 txdr->next_to_use = 0;
1557 txdr->next_to_clean = 0;
1558
1559 return 0;
1560 }
1561
1562
1563
1564
1565
1566
1567
1568
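/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * @adapter: board private structure
 *
 * Allocates Tx resources for all queues. Returns 0 on success,
 * negative on failure.
 */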
1569 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1570 {
1571 int i, err = 0;
1572
1573 for (i = 0; i < adapter->num_tx_queues; i++) {
1574 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1575 if (err) {
1576 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1577 for (i-- ; i >= 0; i--)
1578 e1000_free_tx_resources(adapter,
1579 &adapter->tx_ring[i]);
1580 break;
1581 }
1582 }
1583
1584 return err;
1585 }
1586
1587
1588
1589
1590
1591
1592
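/**
 * e1000_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */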
1593 static void e1000_configure_tx(struct e1000_adapter *adapter)
1594 {
1595 u64 tdba;
1596 struct e1000_hw *hw = &adapter->hw;
1597 u32 tdlen, tctl, tipg;
1598 u32 ipgr1, ipgr2;
1599
1600
1601
1602 switch (adapter->num_tx_queues) {
1603 case 1:
1604 default:
1605 tdba = adapter->tx_ring[0].dma;
1606 tdlen = adapter->tx_ring[0].count *
1607 sizeof(struct e1000_tx_desc);
1608 ew32(TDLEN, tdlen);
1609 ew32(TDBAH, (tdba >> 32));
1610 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1611 ew32(TDT, 0);
1612 ew32(TDH, 0);
1613 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1614 E1000_TDH : E1000_82542_TDH);
1615 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1616 E1000_TDT : E1000_82542_TDT);
1617 break;
1618 }
1619
1620
1621 if ((hw->media_type == e1000_media_type_fiber ||
1622 hw->media_type == e1000_media_type_internal_serdes))
1623 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1624 else
1625 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1626
1627 switch (hw->mac_type) {
1628 case e1000_82542_rev2_0:
1629 case e1000_82542_rev2_1:
1630 tipg = DEFAULT_82542_TIPG_IPGT;
1631 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1632 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1633 break;
1634 default:
1635 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1636 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1637 break;
1638 }
1639 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1640 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1641 ew32(TIPG, tipg);
1642
1643
1644
1645 ew32(TIDV, adapter->tx_int_delay);
1646 if (hw->mac_type >= e1000_82540)
1647 ew32(TADV, adapter->tx_abs_int_delay);
1648
1649
1650
1651 tctl = er32(TCTL);
1652 tctl &= ~E1000_TCTL_CT;
1653 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1654 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1655
1656 e1000_config_collision_dist(hw);
1657
1658
1659 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1660
1661
1662 if (adapter->tx_int_delay)
1663 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1664
1665 if (hw->mac_type < e1000_82543)
1666 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1667 else
1668 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1669
1670
1671
1672
1673 if (hw->mac_type == e1000_82544 &&
1674 hw->bus_type == e1000_bus_type_pcix)
1675 adapter->pcix_82544 = true;
1676
1677 ew32(TCTL, tctl);
1678
1679 }
1680
1681
1682
1683
1684
1685
1686
1687
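/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure.
 */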
1688 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1689 struct e1000_rx_ring *rxdr)
1690 {
1691 struct pci_dev *pdev = adapter->pdev;
1692 int size, desc_len;
1693
1694 size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1695 rxdr->buffer_info = vzalloc(size);
1696 if (!rxdr->buffer_info)
1697 return -ENOMEM;
1698
1699 desc_len = sizeof(struct e1000_rx_desc);
1700
1701
1702
1703 rxdr->size = rxdr->count * desc_len;
1704 rxdr->size = ALIGN(rxdr->size, 4096);
1705
1706 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1707 GFP_KERNEL);
1708 if (!rxdr->desc) {
1709 setup_rx_desc_die:
1710 vfree(rxdr->buffer_info);
1711 return -ENOMEM;
1712 }
1713
1714
1715 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1716 void *olddesc = rxdr->desc;
1717 dma_addr_t olddma = rxdr->dma;
1718 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1719 rxdr->size, rxdr->desc);
1720
1721 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1722 &rxdr->dma, GFP_KERNEL);
1723
1724 if (!rxdr->desc) {
1725 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1726 olddma);
1727 goto setup_rx_desc_die;
1728 }
1729
1730 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1731
1732 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1733 rxdr->dma);
1734 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1735 olddma);
1736 e_err(probe, "Unable to allocate aligned memory for "
1737 "the Rx descriptor ring\n");
1738 goto setup_rx_desc_die;
1739 } else {
1740
1741 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1742 olddma);
1743 }
1744 }
1745 memset(rxdr->desc, 0, rxdr->size);
1746
1747 rxdr->next_to_clean = 0;
1748 rxdr->next_to_use = 0;
1749 rxdr->rx_skb_top = NULL;
1750
1751 return 0;
1752 }
1753
1754
1755
1756
1757
1758
1759
1760
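/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * @adapter: board private structure
 *
 * Allocates Rx resources for all queues. Returns 0 on success,
 * negative on failure.
 */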
1761 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1762 {
1763 int i, err = 0;
1764
1765 for (i = 0; i < adapter->num_rx_queues; i++) {
1766 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1767 if (err) {
1768 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1769 for (i-- ; i >= 0; i--)
1770 e1000_free_rx_resources(adapter,
1771 &adapter->rx_ring[i]);
1772 break;
1773 }
1774 }
1775
1776 return err;
1777 }
1778
1779
1780
1781
1782
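/**
 * e1000_setup_rctl - configure the receive control register
 * @adapter: board private structure
 */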
1783 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1784 {
1785 struct e1000_hw *hw = &adapter->hw;
1786 u32 rctl;
1787
1788 rctl = er32(RCTL);
1789
1790 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1791
1792 rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1793 E1000_RCTL_RDMTS_HALF |
1794 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1795
1796 if (hw->tbi_compatibility_on == 1)
1797 rctl |= E1000_RCTL_SBP;
1798 else
1799 rctl &= ~E1000_RCTL_SBP;
1800
1801 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1802 rctl &= ~E1000_RCTL_LPE;
1803 else
1804 rctl |= E1000_RCTL_LPE;
1805
1806
1807 rctl &= ~E1000_RCTL_SZ_4096;
1808 rctl |= E1000_RCTL_BSEX;
1809 switch (adapter->rx_buffer_len) {
1810 case E1000_RXBUFFER_2048:
1811 default:
1812 rctl |= E1000_RCTL_SZ_2048;
1813 rctl &= ~E1000_RCTL_BSEX;
1814 break;
1815 case E1000_RXBUFFER_4096:
1816 rctl |= E1000_RCTL_SZ_4096;
1817 break;
1818 case E1000_RXBUFFER_8192:
1819 rctl |= E1000_RCTL_SZ_8192;
1820 break;
1821 case E1000_RXBUFFER_16384:
1822 rctl |= E1000_RCTL_SZ_16384;
1823 break;
1824 }
1825
1826
1827 if (adapter->netdev->features & NETIF_F_RXALL) {
1828
1829
1830
1831 rctl |= (E1000_RCTL_SBP |
1832 E1000_RCTL_BAM |
1833 E1000_RCTL_PMCF);
1834
1835 rctl &= ~(E1000_RCTL_VFE |
1836 E1000_RCTL_DPF |
1837 E1000_RCTL_CFIEN);
1838
1839
1840
1841 }
1842
1843 ew32(RCTL, rctl);
1844 }
1845
1846
1847
1848
1849
1850
1851
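/**
 * e1000_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */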
1852 static void e1000_configure_rx(struct e1000_adapter *adapter)
1853 {
1854 u64 rdba;
1855 struct e1000_hw *hw = &adapter->hw;
1856 u32 rdlen, rctl, rxcsum;
1857
1858 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1859 rdlen = adapter->rx_ring[0].count *
1860 sizeof(struct e1000_rx_desc);
1861 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1862 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1863 } else {
1864 rdlen = adapter->rx_ring[0].count *
1865 sizeof(struct e1000_rx_desc);
1866 adapter->clean_rx = e1000_clean_rx_irq;
1867 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1868 }
1869
1870
1871 rctl = er32(RCTL);
1872 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1873
1874
1875 ew32(RDTR, adapter->rx_int_delay);
1876
1877 if (hw->mac_type >= e1000_82540) {
1878 ew32(RADV, adapter->rx_abs_int_delay);
1879 if (adapter->itr_setting != 0)
1880 ew32(ITR, 1000000000 / (adapter->itr * 256));
1881 }
1882
1883
1884
1885
1886 switch (adapter->num_rx_queues) {
1887 case 1:
1888 default:
1889 rdba = adapter->rx_ring[0].dma;
1890 ew32(RDLEN, rdlen);
1891 ew32(RDBAH, (rdba >> 32));
1892 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1893 ew32(RDT, 0);
1894 ew32(RDH, 0);
1895 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1896 E1000_RDH : E1000_82542_RDH);
1897 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1898 E1000_RDT : E1000_82542_RDT);
1899 break;
1900 }
1901
1902
1903 if (hw->mac_type >= e1000_82543) {
1904 rxcsum = er32(RXCSUM);
1905 if (adapter->rx_csum)
1906 rxcsum |= E1000_RXCSUM_TUOFL;
1907 else
1908
1909 rxcsum &= ~E1000_RXCSUM_TUOFL;
1910 ew32(RXCSUM, rxcsum);
1911 }
1912
1913
1914 ew32(RCTL, rctl | E1000_RCTL_EN);
1915 }
1916
1917
1918
1919
1920
1921
1922
1923
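/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources.
 */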
1924 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1925 struct e1000_tx_ring *tx_ring)
1926 {
1927 struct pci_dev *pdev = adapter->pdev;
1928
1929 e1000_clean_tx_ring(adapter, tx_ring);
1930
1931 vfree(tx_ring->buffer_info);
1932 tx_ring->buffer_info = NULL;
1933
1934 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1935 tx_ring->dma);
1936
1937 tx_ring->desc = NULL;
1938 }
1939
1940
1941
1942
1943
1944
1945
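/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources.
 */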
1946 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1947 {
1948 int i;
1949
1950 for (i = 0; i < adapter->num_tx_queues; i++)
1951 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1952 }
1953
1954 static void
1955 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1956 struct e1000_tx_buffer *buffer_info,
1957 int budget)
1958 {
1959 if (buffer_info->dma) {
1960 if (buffer_info->mapped_as_page)
1961 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1962 buffer_info->length, DMA_TO_DEVICE);
1963 else
1964 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1965 buffer_info->length,
1966 DMA_TO_DEVICE);
1967 buffer_info->dma = 0;
1968 }
1969 if (buffer_info->skb) {
1970 napi_consume_skb(buffer_info->skb, budget);
1971 buffer_info->skb = NULL;
1972 }
1973 buffer_info->time_stamp = 0;
1974
1975 }
1976
1977
1978
1979
1980
1981
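/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 */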
1982 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1983 struct e1000_tx_ring *tx_ring)
1984 {
1985 struct e1000_hw *hw = &adapter->hw;
1986 struct e1000_tx_buffer *buffer_info;
1987 unsigned long size;
1988 unsigned int i;
1989
1990
1991
1992 for (i = 0; i < tx_ring->count; i++) {
1993 buffer_info = &tx_ring->buffer_info[i];
1994 e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
1995 }
1996
1997 netdev_reset_queue(adapter->netdev);
1998 size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1999 memset(tx_ring->buffer_info, 0, size);
2000
2001
2002
2003 memset(tx_ring->desc, 0, tx_ring->size);
2004
2005 tx_ring->next_to_use = 0;
2006 tx_ring->next_to_clean = 0;
2007 tx_ring->last_tx_tso = false;
2008
2009 writel(0, hw->hw_addr + tx_ring->tdh);
2010 writel(0, hw->hw_addr + tx_ring->tdt);
2011 }
2012
2013
2014
2015
2016
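/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */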
2017 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2018 {
2019 int i;
2020
2021 for (i = 0; i < adapter->num_tx_queues; i++)
2022 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2023 }
2024
2025
2026
2027
2028
2029
2030
2031
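/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources.
 */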
2032 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2033 struct e1000_rx_ring *rx_ring)
2034 {
2035 struct pci_dev *pdev = adapter->pdev;
2036
2037 e1000_clean_rx_ring(adapter, rx_ring);
2038
2039 vfree(rx_ring->buffer_info);
2040 rx_ring->buffer_info = NULL;
2041
2042 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2043 rx_ring->dma);
2044
2045 rx_ring->desc = NULL;
2046 }
2047
2048
2049
2050
2051
2052
2053
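/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources.
 */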
2054 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2055 {
2056 int i;
2057
2058 for (i = 0; i < adapter->num_rx_queues; i++)
2059 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2060 }
2061
2062 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2063 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2064 {
2065 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2066 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2067 }
2068
2069 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2070 {
2071 unsigned int len = e1000_frag_len(a);
2072 u8 *data = netdev_alloc_frag(len);
2073
2074 if (likely(data))
2075 data += E1000_HEADROOM;
2076 return data;
2077 }
2078
2079
2080
2081
2082
2083
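/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 */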
2084 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2085 struct e1000_rx_ring *rx_ring)
2086 {
2087 struct e1000_hw *hw = &adapter->hw;
2088 struct e1000_rx_buffer *buffer_info;
2089 struct pci_dev *pdev = adapter->pdev;
2090 unsigned long size;
2091 unsigned int i;
2092
2093
2094 for (i = 0; i < rx_ring->count; i++) {
2095 buffer_info = &rx_ring->buffer_info[i];
2096 if (adapter->clean_rx == e1000_clean_rx_irq) {
2097 if (buffer_info->dma)
2098 dma_unmap_single(&pdev->dev, buffer_info->dma,
2099 adapter->rx_buffer_len,
2100 DMA_FROM_DEVICE);
2101 if (buffer_info->rxbuf.data) {
2102 skb_free_frag(buffer_info->rxbuf.data);
2103 buffer_info->rxbuf.data = NULL;
2104 }
2105 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2106 if (buffer_info->dma)
2107 dma_unmap_page(&pdev->dev, buffer_info->dma,
2108 adapter->rx_buffer_len,
2109 DMA_FROM_DEVICE);
2110 if (buffer_info->rxbuf.page) {
2111 put_page(buffer_info->rxbuf.page);
2112 buffer_info->rxbuf.page = NULL;
2113 }
2114 }
2115
2116 buffer_info->dma = 0;
2117 }
2118
2119
2120 napi_free_frags(&adapter->napi);
2121 rx_ring->rx_skb_top = NULL;
2122
2123 size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2124 memset(rx_ring->buffer_info, 0, size);
2125
2126
2127 memset(rx_ring->desc, 0, rx_ring->size);
2128
2129 rx_ring->next_to_clean = 0;
2130 rx_ring->next_to_use = 0;
2131
2132 writel(0, hw->hw_addr + rx_ring->rdh);
2133 writel(0, hw->hw_addr + rx_ring->rdt);
2134 }
2135
2136
2137
2138
2139
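/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */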
2140 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2141 {
2142 int i;
2143
2144 for (i = 0; i < adapter->num_rx_queues; i++)
2145 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2146 }
2147
2148
2149
2150
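/* The 82542 rev 2.0 needs the receive unit held in reset (and MWI
 * disabled) while the receive address and filter registers are
 * rewritten.
 */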
2151 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2152 {
2153 struct e1000_hw *hw = &adapter->hw;
2154 struct net_device *netdev = adapter->netdev;
2155 u32 rctl;
2156
2157 e1000_pci_clear_mwi(hw);
2158
2159 rctl = er32(RCTL);
2160 rctl |= E1000_RCTL_RST;
2161 ew32(RCTL, rctl);
2162 E1000_WRITE_FLUSH();
2163 mdelay(5);
2164
2165 if (netif_running(netdev))
2166 e1000_clean_all_rx_rings(adapter);
2167 }
2168
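/* Take the 82542 rev 2.0 receive unit back out of reset, restore MWI
 * if it was enabled, and refill the receive ring.
 */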
2169 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2170 {
2171 struct e1000_hw *hw = &adapter->hw;
2172 struct net_device *netdev = adapter->netdev;
2173 u32 rctl;
2174
2175 rctl = er32(RCTL);
2176 rctl &= ~E1000_RCTL_RST;
2177 ew32(RCTL, rctl);
2178 E1000_WRITE_FLUSH();
2179 mdelay(5);
2180
2181 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2182 e1000_pci_set_mwi(hw);
2183
2184 if (netif_running(netdev)) {
2185
2186 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2187 e1000_configure_rx(adapter);
2188 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2189 }
2190 }
2191
2192
2193
2194
2195
2196
2197
2198
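/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure.
 */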
2199 static int e1000_set_mac(struct net_device *netdev, void *p)
2200 {
2201 struct e1000_adapter *adapter = netdev_priv(netdev);
2202 struct e1000_hw *hw = &adapter->hw;
2203 struct sockaddr *addr = p;
2204
2205 if (!is_valid_ether_addr(addr->sa_data))
2206 return -EADDRNOTAVAIL;
2207
2208
2209
2210 if (hw->mac_type == e1000_82542_rev2_0)
2211 e1000_enter_82542_rst(adapter);
2212
2213 eth_hw_addr_set(netdev, addr->sa_data);
2214 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2215
2216 e1000_rar_set(hw, hw->mac_addr, 0);
2217
2218 if (hw->mac_type == e1000_82542_rev2_0)
2219 e1000_leave_82542_rst(adapter);
2220
2221 return 0;
2222 }
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
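/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * Called whenever the unicast or multicast address lists or the network
 * interface flags are updated. Configures the hardware for proper
 * unicast, multicast, promiscuous mode, and all-multi behavior.
 */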
2233 static void e1000_set_rx_mode(struct net_device *netdev)
2234 {
2235 struct e1000_adapter *adapter = netdev_priv(netdev);
2236 struct e1000_hw *hw = &adapter->hw;
2237 struct netdev_hw_addr *ha;
2238 bool use_uc = false;
2239 u32 rctl;
2240 u32 hash_value;
2241 int i, rar_entries = E1000_RAR_ENTRIES;
2242 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2243 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2244
2245 if (!mcarray)
2246 return;
2247
2248
2249
2250 rctl = er32(RCTL);
2251
2252 if (netdev->flags & IFF_PROMISC) {
2253 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2254 rctl &= ~E1000_RCTL_VFE;
2255 } else {
2256 if (netdev->flags & IFF_ALLMULTI)
2257 rctl |= E1000_RCTL_MPE;
2258 else
2259 rctl &= ~E1000_RCTL_MPE;
2260
2261 if (e1000_vlan_used(adapter))
2262 rctl |= E1000_RCTL_VFE;
2263 }
2264
2265 if (netdev_uc_count(netdev) > rar_entries - 1) {
2266 rctl |= E1000_RCTL_UPE;
2267 } else if (!(netdev->flags & IFF_PROMISC)) {
2268 rctl &= ~E1000_RCTL_UPE;
2269 use_uc = true;
2270 }
2271
2272 ew32(RCTL, rctl);
2273
2274
2275
2276 if (hw->mac_type == e1000_82542_rev2_0)
2277 e1000_enter_82542_rst(adapter);
2278
2279
2280
2281
2282
2283
2284
2285
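/* Load unicast (then multicast) addresses into the exact-match receive
 * address registers, starting at entry 1; RAR 0 holds the station MAC
 * address. Multicast addresses that do not fit are hashed into the
 * multicast table array instead.
 */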
2286 i = 1;
2287 if (use_uc)
2288 netdev_for_each_uc_addr(ha, netdev) {
2289 if (i == rar_entries)
2290 break;
2291 e1000_rar_set(hw, ha->addr, i++);
2292 }
2293
2294 netdev_for_each_mc_addr(ha, netdev) {
2295 if (i == rar_entries) {
2296
2297 u32 hash_reg, hash_bit, mta;
2298 hash_value = e1000_hash_mc_addr(hw, ha->addr);
2299 hash_reg = (hash_value >> 5) & 0x7F;
2300 hash_bit = hash_value & 0x1F;
2301 mta = (1 << hash_bit);
2302 mcarray[hash_reg] |= mta;
2303 } else {
2304 e1000_rar_set(hw, ha->addr, i++);
2305 }
2306 }
2307
2308 for (; i < rar_entries; i++) {
2309 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2310 E1000_WRITE_FLUSH();
2311 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2312 E1000_WRITE_FLUSH();
2313 }
2314
2315
2316
2317
2318 for (i = mta_reg_count - 1; i >= 0 ; i--) {
2319
2320
2321
2322
2323
2324 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2325 }
2326 E1000_WRITE_FLUSH();
2327
2328 if (hw->mac_type == e1000_82542_rev2_0)
2329 e1000_leave_82542_rst(adapter);
2330
2331 kfree(mcarray);
2332 }
2333
2334 /**
2335  * e1000_update_phy_info_task - get phy info
2336  * @work: work struct contained inside adapter struct
2337  *
2338  * Need to wait a few seconds after link up to get diagnostic information
2339  * from the phy
2340  **/
2341 static void e1000_update_phy_info_task(struct work_struct *work)
2342 {
2343 struct e1000_adapter *adapter = container_of(work,
2344 struct e1000_adapter,
2345 phy_info_task.work);
2346
2347 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2348 }
2349
2350 /**
2351  * e1000_82547_tx_fifo_stall_task - task to complete work
2352  * @work: work struct contained inside adapter struct
2353  **/
2354 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2355 {
2356 struct e1000_adapter *adapter = container_of(work,
2357 struct e1000_adapter,
2358 fifo_stall_task.work);
2359 struct e1000_hw *hw = &adapter->hw;
2360 struct net_device *netdev = adapter->netdev;
2361 u32 tctl;
2362
2363 if (atomic_read(&adapter->tx_fifo_stall)) {
2364 if ((er32(TDT) == er32(TDH)) &&
2365 (er32(TDFT) == er32(TDFH)) &&
2366 (er32(TDFTS) == er32(TDFHS))) {
2367 tctl = er32(TCTL);
2368 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2369 ew32(TDFT, adapter->tx_head_addr);
2370 ew32(TDFH, adapter->tx_head_addr);
2371 ew32(TDFTS, adapter->tx_head_addr);
2372 ew32(TDFHS, adapter->tx_head_addr);
2373 ew32(TCTL, tctl);
2374 E1000_WRITE_FLUSH();
2375
2376 adapter->tx_fifo_head = 0;
2377 atomic_set(&adapter->tx_fifo_stall, 0);
2378 netif_wake_queue(netdev);
2379 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2380 schedule_delayed_work(&adapter->fifo_stall_task, 1);
2381 }
2382 }
2383 }
2384
2385 bool e1000_has_link(struct e1000_adapter *adapter)
2386 {
2387 struct e1000_hw *hw = &adapter->hw;
2388 bool link_active = false;
2389
2390
2391
2392
2393
2394
2395
2396 switch (hw->media_type) {
2397 case e1000_media_type_copper:
2398 if (hw->mac_type == e1000_ce4100)
2399 hw->get_link_status = 1;
2400 if (hw->get_link_status) {
2401 e1000_check_for_link(hw);
2402 link_active = !hw->get_link_status;
2403 } else {
2404 link_active = true;
2405 }
2406 break;
2407 case e1000_media_type_fiber:
2408 e1000_check_for_link(hw);
2409 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2410 break;
2411 case e1000_media_type_internal_serdes:
2412 e1000_check_for_link(hw);
2413 link_active = hw->serdes_has_link;
2414 break;
2415 default:
2416 break;
2417 }
2418
2419 return link_active;
2420 }
2421
2422 /**
2423  * e1000_watchdog - work function
2424  * @work: work struct contained inside adapter struct
2425  **/
2426 static void e1000_watchdog(struct work_struct *work)
2427 {
2428 struct e1000_adapter *adapter = container_of(work,
2429 struct e1000_adapter,
2430 watchdog_task.work);
2431 struct e1000_hw *hw = &adapter->hw;
2432 struct net_device *netdev = adapter->netdev;
2433 struct e1000_tx_ring *txdr = adapter->tx_ring;
2434 u32 link, tctl;
2435
2436 link = e1000_has_link(adapter);
2437 if ((netif_carrier_ok(netdev)) && link)
2438 goto link_up;
2439
2440 if (link) {
2441 if (!netif_carrier_ok(netdev)) {
2442 u32 ctrl;
2443
2444 e1000_get_speed_and_duplex(hw,
2445 &adapter->link_speed,
2446 &adapter->link_duplex);
2447
2448 ctrl = er32(CTRL);
2449 pr_info("%s NIC Link is Up %d Mbps %s, "
2450 "Flow Control: %s\n",
2451 netdev->name,
2452 adapter->link_speed,
2453 adapter->link_duplex == FULL_DUPLEX ?
2454 "Full Duplex" : "Half Duplex",
2455 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2456 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2457 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2458 E1000_CTRL_TFCE) ? "TX" : "None")));
2459
2460 /* adjust timeout factor according to speed/duplex */
2461 adapter->tx_timeout_factor = 1;
2462 switch (adapter->link_speed) {
2463 case SPEED_10:
2464 adapter->tx_timeout_factor = 16;
2465 break;
2466 case SPEED_100:
2467
2468 break;
2469 }
2470
2471
2472 tctl = er32(TCTL);
2473 tctl |= E1000_TCTL_EN;
2474 ew32(TCTL, tctl);
2475
2476 netif_carrier_on(netdev);
2477 if (!test_bit(__E1000_DOWN, &adapter->flags))
2478 schedule_delayed_work(&adapter->phy_info_task,
2479 2 * HZ);
2480 adapter->smartspeed = 0;
2481 }
2482 } else {
2483 if (netif_carrier_ok(netdev)) {
2484 adapter->link_speed = 0;
2485 adapter->link_duplex = 0;
2486 pr_info("%s NIC Link is Down\n",
2487 netdev->name);
2488 netif_carrier_off(netdev);
2489
2490 if (!test_bit(__E1000_DOWN, &adapter->flags))
2491 schedule_delayed_work(&adapter->phy_info_task,
2492 2 * HZ);
2493 }
2494
2495 e1000_smartspeed(adapter);
2496 }
2497
2498 link_up:
2499 e1000_update_stats(adapter);
2500
2501 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2502 adapter->tpt_old = adapter->stats.tpt;
2503 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2504 adapter->colc_old = adapter->stats.colc;
2505
2506 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2507 adapter->gorcl_old = adapter->stats.gorcl;
2508 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2509 adapter->gotcl_old = adapter->stats.gotcl;
2510
2511 e1000_update_adaptive(hw);
2512
2513 if (!netif_carrier_ok(netdev)) {
2514 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2515 /* We've lost link, so the controller stops DMA,
2516  * but we've got queued Tx work that's never going
2517  * to get done, so reset controller to flush Tx.
2518  * (Do the reset outside of interrupt context).
2519  */
2520 adapter->tx_timeout_count++;
2521 schedule_work(&adapter->reset_task);
2522
2523 return;
2524 }
2525 }
2526
2527 /* Simple mode for Interrupt Throttle Rate (ITR) */
2528 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2529 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2530  * total asymmetrical Tx or Rx gets ITR=8000;
2531  * everyone else is between 2000-8000
2532  */
2533 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2534 u32 dif = (adapter->gotcl > adapter->gorcl ?
2535 adapter->gotcl - adapter->gorcl :
2536 adapter->gorcl - adapter->gotcl) / 10000;
2537 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2538
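/* The ITR register counts in 256 ns increments, so this converts the
 * interrupts-per-second target in 'itr' into the register value as
 * 10^9 / (itr * 256).
 */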
2539 ew32(ITR, 1000000000 / (itr * 256));
2540 }
2541
2542
2543 ew32(ICS, E1000_ICS_RXDMT0);
2544
2545
2546 adapter->detect_tx_hung = true;
2547
2548
2549 if (!test_bit(__E1000_DOWN, &adapter->flags))
2550 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2551 }
2552
2553 enum latency_range {
2554 lowest_latency = 0,
2555 low_latency = 1,
2556 bulk_latency = 2,
2557 latency_invalid = 255
2558 };
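/* e1000_set_itr() maps these ranges onto interrupt rates of roughly
 * 70000 (lowest_latency), 20000 (low_latency) and 4000 (bulk_latency)
 * interrupts per second.
 */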
2559
2560 /**
2561  * e1000_update_itr - update the dynamic ITR value based on statistics
2562  * @adapter: pointer to adapter
2563  * @itr_setting: current adapter->itr
2564  * @packets: the number of packets during this measurement interval
2565  * @bytes: the number of bytes during this measurement interval
2566  *
2567  * Stores a new ITR value based on packets and byte
2568  * counts during the last interrupt.  The advantage of per interrupt
2569  * computation is faster updates and more accurate ITR for the current
2570  * traffic pattern.  Constants in this function were computed
2571  * based on theoretical maximum wire speed and thresholds were set based
2572  * on testing data as well as attempting to minimize response time
2573  * while increasing bulk throughput.
2574  * This functionality is controlled by the InterruptThrottleRate module
2575  * parameter (see e1000_param.c)
2576  **/
2577 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2578 u16 itr_setting, int packets, int bytes)
2579 {
2580 unsigned int retval = itr_setting;
2581 struct e1000_hw *hw = &adapter->hw;
2582
2583 if (unlikely(hw->mac_type < e1000_82540))
2584 goto update_itr_done;
2585
2586 if (packets == 0)
2587 goto update_itr_done;
2588
2589 switch (itr_setting) {
2590 case lowest_latency:
2591
2592 if (bytes/packets > 8000)
2593 retval = bulk_latency;
2594 else if ((packets < 5) && (bytes > 512))
2595 retval = low_latency;
2596 break;
2597 case low_latency:
2598 if (bytes > 10000) {
2599
2600 if (bytes/packets > 8000)
2601 retval = bulk_latency;
2602 else if ((packets < 10) || ((bytes/packets) > 1200))
2603 retval = bulk_latency;
2604 else if ((packets > 35))
2605 retval = lowest_latency;
2606 } else if (bytes/packets > 2000)
2607 retval = bulk_latency;
2608 else if (packets <= 2 && bytes < 512)
2609 retval = lowest_latency;
2610 break;
2611 case bulk_latency:
2612 if (bytes > 25000) {
2613 if (packets > 35)
2614 retval = low_latency;
2615 } else if (bytes < 6000) {
2616 retval = low_latency;
2617 }
2618 break;
2619 }
2620
2621 update_itr_done:
2622 return retval;
2623 }
2624
2625 static void e1000_set_itr(struct e1000_adapter *adapter)
2626 {
2627 struct e1000_hw *hw = &adapter->hw;
2628 u16 current_itr;
2629 u32 new_itr = adapter->itr;
2630
2631 if (unlikely(hw->mac_type < e1000_82540))
2632 return;
2633
2634 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2635 if (unlikely(adapter->link_speed != SPEED_1000)) {
2636 new_itr = 4000;
2637 goto set_itr_now;
2638 }
2639
2640 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2641 adapter->total_tx_packets,
2642 adapter->total_tx_bytes);
2643
2644 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2645 adapter->tx_itr = low_latency;
2646
2647 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2648 adapter->total_rx_packets,
2649 adapter->total_rx_bytes);
2650
2651 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2652 adapter->rx_itr = low_latency;
2653
2654 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2655
2656 switch (current_itr) {
2657
2658 case lowest_latency:
2659 new_itr = 70000;
2660 break;
2661 case low_latency:
2662 new_itr = 20000;
2663 break;
2664 case bulk_latency:
2665 new_itr = 4000;
2666 break;
2667 default:
2668 break;
2669 }
2670
2671 set_itr_now:
2672 if (new_itr != adapter->itr) {
2673 /* this attempts to bias the interrupt rate towards Bulk
2674  * by adding intermediate steps when interrupt rate is
2675  * increasing
2676  */
2677 new_itr = new_itr > adapter->itr ?
2678 min(adapter->itr + (new_itr >> 2), new_itr) :
2679 new_itr;
2680 adapter->itr = new_itr;
2681 ew32(ITR, 1000000000 / (new_itr * 256));
2682 }
2683 }
2684
2685 #define E1000_TX_FLAGS_CSUM 0x00000001
2686 #define E1000_TX_FLAGS_VLAN 0x00000002
2687 #define E1000_TX_FLAGS_TSO 0x00000004
2688 #define E1000_TX_FLAGS_IPV4 0x00000008
2689 #define E1000_TX_FLAGS_NO_FCS 0x00000010
2690 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2691 #define E1000_TX_FLAGS_VLAN_SHIFT 16
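/* The low bits of tx_flags are feature flags; the upper 16 bits
 * (E1000_TX_FLAGS_VLAN_MASK) carry the 802.1Q tag to insert, placed
 * there with E1000_TX_FLAGS_VLAN_SHIFT in e1000_xmit_frame().
 */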
2692
2693 static int e1000_tso(struct e1000_adapter *adapter,
2694 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2695 __be16 protocol)
2696 {
2697 struct e1000_context_desc *context_desc;
2698 struct e1000_tx_buffer *buffer_info;
2699 unsigned int i;
2700 u32 cmd_length = 0;
2701 u16 ipcse = 0, tucse, mss;
2702 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2703
2704 if (skb_is_gso(skb)) {
2705 int err;
2706
2707 err = skb_cow_head(skb, 0);
2708 if (err < 0)
2709 return err;
2710
2711 hdr_len = skb_tcp_all_headers(skb);
2712 mss = skb_shinfo(skb)->gso_size;
2713 if (protocol == htons(ETH_P_IP)) {
2714 struct iphdr *iph = ip_hdr(skb);
2715 iph->tot_len = 0;
2716 iph->check = 0;
2717 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2718 iph->daddr, 0,
2719 IPPROTO_TCP,
2720 0);
2721 cmd_length = E1000_TXD_CMD_IP;
2722 ipcse = skb_transport_offset(skb) - 1;
2723 } else if (skb_is_gso_v6(skb)) {
2724 tcp_v6_gso_csum_prep(skb);
2725 ipcse = 0;
2726 }
2727 ipcss = skb_network_offset(skb);
2728 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2729 tucss = skb_transport_offset(skb);
2730 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2731 tucse = 0;
2732
2733 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2734 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2735
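/* Fill a TSO context descriptor: it hands the hardware the IP/TCP
 * checksum offsets (ipcss/ipcso/ipcse, tucss/tucso), the MSS and the
 * total header length, so the controller can segment the payload and
 * fix up the headers of every resulting frame.
 */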
2736 i = tx_ring->next_to_use;
2737 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2738 buffer_info = &tx_ring->buffer_info[i];
2739
2740 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2741 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2742 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2743 context_desc->upper_setup.tcp_fields.tucss = tucss;
2744 context_desc->upper_setup.tcp_fields.tucso = tucso;
2745 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2746 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2747 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2748 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2749
2750 buffer_info->time_stamp = jiffies;
2751 buffer_info->next_to_watch = i;
2752
2753 if (++i == tx_ring->count)
2754 i = 0;
2755
2756 tx_ring->next_to_use = i;
2757
2758 return true;
2759 }
2760 return false;
2761 }
2762
2763 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2764 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2765 __be16 protocol)
2766 {
2767 struct e1000_context_desc *context_desc;
2768 struct e1000_tx_buffer *buffer_info;
2769 unsigned int i;
2770 u8 css;
2771 u32 cmd_len = E1000_TXD_CMD_DEXT;
2772
2773 if (skb->ip_summed != CHECKSUM_PARTIAL)
2774 return false;
2775
2776 switch (protocol) {
2777 case cpu_to_be16(ETH_P_IP):
2778 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2779 cmd_len |= E1000_TXD_CMD_TCP;
2780 break;
2781 case cpu_to_be16(ETH_P_IPV6):
2782
2783 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2784 cmd_len |= E1000_TXD_CMD_TCP;
2785 break;
2786 default:
2787 if (unlikely(net_ratelimit()))
2788 e_warn(drv, "checksum_partial proto=%x!\n",
2789 skb->protocol);
2790 break;
2791 }
2792
2793 css = skb_checksum_start_offset(skb);
2794
2795 i = tx_ring->next_to_use;
2796 buffer_info = &tx_ring->buffer_info[i];
2797 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2798
2799 context_desc->lower_setup.ip_config = 0;
2800 context_desc->upper_setup.tcp_fields.tucss = css;
2801 context_desc->upper_setup.tcp_fields.tucso =
2802 css + skb->csum_offset;
2803 context_desc->upper_setup.tcp_fields.tucse = 0;
2804 context_desc->tcp_seg_setup.data = 0;
2805 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2806
2807 buffer_info->time_stamp = jiffies;
2808 buffer_info->next_to_watch = i;
2809
2810 if (unlikely(++i == tx_ring->count))
2811 i = 0;
2812
2813 tx_ring->next_to_use = i;
2814
2815 return true;
2816 }
2817
2818 #define E1000_MAX_TXD_PWR 12
2819 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
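/* Each data descriptor can carry at most 2^E1000_MAX_TXD_PWR (4096) bytes;
 * larger buffers are split across several descriptors.
 */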
2820
2821 static int e1000_tx_map(struct e1000_adapter *adapter,
2822 struct e1000_tx_ring *tx_ring,
2823 struct sk_buff *skb, unsigned int first,
2824 unsigned int max_per_txd, unsigned int nr_frags,
2825 unsigned int mss)
2826 {
2827 struct e1000_hw *hw = &adapter->hw;
2828 struct pci_dev *pdev = adapter->pdev;
2829 struct e1000_tx_buffer *buffer_info;
2830 unsigned int len = skb_headlen(skb);
2831 unsigned int offset = 0, size, count = 0, i;
2832 unsigned int f, bytecount, segs;
2833
2834 i = tx_ring->next_to_use;
2835
2836 while (len) {
2837 buffer_info = &tx_ring->buffer_info[i];
2838 size = min(len, max_per_txd);
2839
2840 /* Workaround for Controller erratum --
2841  * descriptor for non-tso packet in a linear SKB that follows a
2842  * tso gets written back prematurely before the data is fully
2843  * DMA'd to the controller */
2844 if (!skb->data_len && tx_ring->last_tx_tso &&
2845 !skb_is_gso(skb)) {
2846 tx_ring->last_tx_tso = false;
2847 size -= 4;
2848 }
2849
2850 /* Workaround for premature desc write-backs
2851  * in TSO mode.  Append 4-byte sentinel desc
2852  */
2853 if (unlikely(mss && !nr_frags && size == len && size > 8))
2854 size -= 4;
2855
2856 /* work-around for errata 10 and it applies to all
2857  * controllers in PCI-X mode; the fix is to make sure
2858  * that the first descriptor of a packet is smaller
2859  * than 2048 - 16 - 16 (or 2016) bytes */
2860 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2861 (size > 2015) && count == 0))
2862 size = 2015;
2863
2864 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2865  * terminating buffers within evenly-aligned dwords.
2866  */
2867 if (unlikely(adapter->pcix_82544 &&
2868 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2869 size > 4))
2870 size -= 4;
2871
2872 buffer_info->length = size;
2873
2874 buffer_info->time_stamp = jiffies;
2875 buffer_info->mapped_as_page = false;
2876 buffer_info->dma = dma_map_single(&pdev->dev,
2877 skb->data + offset,
2878 size, DMA_TO_DEVICE);
2879 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2880 goto dma_error;
2881 buffer_info->next_to_watch = i;
2882
2883 len -= size;
2884 offset += size;
2885 count++;
2886 if (len) {
2887 i++;
2888 if (unlikely(i == tx_ring->count))
2889 i = 0;
2890 }
2891 }
2892
2893 for (f = 0; f < nr_frags; f++) {
2894 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2895
2896 len = skb_frag_size(frag);
2897 offset = 0;
2898
2899 while (len) {
2900 unsigned long bufend;
2901 i++;
2902 if (unlikely(i == tx_ring->count))
2903 i = 0;
2904
2905 buffer_info = &tx_ring->buffer_info[i];
2906 size = min(len, max_per_txd);
2907
2908
2909
2910 if (unlikely(mss && f == (nr_frags-1) &&
2911 size == len && size > 8))
2912 size -= 4;
2913
2914
2915
2916
2917 bufend = (unsigned long)
2918 page_to_phys(skb_frag_page(frag));
2919 bufend += offset + size - 1;
2920 if (unlikely(adapter->pcix_82544 &&
2921 !(bufend & 4) &&
2922 size > 4))
2923 size -= 4;
2924
2925 buffer_info->length = size;
2926 buffer_info->time_stamp = jiffies;
2927 buffer_info->mapped_as_page = true;
2928 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2929 offset, size, DMA_TO_DEVICE);
2930 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2931 goto dma_error;
2932 buffer_info->next_to_watch = i;
2933
2934 len -= size;
2935 offset += size;
2936 count++;
2937 }
2938 }
2939
2940 segs = skb_shinfo(skb)->gso_segs ?: 1;
2941
2942 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2943
2944 tx_ring->buffer_info[i].skb = skb;
2945 tx_ring->buffer_info[i].segs = segs;
2946 tx_ring->buffer_info[i].bytecount = bytecount;
2947 tx_ring->buffer_info[first].next_to_watch = i;
2948
2949 return count;
2950
2951 dma_error:
2952 dev_err(&pdev->dev, "TX DMA map failed\n");
2953 buffer_info->dma = 0;
2954 if (count)
2955 count--;
2956
2957 while (count--) {
2958 if (i == 0)
2959 i += tx_ring->count;
2960 i--;
2961 buffer_info = &tx_ring->buffer_info[i];
2962 e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
2963 }
2964
2965 return 0;
2966 }
2967
2968 static void e1000_tx_queue(struct e1000_adapter *adapter,
2969 struct e1000_tx_ring *tx_ring, int tx_flags,
2970 int count)
2971 {
2972 struct e1000_tx_desc *tx_desc = NULL;
2973 struct e1000_tx_buffer *buffer_info;
2974 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2975 unsigned int i;
2976
2977 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2978 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2979 E1000_TXD_CMD_TSE;
2980 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2981
2982 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2983 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2984 }
2985
2986 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2987 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2988 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2989 }
2990
2991 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2992 txd_lower |= E1000_TXD_CMD_VLE;
2993 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2994 }
2995
2996 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2997 txd_lower &= ~(E1000_TXD_CMD_IFCS);
2998
2999 i = tx_ring->next_to_use;
3000
3001 while (count--) {
3002 buffer_info = &tx_ring->buffer_info[i];
3003 tx_desc = E1000_TX_DESC(*tx_ring, i);
3004 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3005 tx_desc->lower.data =
3006 cpu_to_le32(txd_lower | buffer_info->length);
3007 tx_desc->upper.data = cpu_to_le32(txd_upper);
3008 if (unlikely(++i == tx_ring->count))
3009 i = 0;
3010 }
3011
3012 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3013
3014
3015 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3016 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3017
3018 /* Force memory writes to complete before letting h/w
3019  * know there are new descriptors to fetch.  (Only
3020  * applicable for weak-ordered memory model archs,
3021  * such as IA-64).
3022  */
3023 dma_wmb();
3024
3025 tx_ring->next_to_use = i;
3026 }
3027
3028
3029 /* 82547 workaround to avoid controller hang in half-duplex environment.
3030  * The workaround is to avoid queuing a large packet that would span
3031  * the internal Tx FIFO ring boundary by notifying the stack to resend
3032  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3033  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3034  * to the beginning of the FIFO.
3035  */
3036 #define E1000_FIFO_HDR 0x10
3037 #define E1000_82547_PAD_LEN 0x3E0
3038
3039 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3040 struct sk_buff *skb)
3041 {
3042 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3043 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3044
3045 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3046
3047 if (adapter->link_duplex != HALF_DUPLEX)
3048 goto no_fifo_stall_required;
3049
3050 if (atomic_read(&adapter->tx_fifo_stall))
3051 return 1;
3052
3053 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3054 atomic_set(&adapter->tx_fifo_stall, 1);
3055 return 1;
3056 }
3057
3058 no_fifo_stall_required:
3059 adapter->tx_fifo_head += skb_fifo_len;
3060 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3061 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3062 return 0;
3063 }
3064
3065 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3066 {
3067 struct e1000_adapter *adapter = netdev_priv(netdev);
3068 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3069
3070 netif_stop_queue(netdev);
3071 /* Make the queue stop visible to the cleanup path before we
3072  * re-check for free descriptors below; e1000_clean_tx_irq issues
3073  * the matching barrier before it tests netif_queue_stopped().
3074  */
3075 smp_mb();
3076
3077 /* We need to check again in case another CPU has just
3078  * made room available.
3079  */
3080 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3081 return -EBUSY;
3082
3083
3084 netif_start_queue(netdev);
3085 ++adapter->restart_queue;
3086 return 0;
3087 }
3088
3089 static int e1000_maybe_stop_tx(struct net_device *netdev,
3090 struct e1000_tx_ring *tx_ring, int size)
3091 {
3092 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3093 return 0;
3094 return __e1000_maybe_stop_tx(netdev, size);
3095 }
3096
3097 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
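/* TXD_USE_COUNT(S, X) is a ceiling division: the number of descriptors
 * needed to carry S bytes when each descriptor holds at most 2^X bytes.
 */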
3098 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3099 struct net_device *netdev)
3100 {
3101 struct e1000_adapter *adapter = netdev_priv(netdev);
3102 struct e1000_hw *hw = &adapter->hw;
3103 struct e1000_tx_ring *tx_ring;
3104 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3105 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3106 unsigned int tx_flags = 0;
3107 unsigned int len = skb_headlen(skb);
3108 unsigned int nr_frags;
3109 unsigned int mss;
3110 int count = 0;
3111 int tso;
3112 unsigned int f;
3113 __be16 protocol = vlan_get_protocol(skb);
3114
3115 /* This goes back to the question of how to logically map a Tx queue
3116  * to a flow.  Right now, performance is impacted slightly negatively
3117  * if using multiple Tx queues.  If the stack breaks away from a
3118  * single qdisc implementation, we can look at this again.
3119  */
3120 tx_ring = adapter->tx_ring;
3121
3122 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3123  * packets may get corrupted during padding by HW.
3124  * To WA this issue, pad all small packets manually.
3125  */
3126 if (eth_skb_pad(skb))
3127 return NETDEV_TX_OK;
3128
3129 mss = skb_shinfo(skb)->gso_size;
3130
3131 /* The controller does a simple calculation to
3132  * make sure there is enough room in the FIFO before
3133  * initiating the DMA for each buffer.  The calc is:
3134  * 4 = ceil(buffer len/mss).  To make sure we don't
3135  * overrun the FIFO, adjust the max buffer len if mss
3136  * drops. */
3137 if (mss) {
3138 u8 hdr_len;
3139 max_per_txd = min(mss << 2, max_per_txd);
3140 max_txd_pwr = fls(max_per_txd) - 1;
3141
3142 hdr_len = skb_tcp_all_headers(skb);
3143 if (skb->data_len && hdr_len == len) {
3144 switch (hw->mac_type) {
3145 case e1000_82544: {
3146 unsigned int pull_size;
3147
3148 /* Make sure we have room to chop off 4 bytes,
3149  * and that the end alignment will work out to
3150  * this hardware's requirements
3151  * NOTE: this is a TSO only workaround
3152  * if end byte alignment not correct move us
3153  * into the next dword
3154  */
3155 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3156 & 4)
3157 break;
3158 pull_size = min((unsigned int)4, skb->data_len);
3159 if (!__pskb_pull_tail(skb, pull_size)) {
3160 e_err(drv, "__pskb_pull_tail "
3161 "failed.\n");
3162 dev_kfree_skb_any(skb);
3163 return NETDEV_TX_OK;
3164 }
3165 len = skb_headlen(skb);
3166 break;
3167 }
3168 default:
3169
3170 break;
3171 }
3172 }
3173 }
3174
3175
3176 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3177 count++;
3178 count++;
3179
3180
3181 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3182 count++;
3183
3184 count += TXD_USE_COUNT(len, max_txd_pwr);
3185
3186 if (adapter->pcix_82544)
3187 count++;
3188
3189 /* work-around for errata 10 and it applies to all controllers
3190  * in PCI-X mode, so add one more descriptor to the count
3191  */
3192 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3193 (len > 2015)))
3194 count++;
3195
3196 nr_frags = skb_shinfo(skb)->nr_frags;
3197 for (f = 0; f < nr_frags; f++)
3198 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3199 max_txd_pwr);
3200 if (adapter->pcix_82544)
3201 count += nr_frags;
3202
3203 /* need: count + 2 desc gap to keep tail from touching
3204  * head, otherwise try next time
3205  */
3206 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3207 return NETDEV_TX_BUSY;
3208
3209 if (unlikely((hw->mac_type == e1000_82547) &&
3210 (e1000_82547_fifo_workaround(adapter, skb)))) {
3211 netif_stop_queue(netdev);
3212 if (!test_bit(__E1000_DOWN, &adapter->flags))
3213 schedule_delayed_work(&adapter->fifo_stall_task, 1);
3214 return NETDEV_TX_BUSY;
3215 }
3216
3217 if (skb_vlan_tag_present(skb)) {
3218 tx_flags |= E1000_TX_FLAGS_VLAN;
3219 tx_flags |= (skb_vlan_tag_get(skb) <<
3220 E1000_TX_FLAGS_VLAN_SHIFT);
3221 }
3222
3223 first = tx_ring->next_to_use;
3224
3225 tso = e1000_tso(adapter, tx_ring, skb, protocol);
3226 if (tso < 0) {
3227 dev_kfree_skb_any(skb);
3228 return NETDEV_TX_OK;
3229 }
3230
3231 if (likely(tso)) {
3232 if (likely(hw->mac_type != e1000_82544))
3233 tx_ring->last_tx_tso = true;
3234 tx_flags |= E1000_TX_FLAGS_TSO;
3235 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3236 tx_flags |= E1000_TX_FLAGS_CSUM;
3237
3238 if (protocol == htons(ETH_P_IP))
3239 tx_flags |= E1000_TX_FLAGS_IPV4;
3240
3241 if (unlikely(skb->no_fcs))
3242 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3243
3244 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3245 nr_frags, mss);
3246
3247 if (count) {
3248 /* The descriptors needed is higher than other Intel drivers
3249  * due to a number of workarounds.  The breakdown is below:
3250  * Data descriptors: MAX_SKB_FRAGS + 1
3251  * Context Descriptor: 1
3252  * Keep head from touching tail: 2
3253  * Workarounds: 3
3254  */
3255 int desc_needed = MAX_SKB_FRAGS + 7;
3256
3257 netdev_sent_queue(netdev, skb->len);
3258 skb_tx_timestamp(skb);
3259
3260 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3261
3262 /* 82544 potentially requires twice as many data descriptors
3263  * in order to guarantee buffers don't end on evenly-aligned
3264  * dwords
3265  */
3266 if (adapter->pcix_82544)
3267 desc_needed += MAX_SKB_FRAGS + 1;
3268
3269
3270 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3271
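/* Only ring the TDT doorbell when the stack has no more frames queued
 * (or the queue was stopped), so back-to-back transmits can share a
 * single MMIO write.
 */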
3272 if (!netdev_xmit_more() ||
3273 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3274 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3275 }
3276 } else {
3277 dev_kfree_skb_any(skb);
3278 tx_ring->buffer_info[first].time_stamp = 0;
3279 tx_ring->next_to_use = first;
3280 }
3281
3282 return NETDEV_TX_OK;
3283 }
3284
3285 #define NUM_REGS 38
3286 static void e1000_regdump(struct e1000_adapter *adapter)
3287 {
3288 struct e1000_hw *hw = &adapter->hw;
3289 u32 regs[NUM_REGS];
3290 u32 *regs_buff = regs;
3291 int i = 0;
3292
3293 static const char * const reg_name[] = {
3294 "CTRL", "STATUS",
3295 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3296 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3297 "TIDV", "TXDCTL", "TADV", "TARC0",
3298 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3299 "TXDCTL1", "TARC1",
3300 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3301 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3302 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3303 };
3304
3305 regs_buff[0] = er32(CTRL);
3306 regs_buff[1] = er32(STATUS);
3307
3308 regs_buff[2] = er32(RCTL);
3309 regs_buff[3] = er32(RDLEN);
3310 regs_buff[4] = er32(RDH);
3311 regs_buff[5] = er32(RDT);
3312 regs_buff[6] = er32(RDTR);
3313
3314 regs_buff[7] = er32(TCTL);
3315 regs_buff[8] = er32(TDBAL);
3316 regs_buff[9] = er32(TDBAH);
3317 regs_buff[10] = er32(TDLEN);
3318 regs_buff[11] = er32(TDH);
3319 regs_buff[12] = er32(TDT);
3320 regs_buff[13] = er32(TIDV);
3321 regs_buff[14] = er32(TXDCTL);
3322 regs_buff[15] = er32(TADV);
3323 regs_buff[16] = er32(TARC0);
3324
3325 regs_buff[17] = er32(TDBAL1);
3326 regs_buff[18] = er32(TDBAH1);
3327 regs_buff[19] = er32(TDLEN1);
3328 regs_buff[20] = er32(TDH1);
3329 regs_buff[21] = er32(TDT1);
3330 regs_buff[22] = er32(TXDCTL1);
3331 regs_buff[23] = er32(TARC1);
3332 regs_buff[24] = er32(CTRL_EXT);
3333 regs_buff[25] = er32(ERT);
3334 regs_buff[26] = er32(RDBAL0);
3335 regs_buff[27] = er32(RDBAH0);
3336 regs_buff[28] = er32(TDFH);
3337 regs_buff[29] = er32(TDFT);
3338 regs_buff[30] = er32(TDFHS);
3339 regs_buff[31] = er32(TDFTS);
3340 regs_buff[32] = er32(TDFPC);
3341 regs_buff[33] = er32(RDFH);
3342 regs_buff[34] = er32(RDFT);
3343 regs_buff[35] = er32(RDFHS);
3344 regs_buff[36] = er32(RDFTS);
3345 regs_buff[37] = er32(RDFPC);
3346
3347 pr_info("Register dump\n");
3348 for (i = 0; i < NUM_REGS; i++)
3349 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
3350 }
3351
3352 /*
3353  * e1000_dump: Print registers, tx ring and rx ring
3354  */
3355 static void e1000_dump(struct e1000_adapter *adapter)
3356 {
3357
3358 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3359 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3360 int i;
3361
3362 if (!netif_msg_hw(adapter))
3363 return;
3364
3365
3366 e1000_regdump(adapter);
3367
3368
3369 pr_info("TX Desc ring0 dump\n");
3370 /* Each 16-byte transmit descriptor is dumped as two 64-bit words.
3371  * 'Tc' marks an extended context descriptor and 'Td' a data
3372  * descriptor (decided by the DTYP bit), followed by the driver
3373  * state for that slot: dma address, length, next_to_watch,
3374  * timestamp and skb pointer.
3375  */
3398 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3399 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
3400
3401 if (!netif_msg_tx_done(adapter))
3402 goto rx_ring_summary;
3403
3404 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3405 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3406 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3407 struct my_u { __le64 a; __le64 b; };
3408 struct my_u *u = (struct my_u *)tx_desc;
3409 const char *type;
3410
3411 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3412 type = "NTC/U";
3413 else if (i == tx_ring->next_to_use)
3414 type = "NTU";
3415 else if (i == tx_ring->next_to_clean)
3416 type = "NTC";
3417 else
3418 type = "";
3419
3420 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3421 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3422 le64_to_cpu(u->a), le64_to_cpu(u->b),
3423 (u64)buffer_info->dma, buffer_info->length,
3424 buffer_info->next_to_watch,
3425 (u64)buffer_info->time_stamp, buffer_info->skb, type);
3426 }
3427
3428 rx_ring_summary:
3429
3430 pr_info("\nRX Desc ring dump\n");
3431 /* Each legacy receive descriptor is dumped as two 64-bit words,
3432  * followed by the driver state for that slot: dma address and
3433  * buffer data pointer.
3434  */
3441 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
3442
3443 if (!netif_msg_rx_status(adapter))
3444 goto exit;
3445
3446 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3447 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3448 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3449 struct my_u { __le64 a; __le64 b; };
3450 struct my_u *u = (struct my_u *)rx_desc;
3451 const char *type;
3452
3453 if (i == rx_ring->next_to_use)
3454 type = "NTU";
3455 else if (i == rx_ring->next_to_clean)
3456 type = "NTC";
3457 else
3458 type = "";
3459
3460 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3461 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3462 (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3463 }
3464
3465
3466
3467 pr_info("Rx descriptor cache in 64bit format\n");
3468 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3469 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3470 i,
3471 readl(adapter->hw.hw_addr + i+4),
3472 readl(adapter->hw.hw_addr + i),
3473 readl(adapter->hw.hw_addr + i+12),
3474 readl(adapter->hw.hw_addr + i+8));
3475 }
3476
3477 pr_info("Tx descriptor cache in 64bit format\n");
3478 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3479 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3480 i,
3481 readl(adapter->hw.hw_addr + i+4),
3482 readl(adapter->hw.hw_addr + i),
3483 readl(adapter->hw.hw_addr + i+12),
3484 readl(adapter->hw.hw_addr + i+8));
3485 }
3486 exit:
3487 return;
3488 }
3489
3490 /**
3491  * e1000_tx_timeout - Respond to a Tx Hang
3492  * @netdev: network interface device structure
3493  * @txqueue: number of the Tx queue that hung (unused)
3494  **/
3495 static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
3496 {
3497 struct e1000_adapter *adapter = netdev_priv(netdev);
3498
3499
3500 adapter->tx_timeout_count++;
3501 schedule_work(&adapter->reset_task);
3502 }
3503
3504 static void e1000_reset_task(struct work_struct *work)
3505 {
3506 struct e1000_adapter *adapter =
3507 container_of(work, struct e1000_adapter, reset_task);
3508
3509 e_err(drv, "Reset adapter\n");
3510 e1000_reinit_locked(adapter);
3511 }
3512
3513 /**
3514  * e1000_change_mtu - Change the Maximum Transfer Unit
3515  * @netdev: network interface device structure
3516  * @new_mtu: new value for maximum frame size
3517  *
3518  * Returns 0 on success, negative on failure
3519  **/
3520 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3521 {
3522 struct e1000_adapter *adapter = netdev_priv(netdev);
3523 struct e1000_hw *hw = &adapter->hw;
3524 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3525
3526
3527 switch (hw->mac_type) {
3528 case e1000_undefined ... e1000_82542_rev2_1:
3529 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3530 e_err(probe, "Jumbo Frames not supported.\n");
3531 return -EINVAL;
3532 }
3533 break;
3534 default:
3535
3536 break;
3537 }
3538
3539 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3540 msleep(1);
3541
3542 hw->max_frame_size = max_frame;
3543 if (netif_running(netdev)) {
3544
3545 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3546 e1000_down(adapter);
3547 }
3548
3549
3550 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3551  * means we reserve 2 more, this pushes us to allocate from the next
3552  * larger slab size.
3553  * i.e. RXBUFFER_2048 --> size-4096 slab
3554  * however with the new *_jumbo_rx* routines, jumbo receives will use
3555  * fragmented skbs
3556  */
3557 if (max_frame <= E1000_RXBUFFER_2048)
3558 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3559 else
3560 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3561 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3562 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3563 adapter->rx_buffer_len = PAGE_SIZE;
3564 #endif
3565
3566
3567 if (!hw->tbi_compatibility_on &&
3568 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3569 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3570 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3571
3572 netdev_dbg(netdev, "changing MTU from %d to %d\n",
3573 netdev->mtu, new_mtu);
3574 netdev->mtu = new_mtu;
3575
3576 if (netif_running(netdev))
3577 e1000_up(adapter);
3578 else
3579 e1000_reset(adapter);
3580
3581 clear_bit(__E1000_RESETTING, &adapter->flags);
3582
3583 return 0;
3584 }
3585
3586 /**
3587  * e1000_update_stats - Update the board statistics counters
3588  * @adapter: board private structure
3589  **/
3590 void e1000_update_stats(struct e1000_adapter *adapter)
3591 {
3592 struct net_device *netdev = adapter->netdev;
3593 struct e1000_hw *hw = &adapter->hw;
3594 struct pci_dev *pdev = adapter->pdev;
3595 unsigned long flags;
3596 u16 phy_tmp;
3597
3598 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3599
3600 /* Prevent stats update while adapter is being reset, or if the pci
3601  * connection is down.
3602  */
3603 if (adapter->link_speed == 0)
3604 return;
3605 if (pci_channel_offline(pdev))
3606 return;
3607
3608 spin_lock_irqsave(&adapter->stats_lock, flags);
3609
3610
3611 /* these counters are modified from e1000_tbi_adjust_stats,
3612  * called from the interrupt context, so they must only
3613  * be written while holding adapter->stats_lock
3614  */
3615 adapter->stats.crcerrs += er32(CRCERRS);
3616 adapter->stats.gprc += er32(GPRC);
3617 adapter->stats.gorcl += er32(GORCL);
3618 adapter->stats.gorch += er32(GORCH);
3619 adapter->stats.bprc += er32(BPRC);
3620 adapter->stats.mprc += er32(MPRC);
3621 adapter->stats.roc += er32(ROC);
3622
3623 adapter->stats.prc64 += er32(PRC64);
3624 adapter->stats.prc127 += er32(PRC127);
3625 adapter->stats.prc255 += er32(PRC255);
3626 adapter->stats.prc511 += er32(PRC511);
3627 adapter->stats.prc1023 += er32(PRC1023);
3628 adapter->stats.prc1522 += er32(PRC1522);
3629
3630 adapter->stats.symerrs += er32(SYMERRS);
3631 adapter->stats.mpc += er32(MPC);
3632 adapter->stats.scc += er32(SCC);
3633 adapter->stats.ecol += er32(ECOL);
3634 adapter->stats.mcc += er32(MCC);
3635 adapter->stats.latecol += er32(LATECOL);
3636 adapter->stats.dc += er32(DC);
3637 adapter->stats.sec += er32(SEC);
3638 adapter->stats.rlec += er32(RLEC);
3639 adapter->stats.xonrxc += er32(XONRXC);
3640 adapter->stats.xontxc += er32(XONTXC);
3641 adapter->stats.xoffrxc += er32(XOFFRXC);
3642 adapter->stats.xofftxc += er32(XOFFTXC);
3643 adapter->stats.fcruc += er32(FCRUC);
3644 adapter->stats.gptc += er32(GPTC);
3645 adapter->stats.gotcl += er32(GOTCL);
3646 adapter->stats.gotch += er32(GOTCH);
3647 adapter->stats.rnbc += er32(RNBC);
3648 adapter->stats.ruc += er32(RUC);
3649 adapter->stats.rfc += er32(RFC);
3650 adapter->stats.rjc += er32(RJC);
3651 adapter->stats.torl += er32(TORL);
3652 adapter->stats.torh += er32(TORH);
3653 adapter->stats.totl += er32(TOTL);
3654 adapter->stats.toth += er32(TOTH);
3655 adapter->stats.tpr += er32(TPR);
3656
3657 adapter->stats.ptc64 += er32(PTC64);
3658 adapter->stats.ptc127 += er32(PTC127);
3659 adapter->stats.ptc255 += er32(PTC255);
3660 adapter->stats.ptc511 += er32(PTC511);
3661 adapter->stats.ptc1023 += er32(PTC1023);
3662 adapter->stats.ptc1522 += er32(PTC1522);
3663
3664 adapter->stats.mptc += er32(MPTC);
3665 adapter->stats.bptc += er32(BPTC);
3666
3667
3668
3669 hw->tx_packet_delta = er32(TPT);
3670 adapter->stats.tpt += hw->tx_packet_delta;
3671 hw->collision_delta = er32(COLC);
3672 adapter->stats.colc += hw->collision_delta;
3673
3674 if (hw->mac_type >= e1000_82543) {
3675 adapter->stats.algnerrc += er32(ALGNERRC);
3676 adapter->stats.rxerrc += er32(RXERRC);
3677 adapter->stats.tncrs += er32(TNCRS);
3678 adapter->stats.cexterr += er32(CEXTERR);
3679 adapter->stats.tsctc += er32(TSCTC);
3680 adapter->stats.tsctfc += er32(TSCTFC);
3681 }
3682
3683
3684 netdev->stats.multicast = adapter->stats.mprc;
3685 netdev->stats.collisions = adapter->stats.colc;
3686
3687 /* Rx Errors */
3688
3689 /* RLEC on some newer hardware can be incorrect so build
3690  * our own version based on RUC and ROC
3691  */
3692 netdev->stats.rx_errors = adapter->stats.rxerrc +
3693 adapter->stats.crcerrs + adapter->stats.algnerrc +
3694 adapter->stats.ruc + adapter->stats.roc +
3695 adapter->stats.cexterr;
3696 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3697 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3698 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3699 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3700 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3701
3702
3703 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3704 netdev->stats.tx_errors = adapter->stats.txerrc;
3705 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3706 netdev->stats.tx_window_errors = adapter->stats.latecol;
3707 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3708 if (hw->bad_tx_carr_stats_fd &&
3709 adapter->link_duplex == FULL_DUPLEX) {
3710 netdev->stats.tx_carrier_errors = 0;
3711 adapter->stats.tncrs = 0;
3712 }
3713
3714
3715
3716
3717 if (hw->media_type == e1000_media_type_copper) {
3718 if ((adapter->link_speed == SPEED_1000) &&
3719 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3720 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3721 adapter->phy_stats.idle_errors += phy_tmp;
3722 }
3723
3724 if ((hw->mac_type <= e1000_82546) &&
3725 (hw->phy_type == e1000_phy_m88) &&
3726 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3727 adapter->phy_stats.receive_errors += phy_tmp;
3728 }
3729
3730
3731 if (hw->has_smbus) {
3732 adapter->stats.mgptc += er32(MGTPTC);
3733 adapter->stats.mgprc += er32(MGTPRC);
3734 adapter->stats.mgpdc += er32(MGTPDC);
3735 }
3736
3737 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3738 }
3739
3740 /**
3741  * e1000_intr - Interrupt Handler
3742  * @irq: interrupt number
3743  * @data: pointer to a network interface device structure
3744  **/
3745 static irqreturn_t e1000_intr(int irq, void *data)
3746 {
3747 struct net_device *netdev = data;
3748 struct e1000_adapter *adapter = netdev_priv(netdev);
3749 struct e1000_hw *hw = &adapter->hw;
3750 u32 icr = er32(ICR);
3751
3752 if (unlikely(!icr))
3753 return IRQ_NONE;  /* Not our interrupt */
3754
3755 /* we might have caused the interrupt, but the above
3756  * read cleared it, and just in case the driver is
3757  * down there is nothing to do so return handled
3758  */
3759 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3760 return IRQ_HANDLED;
3761
3762 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3763 hw->get_link_status = 1;
3764
3765 if (!test_bit(__E1000_DOWN, &adapter->flags))
3766 schedule_delayed_work(&adapter->watchdog_task, 1);
3767 }
3768
3769
3770 ew32(IMC, ~0);
3771 E1000_WRITE_FLUSH();
3772
3773 if (likely(napi_schedule_prep(&adapter->napi))) {
3774 adapter->total_tx_bytes = 0;
3775 adapter->total_tx_packets = 0;
3776 adapter->total_rx_bytes = 0;
3777 adapter->total_rx_packets = 0;
3778 __napi_schedule(&adapter->napi);
3779 } else {
3780 /* this really should not happen! if it does it is basically a
3781  * bug, but not a hard error, so enable ints and continue
3782  */
3783 if (!test_bit(__E1000_DOWN, &adapter->flags))
3784 e1000_irq_enable(adapter);
3785 }
3786
3787 return IRQ_HANDLED;
3788 }
3789
3790 /**
3791  * e1000_clean - NAPI Rx polling callback
3792  * @napi: napi struct containing references to driver info
3793  * @budget: amount of work driver is allowed to do
3794  **/
3795 static int e1000_clean(struct napi_struct *napi, int budget)
3796 {
3797 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3798 napi);
3799 int tx_clean_complete = 0, work_done = 0;
3800
3801 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3802
3803 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3804
3805 if (!tx_clean_complete || work_done == budget)
3806 return budget;
3807
3808 /* All Tx work is done and the Rx budget was not exhausted: try to
3809  * leave polling mode, and only re-enable interrupts if NAPI really
3810  * completed (and the adapter is not going down). */
3811 if (likely(napi_complete_done(napi, work_done))) {
3812 if (likely(adapter->itr_setting & 3))
3813 e1000_set_itr(adapter);
3814 if (!test_bit(__E1000_DOWN, &adapter->flags))
3815 e1000_irq_enable(adapter);
3816 }
3817
3818 return work_done;
3819 }
3820
3821 /**
3822  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3823  * @adapter: board private structure
3824  * @tx_ring: ring to clean
3825  **/
3826 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3827 struct e1000_tx_ring *tx_ring)
3828 {
3829 struct e1000_hw *hw = &adapter->hw;
3830 struct net_device *netdev = adapter->netdev;
3831 struct e1000_tx_desc *tx_desc, *eop_desc;
3832 struct e1000_tx_buffer *buffer_info;
3833 unsigned int i, eop;
3834 unsigned int count = 0;
3835 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3836 unsigned int bytes_compl = 0, pkts_compl = 0;
3837
3838 i = tx_ring->next_to_clean;
3839 eop = tx_ring->buffer_info[i].next_to_watch;
3840 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3841
3842 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3843 (count < tx_ring->count)) {
3844 bool cleaned = false;
3845 dma_rmb();
3846 for ( ; !cleaned; count++) {
3847 tx_desc = E1000_TX_DESC(*tx_ring, i);
3848 buffer_info = &tx_ring->buffer_info[i];
3849 cleaned = (i == eop);
3850
3851 if (cleaned) {
3852 total_tx_packets += buffer_info->segs;
3853 total_tx_bytes += buffer_info->bytecount;
3854 if (buffer_info->skb) {
3855 bytes_compl += buffer_info->skb->len;
3856 pkts_compl++;
3857 }
3858
3859 }
3860 e1000_unmap_and_free_tx_resource(adapter, buffer_info,
3861 64);
3862 tx_desc->upper.data = 0;
3863
3864 if (unlikely(++i == tx_ring->count))
3865 i = 0;
3866 }
3867
3868 eop = tx_ring->buffer_info[i].next_to_watch;
3869 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3870 }
3871
3872 /* Publish next_to_clean with release semantics so the cleanup
3873  * above is visible before the xmit path re-checks for free slots.
3874  */
3875 smp_store_release(&tx_ring->next_to_clean, i);
3876
3877 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3878
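/* Only wake the queue once a reasonable number of descriptors are free
 * again, so the queue is not restarted just to stop immediately.
 */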
3879 #define TX_WAKE_THRESHOLD 32
3880 if (unlikely(count && netif_carrier_ok(netdev) &&
3881 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3882 /* Make sure that anybody stopping the queue after this
3883  * sees the new next_to_clean.
3884  */
3885 smp_mb();
3886
3887 if (netif_queue_stopped(netdev) &&
3888 !(test_bit(__E1000_DOWN, &adapter->flags))) {
3889 netif_wake_queue(netdev);
3890 ++adapter->restart_queue;
3891 }
3892 }
3893
3894 if (adapter->detect_tx_hung) {
3895 /* Detect a transmit hang in hardware, this serializes the
3896  * check with the clearing of time_stamp and movement of i
3897  */
3898 adapter->detect_tx_hung = false;
3899 if (tx_ring->buffer_info[eop].time_stamp &&
3900 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3901 (adapter->tx_timeout_factor * HZ)) &&
3902 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3903
3904
3905 e_err(drv, "Detected Tx Unit Hang\n"
3906 " Tx Queue <%lu>\n"
3907 " TDH <%x>\n"
3908 " TDT <%x>\n"
3909 " next_to_use <%x>\n"
3910 " next_to_clean <%x>\n"
3911 "buffer_info[next_to_clean]\n"
3912 " time_stamp <%lx>\n"
3913 " next_to_watch <%x>\n"
3914 " jiffies <%lx>\n"
3915 " next_to_watch.status <%x>\n",
3916 (unsigned long)(tx_ring - adapter->tx_ring),
3917 readl(hw->hw_addr + tx_ring->tdh),
3918 readl(hw->hw_addr + tx_ring->tdt),
3919 tx_ring->next_to_use,
3920 tx_ring->next_to_clean,
3921 tx_ring->buffer_info[eop].time_stamp,
3922 eop,
3923 jiffies,
3924 eop_desc->upper.fields.status);
3925 e1000_dump(adapter);
3926 netif_stop_queue(netdev);
3927 }
3928 }
3929 adapter->total_tx_bytes += total_tx_bytes;
3930 adapter->total_tx_packets += total_tx_packets;
3931 netdev->stats.tx_bytes += total_tx_bytes;
3932 netdev->stats.tx_packets += total_tx_packets;
3933 return count < tx_ring->count;
3934 }
3935
3936 /**
3937  * e1000_rx_checksum - Receive Checksum Offload for 82543
3938  * @adapter:     board private structure
3939  * @status_err:  receive descriptor status and error fields
3940  * @csum:        receive descriptor csum field
3941  * @skb:         socket buffer with received data
3942  **/
3943 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3944 u32 csum, struct sk_buff *skb)
3945 {
3946 struct e1000_hw *hw = &adapter->hw;
3947 u16 status = (u16)status_err;
3948 u8 errors = (u8)(status_err >> 24);
3949
3950 skb_checksum_none_assert(skb);
3951
3952 /* 82543 or newer only */
3953 if (unlikely(hw->mac_type < e1000_82543))
3954 return;
3955
3956 /* Ignore Checksum bit is set */
3957 if (unlikely(status & E1000_RXD_STAT_IXSM))
3958 return;
3959
3960 /* TCP/UDP checksum error bit is set */
3961 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3962 /* let the stack verify checksum errors */
3963 adapter->hw_csum_err++;
3964 return;
3965 }
3966
3967 /* TCP/UDP Checksum has not been calculated */
3968 if (!(status & E1000_RXD_STAT_TCPCS))
3969 return;
3970
3971 /* It must be a TCP or UDP packet with a valid checksum */
3972 skb->ip_summed = CHECKSUM_UNNECESSARY;
3973 adapter->hw_csum_good++;
3974 }
3975
3976 /**
3977  * e1000_consume_page - helper function for jumbo Rx path
3978  * @bi: software descriptor shadow data
3979  * @skb: skb being modified
3980  * @length: length of data being added
3981  **/
3982 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3983 u16 length)
3984 {
3985 bi->rxbuf.page = NULL;
3986 skb->len += length;
3987 skb->data_len += length;
3988 skb->truesize += PAGE_SIZE;
3989 }
3990
3991 /**
3992  * e1000_receive_skb - helper function to handle rx indications
3993  * @adapter: board private structure
3994  * @status: descriptor status field as written by hardware
3995  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3996  * @skb: pointer to sk_buff to be indicated to stack
3997  **/
3998 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3999 __le16 vlan, struct sk_buff *skb)
4000 {
4001 skb->protocol = eth_type_trans(skb, adapter->netdev);
4002
4003 if (status & E1000_RXD_STAT_VP) {
4004 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4005
4006 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4007 }
4008 napi_gro_receive(&adapter->napi, skb);
4009 }
4010
4011 /**
4012  * e1000_tbi_adjust_stats
4013  * @hw: Struct containing variables accessed by shared code
4014  * @stats: pointer to stats struct
4015  * @frame_len: the length of the frame in question
4016  * @mac_addr: the Ethernet destination address of the frame in question
4017  *
4018  * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4019  **/
4020 static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4021 struct e1000_hw_stats *stats,
4022 u32 frame_len, const u8 *mac_addr)
4023 {
4024 u64 carry_bit;
4025
4026
4027 frame_len--;
4028
4029 /* We need to adjust the statistics counters, since the hardware
4030  * counters overcount this packet as a CRC error and undercount
4031  * the packet as a good packet
4032  */
4033 stats->crcerrs--;
4034
4035 stats->gprc++;
4036
4037
4038 carry_bit = 0x80000000 & stats->gorcl;
4039 stats->gorcl += frame_len;
4040
4041 /* gorcl is the low 32 bits of the Good Octets Received counter
4042  * pair.  carry_bit sampled bit 31 before the add above; if that
4043  * bit was set and is now clear, adding frame_len wrapped the low
4044  * half of the counter, so propagate the carry into the high
4045  * 32 bits kept in gorch.
4046  */
4047 if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4048 stats->gorch++;
4049
4050
4051
4052
4053 if (is_broadcast_ether_addr(mac_addr))
4054 stats->bprc++;
4055 else if (is_multicast_ether_addr(mac_addr))
4056 stats->mprc++;
4057
4058 if (frame_len == hw->max_frame_size) {
4059
4060
4061
4062 if (stats->roc > 0)
4063 stats->roc--;
4064 }
4065
4066
4067
4068
4069 if (frame_len == 64) {
4070 stats->prc64++;
4071 stats->prc127--;
4072 } else if (frame_len == 127) {
4073 stats->prc127++;
4074 stats->prc255--;
4075 } else if (frame_len == 255) {
4076 stats->prc255++;
4077 stats->prc511--;
4078 } else if (frame_len == 511) {
4079 stats->prc511++;
4080 stats->prc1023--;
4081 } else if (frame_len == 1023) {
4082 stats->prc1023++;
4083 stats->prc1522--;
4084 } else if (frame_len == 1522) {
4085 stats->prc1522++;
4086 }
4087 }
4088
4089 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4090 u8 status, u8 errors,
4091 u32 length, const u8 *data)
4092 {
4093 struct e1000_hw *hw = &adapter->hw;
4094 u8 last_byte = *(data + length - 1);
4095
4096 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4097 unsigned long irq_flags;
4098
4099 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4100 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4101 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4102
4103 return true;
4104 }
4105
4106 return false;
4107 }
4108
4109 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4110 unsigned int bufsz)
4111 {
4112 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4113
4114 if (unlikely(!skb))
4115 adapter->alloc_rx_buff_failed++;
4116 return skb;
4117 }
4118
4119 /**
4120  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4121  * @adapter: board private structure
4122  * @rx_ring: ring to clean
4123  * @work_done: amount of napi work completed this call
4124  * @work_to_do: max amount of work allowed for this call to do
4125  *
4126  * the return value indicates whether actual cleaning was done, there
4127  * is no guarantee that everything was cleaned
4128  **/
4129 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4130 struct e1000_rx_ring *rx_ring,
4131 int *work_done, int work_to_do)
4132 {
4133 struct net_device *netdev = adapter->netdev;
4134 struct pci_dev *pdev = adapter->pdev;
4135 struct e1000_rx_desc *rx_desc, *next_rxd;
4136 struct e1000_rx_buffer *buffer_info, *next_buffer;
4137 u32 length;
4138 unsigned int i;
4139 int cleaned_count = 0;
4140 bool cleaned = false;
4141 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4142
4143 i = rx_ring->next_to_clean;
4144 rx_desc = E1000_RX_DESC(*rx_ring, i);
4145 buffer_info = &rx_ring->buffer_info[i];
4146
4147 while (rx_desc->status & E1000_RXD_STAT_DD) {
4148 struct sk_buff *skb;
4149 u8 status;
4150
4151 if (*work_done >= work_to_do)
4152 break;
4153 (*work_done)++;
4154 dma_rmb();
4155
4156 status = rx_desc->status;
4157
4158 if (++i == rx_ring->count)
4159 i = 0;
4160
4161 next_rxd = E1000_RX_DESC(*rx_ring, i);
4162 prefetch(next_rxd);
4163
4164 next_buffer = &rx_ring->buffer_info[i];
4165
4166 cleaned = true;
4167 cleaned_count++;
4168 dma_unmap_page(&pdev->dev, buffer_info->dma,
4169 adapter->rx_buffer_len, DMA_FROM_DEVICE);
4170 buffer_info->dma = 0;
4171
4172 length = le16_to_cpu(rx_desc->length);
4173
4174
4175 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4176 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4177 u8 *mapped = page_address(buffer_info->rxbuf.page);
4178
4179 if (e1000_tbi_should_accept(adapter, status,
4180 rx_desc->errors,
4181 length, mapped)) {
4182 length--;
4183 } else if (netdev->features & NETIF_F_RXALL) {
4184 goto process_skb;
4185 } else {
4186 /* an error means any chain goes out the
4187  * window too
4188  */
4189 dev_kfree_skb(rx_ring->rx_skb_top);
4190 rx_ring->rx_skb_top = NULL;
4191 goto next_desc;
4192 }
4193 }
4194
4195 #define rxtop rx_ring->rx_skb_top
4196 process_skb:
4197 if (!(status & E1000_RXD_STAT_EOP)) {
4198
4199 if (!rxtop) {
4200
4201 rxtop = napi_get_frags(&adapter->napi);
4202 if (!rxtop)
4203 break;
4204
4205 skb_fill_page_desc(rxtop, 0,
4206 buffer_info->rxbuf.page,
4207 0, length);
4208 } else {
4209
4210 skb_fill_page_desc(rxtop,
4211 skb_shinfo(rxtop)->nr_frags,
4212 buffer_info->rxbuf.page, 0, length);
4213 }
4214 e1000_consume_page(buffer_info, rxtop, length);
4215 goto next_desc;
4216 } else {
4217 if (rxtop) {
4218
4219 skb_fill_page_desc(rxtop,
4220 skb_shinfo(rxtop)->nr_frags,
4221 buffer_info->rxbuf.page, 0, length);
4222 skb = rxtop;
4223 rxtop = NULL;
4224 e1000_consume_page(buffer_info, skb, length);
4225 } else {
4226 struct page *p;
4227 /* no chain, got EOP, this buf is the packet
4228  * copybreak to save the put_page/alloc_page
4229  */
4230 p = buffer_info->rxbuf.page;
4231 if (length <= copybreak) {
4232 u8 *vaddr;
4233
4234 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4235 length -= 4;
4236 skb = e1000_alloc_rx_skb(adapter,
4237 length);
4238 if (!skb)
4239 break;
4240
4241 vaddr = kmap_atomic(p);
4242 memcpy(skb_tail_pointer(skb), vaddr,
4243 length);
4244 kunmap_atomic(vaddr);
4245 /* re-use the page, so don't erase
4246  * buffer_info->rxbuf.page
4247  */
4248 skb_put(skb, length);
4249 e1000_rx_checksum(adapter,
4250 status | rx_desc->errors << 24,
4251 le16_to_cpu(rx_desc->csum), skb);
4252
4253 total_rx_bytes += skb->len;
4254 total_rx_packets++;
4255
4256 e1000_receive_skb(adapter, status,
4257 rx_desc->special, skb);
4258 goto next_desc;
4259 } else {
4260 skb = napi_get_frags(&adapter->napi);
4261 if (!skb) {
4262 adapter->alloc_rx_buff_failed++;
4263 break;
4264 }
4265 skb_fill_page_desc(skb, 0, p, 0,
4266 length);
4267 e1000_consume_page(buffer_info, skb,
4268 length);
4269 }
4270 }
4271 }
4272
4273
4274 e1000_rx_checksum(adapter,
4275 (u32)(status) |
4276 ((u32)(rx_desc->errors) << 24),
4277 le16_to_cpu(rx_desc->csum), skb);
4278
4279 total_rx_bytes += (skb->len - 4);
4280 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4281 pskb_trim(skb, skb->len - 4);
4282 total_rx_packets++;
4283
4284 if (status & E1000_RXD_STAT_VP) {
4285 __le16 vlan = rx_desc->special;
4286 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4287
4288 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4289 }
4290
4291 napi_gro_frags(&adapter->napi);
4292
4293 next_desc:
4294 rx_desc->status = 0;
4295
4296
4297 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4298 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4299 cleaned_count = 0;
4300 }
4301
4302
4303 rx_desc = next_rxd;
4304 buffer_info = next_buffer;
4305 }
4306 rx_ring->next_to_clean = i;
4307
4308 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4309 if (cleaned_count)
4310 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4311
4312 adapter->total_rx_packets += total_rx_packets;
4313 adapter->total_rx_bytes += total_rx_bytes;
4314 netdev->stats.rx_bytes += total_rx_bytes;
4315 netdev->stats.rx_packets += total_rx_packets;
4316 return cleaned;
4317 }
4318
4319 /* this should improve performance for small packets with large amounts
4320  * of reassembly being done in the stack
4321  */
4322 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4323 struct e1000_rx_buffer *buffer_info,
4324 u32 length, const void *data)
4325 {
4326 struct sk_buff *skb;
4327
4328 if (length > copybreak)
4329 return NULL;
4330
4331 skb = e1000_alloc_rx_skb(adapter, length);
4332 if (!skb)
4333 return NULL;
4334
4335 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4336 length, DMA_FROM_DEVICE);
4337
4338 skb_put_data(skb, data, length);
4339
4340 return skb;
4341 }
4342
4343 /**
4344  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4345  * @adapter: board private structure
4346  * @rx_ring: ring to clean
4347  * @work_done: amount of napi work completed this call
4348  * @work_to_do: max amount of work allowed for this call to do
4349  **/
4350 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4351 struct e1000_rx_ring *rx_ring,
4352 int *work_done, int work_to_do)
4353 {
4354 struct net_device *netdev = adapter->netdev;
4355 struct pci_dev *pdev = adapter->pdev;
4356 struct e1000_rx_desc *rx_desc, *next_rxd;
4357 struct e1000_rx_buffer *buffer_info, *next_buffer;
4358 u32 length;
4359 unsigned int i;
4360 int cleaned_count = 0;
4361 bool cleaned = false;
4362 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4363
4364 i = rx_ring->next_to_clean;
4365 rx_desc = E1000_RX_DESC(*rx_ring, i);
4366 buffer_info = &rx_ring->buffer_info[i];
4367
4368 while (rx_desc->status & E1000_RXD_STAT_DD) {
4369 struct sk_buff *skb;
4370 u8 *data;
4371 u8 status;
4372
4373 if (*work_done >= work_to_do)
4374 break;
4375 (*work_done)++;
4376 dma_rmb();
4377
4378 status = rx_desc->status;
4379 length = le16_to_cpu(rx_desc->length);
4380
4381 data = buffer_info->rxbuf.data;
4382 prefetch(data);
4383 skb = e1000_copybreak(adapter, buffer_info, length, data);
4384 if (!skb) {
4385 unsigned int frag_len = e1000_frag_len(adapter);
4386
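/* Packet is larger than copybreak: build the skb around the existing
 * receive buffer instead of copying it; the headroom reserved at
 * allocation time is given back with skb_reserve() below.
 */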
4387 skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
4388 if (!skb) {
4389 adapter->alloc_rx_buff_failed++;
4390 break;
4391 }
4392
4393 skb_reserve(skb, E1000_HEADROOM);
4394 dma_unmap_single(&pdev->dev, buffer_info->dma,
4395 adapter->rx_buffer_len,
4396 DMA_FROM_DEVICE);
4397 buffer_info->dma = 0;
4398 buffer_info->rxbuf.data = NULL;
4399 }
4400
4401 if (++i == rx_ring->count)
4402 i = 0;
4403
4404 next_rxd = E1000_RX_DESC(*rx_ring, i);
4405 prefetch(next_rxd);
4406
4407 next_buffer = &rx_ring->buffer_info[i];
4408
4409 cleaned = true;
4410 cleaned_count++;
4411
4412 /* !EOP means multiple descriptors were used to store a single
4413  * packet, if that's the case we need to toss it.  In fact, we
4414  * need to toss every packet with the EOP bit clear and the
4415  * next frame that _does_ have the EOP bit set, as it is by
4416  * definition only a frame fragment
4417  */
4418 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4419 adapter->discarding = true;
4420
4421 if (adapter->discarding) {
4422
4423 netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4424 dev_kfree_skb(skb);
4425 if (status & E1000_RXD_STAT_EOP)
4426 adapter->discarding = false;
4427 goto next_desc;
4428 }
4429
4430 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4431 if (e1000_tbi_should_accept(adapter, status,
4432 rx_desc->errors,
4433 length, data)) {
4434 length--;
4435 } else if (netdev->features & NETIF_F_RXALL) {
4436 goto process_skb;
4437 } else {
4438 dev_kfree_skb(skb);
4439 goto next_desc;
4440 }
4441 }
4442
4443 process_skb:
4444 total_rx_bytes += (length - 4);
4445 total_rx_packets++;
4446
4447 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4448 /* adjust length to remove Ethernet CRC, this must be
4449  * done after the TBI_ACCEPT workaround above
4450  */
4451 length -= 4;
4452
4453 if (buffer_info->rxbuf.data == NULL)
4454 skb_put(skb, length);
4455 else
4456 skb_trim(skb, length);
4457
4458
4459 e1000_rx_checksum(adapter,
4460 (u32)(status) |
4461 ((u32)(rx_desc->errors) << 24),
4462 le16_to_cpu(rx_desc->csum), skb);
4463
4464 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4465
4466 next_desc:
4467 rx_desc->status = 0;
4468
4469
4470 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4471 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4472 cleaned_count = 0;
4473 }
4474
4475
4476 rx_desc = next_rxd;
4477 buffer_info = next_buffer;
4478 }
4479 rx_ring->next_to_clean = i;
4480
4481 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4482 if (cleaned_count)
4483 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4484
4485 adapter->total_rx_packets += total_rx_packets;
4486 adapter->total_rx_bytes += total_rx_bytes;
4487 netdev->stats.rx_bytes += total_rx_bytes;
4488 netdev->stats.rx_packets += total_rx_packets;
4489 return cleaned;
4490 }
4491
4492 /**
4493  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4494  * @adapter: address of board private structure
4495  * @rx_ring: pointer to receive ring structure
4496  * @cleaned_count: number of buffers to allocate this pass
4497  **/
4498 static void
4499 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4500 struct e1000_rx_ring *rx_ring, int cleaned_count)
4501 {
4502 struct pci_dev *pdev = adapter->pdev;
4503 struct e1000_rx_desc *rx_desc;
4504 struct e1000_rx_buffer *buffer_info;
4505 unsigned int i;
4506
4507 i = rx_ring->next_to_use;
4508 buffer_info = &rx_ring->buffer_info[i];
4509
4510 while (cleaned_count--) {
4511
4512 if (!buffer_info->rxbuf.page) {
4513 buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4514 if (unlikely(!buffer_info->rxbuf.page)) {
4515 adapter->alloc_rx_buff_failed++;
4516 break;
4517 }
4518 }
4519
4520 if (!buffer_info->dma) {
4521 buffer_info->dma = dma_map_page(&pdev->dev,
4522 buffer_info->rxbuf.page, 0,
4523 adapter->rx_buffer_len,
4524 DMA_FROM_DEVICE);
4525 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4526 put_page(buffer_info->rxbuf.page);
4527 buffer_info->rxbuf.page = NULL;
4528 buffer_info->dma = 0;
4529 adapter->alloc_rx_buff_failed++;
4530 break;
4531 }
4532 }
4533
4534 rx_desc = E1000_RX_DESC(*rx_ring, i);
4535 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4536
4537 if (unlikely(++i == rx_ring->count))
4538 i = 0;
4539 buffer_info = &rx_ring->buffer_info[i];
4540 }
4541
4542 if (likely(rx_ring->next_to_use != i)) {
4543 rx_ring->next_to_use = i;
4544 if (unlikely(i-- == 0))
4545 i = (rx_ring->count - 1);
4546
4547 /* Force memory writes to complete before letting h/w
4548  * know there are new descriptors to fetch.  (Only
4549  * applicable for weak-ordered memory model archs,
4550  * such as IA-64).
4551  */
4552 dma_wmb();
4553 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4554 }
4555 }
4556
4557 /**
4558  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4559  * @adapter: address of board private structure
4560  * @rx_ring: pointer to ring struct
4561  * @cleaned_count: number of new Rx buffers to try to allocate
4562  **/
4563 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4564 struct e1000_rx_ring *rx_ring,
4565 int cleaned_count)
4566 {
4567 struct e1000_hw *hw = &adapter->hw;
4568 struct pci_dev *pdev = adapter->pdev;
4569 struct e1000_rx_desc *rx_desc;
4570 struct e1000_rx_buffer *buffer_info;
4571 unsigned int i;
4572 unsigned int bufsz = adapter->rx_buffer_len;
4573
4574 i = rx_ring->next_to_use;
4575 buffer_info = &rx_ring->buffer_info[i];
4576
4577 while (cleaned_count--) {
4578 void *data;
4579
4580 if (buffer_info->rxbuf.data)
4581 goto skip;
4582
4583 data = e1000_alloc_frag(adapter);
4584 if (!data) {
4585
4586 adapter->alloc_rx_buff_failed++;
4587 break;
4588 }
4589
4590
4591 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4592 void *olddata = data;
4593 e_err(rx_err, "skb align check failed: %u bytes at "
4594 "%p\n", bufsz, data);
4595
4596 data = e1000_alloc_frag(adapter);
4597
4598 if (!data) {
4599 skb_free_frag(olddata);
4600 adapter->alloc_rx_buff_failed++;
4601 break;
4602 }
4603
4604 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4605
4606 skb_free_frag(data);
4607 skb_free_frag(olddata);
4608 adapter->alloc_rx_buff_failed++;
4609 break;
4610 }
4611
4612
4613 skb_free_frag(olddata);
4614 }
4615 buffer_info->dma = dma_map_single(&pdev->dev,
4616 data,
4617 adapter->rx_buffer_len,
4618 DMA_FROM_DEVICE);
4619 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4620 skb_free_frag(data);
4621 buffer_info->dma = 0;
4622 adapter->alloc_rx_buff_failed++;
4623 break;
4624 }
4625
4626 /* XXX if it was allocated cleanly it will never map to a
4627  * boundary crossing
4628  */
4629
4630 /* Fix for errata 23, can't cross 64kB boundary */
4631 if (!e1000_check_64k_bound(adapter,
4632 (void *)(unsigned long)buffer_info->dma,
4633 adapter->rx_buffer_len)) {
4634 e_err(rx_err, "dma align check failed: %u bytes at "
4635 "%p\n", adapter->rx_buffer_len,
4636 (void *)(unsigned long)buffer_info->dma);
4637
4638 dma_unmap_single(&pdev->dev, buffer_info->dma,
4639 adapter->rx_buffer_len,
4640 DMA_FROM_DEVICE);
4641
4642 skb_free_frag(data);
4643 buffer_info->rxbuf.data = NULL;
4644 buffer_info->dma = 0;
4645
4646 adapter->alloc_rx_buff_failed++;
4647 break;
4648 }
4649 buffer_info->rxbuf.data = data;
4650 skip:
4651 rx_desc = E1000_RX_DESC(*rx_ring, i);
4652 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4653
4654 if (unlikely(++i == rx_ring->count))
4655 i = 0;
4656 buffer_info = &rx_ring->buffer_info[i];
4657 }
4658
4659 if (likely(rx_ring->next_to_use != i)) {
4660 rx_ring->next_to_use = i;
4661 if (unlikely(i-- == 0))
4662 i = (rx_ring->count - 1);
4663
4664 /* Force memory writes to complete before letting h/w
4665  * know there are new descriptors to fetch.  (Only
4666  * applicable for weak-ordered memory model archs,
4667  * such as IA-64).
4668  */
4669 dma_wmb();
4670 writel(i, hw->hw_addr + rx_ring->rdt);
4671 }
4672 }
4673
4674 /**
4675  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4676  * @adapter: address of board private structure
4677  **/
4678 static void e1000_smartspeed(struct e1000_adapter *adapter)
4679 {
4680 struct e1000_hw *hw = &adapter->hw;
4681 u16 phy_status;
4682 u16 phy_ctrl;
4683
4684 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4685 !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4686 return;
4687
4688 if (adapter->smartspeed == 0) {
4689 /* If Master/Slave config fault is asserted twice,
4690  * we assume back-to-back
4691  */
4692 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4693 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4694 return;
4695 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4696 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4697 return;
4698 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4699 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4700 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4701 e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4702 phy_ctrl);
4703 adapter->smartspeed++;
4704 if (!e1000_phy_setup_autoneg(hw) &&
4705 !e1000_read_phy_reg(hw, PHY_CTRL,
4706 &phy_ctrl)) {
4707 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4708 MII_CR_RESTART_AUTO_NEG);
4709 e1000_write_phy_reg(hw, PHY_CTRL,
4710 phy_ctrl);
4711 }
4712 }
4713 return;
4714 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4715 /* If still no link, perhaps using 2/3 pair cable */
4716 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4717 phy_ctrl |= CR_1000T_MS_ENABLE;
4718 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4719 if (!e1000_phy_setup_autoneg(hw) &&
4720 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4721 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4722 MII_CR_RESTART_AUTO_NEG);
4723 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4724 }
4725 }
4726 /* Restart process after E1000_SMARTSPEED_MAX iterations */
4727 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4728 adapter->smartspeed = 0;
4729 }
4730
4731 /**
4732  * e1000_ioctl - handle device-specific ioctl requests
4733  * @netdev: pointer to our netdev
4734  * @ifr: pointer to interface request structure
4735  * @cmd: ioctl command
4736  **/
4737 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4738 {
4739 switch (cmd) {
4740 case SIOCGMIIPHY:
4741 case SIOCGMIIREG:
4742 case SIOCSMIIREG:
4743 return e1000_mii_ioctl(netdev, ifr, cmd);
4744 default:
4745 return -EOPNOTSUPP;
4746 }
4747 }
4748
4749 /**
4750  * e1000_mii_ioctl - perform MII PHY register reads and writes
4751  * @netdev: pointer to our netdev
4752  * @ifr: pointer to interface request structure
4753  * @cmd: ioctl command
4754  **/
4755 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4756 int cmd)
4757 {
4758 struct e1000_adapter *adapter = netdev_priv(netdev);
4759 struct e1000_hw *hw = &adapter->hw;
4760 struct mii_ioctl_data *data = if_mii(ifr);
4761 int retval;
4762 u16 mii_reg;
4763 unsigned long flags;
4764
4765 if (hw->media_type != e1000_media_type_copper)
4766 return -EOPNOTSUPP;
4767
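/* PHY register reads and writes below are serialized under stats_lock. */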
4768 switch (cmd) {
4769 case SIOCGMIIPHY:
4770 data->phy_id = hw->phy_addr;
4771 break;
4772 case SIOCGMIIREG:
4773 spin_lock_irqsave(&adapter->stats_lock, flags);
4774 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4775 &data->val_out)) {
4776 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4777 return -EIO;
4778 }
4779 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4780 break;
4781 case SIOCSMIIREG:
4782 if (data->reg_num & ~(0x1F))
4783 return -EFAULT;
4784 mii_reg = data->val_in;
4785 spin_lock_irqsave(&adapter->stats_lock, flags);
4786 if (e1000_write_phy_reg(hw, data->reg_num,
4787 mii_reg)) {
4788 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4789 return -EIO;
4790 }
4791 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4792 if (hw->media_type == e1000_media_type_copper) {
4793 switch (data->reg_num) {
4794 case PHY_CTRL:
4795 if (mii_reg & MII_CR_POWER_DOWN)
4796 break;
4797 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4798 hw->autoneg = 1;
4799 hw->autoneg_advertised = 0x2F;
4800 } else {
4801 u32 speed;
4802 if (mii_reg & 0x40)
4803 speed = SPEED_1000;
4804 else if (mii_reg & 0x2000)
4805 speed = SPEED_100;
4806 else
4807 speed = SPEED_10;
4808 retval = e1000_set_spd_dplx(
4809 adapter, speed,
4810 ((mii_reg & 0x100)
4811 ? DUPLEX_FULL :
4812 DUPLEX_HALF));
4813 if (retval)
4814 return retval;
4815 }
4816 if (netif_running(adapter->netdev))
4817 e1000_reinit_locked(adapter);
4818 else
4819 e1000_reset(adapter);
4820 break;
4821 case M88E1000_PHY_SPEC_CTRL:
4822 case M88E1000_EXT_PHY_SPEC_CTRL:
4823 if (e1000_phy_reset(hw))
4824 return -EIO;
4825 break;
4826 }
4827 } else {
4828 switch (data->reg_num) {
4829 case PHY_CTRL:
4830 if (mii_reg & MII_CR_POWER_DOWN)
4831 break;
4832 if (netif_running(adapter->netdev))
4833 e1000_reinit_locked(adapter);
4834 else
4835 e1000_reset(adapter);
4836 break;
4837 }
4838 }
4839 break;
4840 default:
4841 return -EOPNOTSUPP;
4842 }
4843 return E1000_SUCCESS;
4844 }
4845
4846 void e1000_pci_set_mwi(struct e1000_hw *hw)
4847 {
4848 struct e1000_adapter *adapter = hw->back;
4849 int ret_val = pci_set_mwi(adapter->pdev);
4850
4851 if (ret_val)
4852 e_err(probe, "Error in setting MWI\n");
4853 }
4854
4855 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4856 {
4857 struct e1000_adapter *adapter = hw->back;
4858
4859 pci_clear_mwi(adapter->pdev);
4860 }
4861
4862 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4863 {
4864 struct e1000_adapter *adapter = hw->back;
4865 return pcix_get_mmrbc(adapter->pdev);
4866 }
4867
4868 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4869 {
4870 struct e1000_adapter *adapter = hw->back;
4871 pcix_set_mmrbc(adapter->pdev, mmrbc);
4872 }
4873
4874 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4875 {
4876 outl(value, port);
4877 }
4878
4879 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4880 {
4881 u16 vid;
4882
4883 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4884 return true;
4885 return false;
4886 }
4887
4888 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4889 netdev_features_t features)
4890 {
4891 struct e1000_hw *hw = &adapter->hw;
4892 u32 ctrl;
4893
4894 ctrl = er32(CTRL);
4895 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4896 /* enable VLAN tag insert/strip */
4897 ctrl |= E1000_CTRL_VME;
4898 } else {
4899 /* disable VLAN tag insert/strip */
4900 ctrl &= ~E1000_CTRL_VME;
4901 }
4902 ew32(CTRL, ctrl);
4903 }
4904 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4905 bool filter_on)
4906 {
4907 struct e1000_hw *hw = &adapter->hw;
4908 u32 rctl;
4909
4910 if (!test_bit(__E1000_DOWN, &adapter->flags))
4911 e1000_irq_disable(adapter);
4912
4913 __e1000_vlan_mode(adapter, adapter->netdev->features);
4914 if (filter_on) {
4915 /* enable VLAN receive filtering */
4916 rctl = er32(RCTL);
4917 rctl &= ~E1000_RCTL_CFIEN;
4918 if (!(adapter->netdev->flags & IFF_PROMISC))
4919 rctl |= E1000_RCTL_VFE;
4920 ew32(RCTL, rctl);
4921 e1000_update_mng_vlan(adapter);
4922 } else {
4923 /* disable VLAN receive filtering */
4924 rctl = er32(RCTL);
4925 rctl &= ~E1000_RCTL_VFE;
4926 ew32(RCTL, rctl);
4927 }
4928
4929 if (!test_bit(__E1000_DOWN, &adapter->flags))
4930 e1000_irq_enable(adapter);
4931 }
4932
4933 static void e1000_vlan_mode(struct net_device *netdev,
4934 netdev_features_t features)
4935 {
4936 struct e1000_adapter *adapter = netdev_priv(netdev);
4937
4938 if (!test_bit(__E1000_DOWN, &adapter->flags))
4939 e1000_irq_disable(adapter);
4940
4941 __e1000_vlan_mode(adapter, features);
4942
4943 if (!test_bit(__E1000_DOWN, &adapter->flags))
4944 e1000_irq_enable(adapter);
4945 }
4946
4947 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4948 __be16 proto, u16 vid)
4949 {
4950 struct e1000_adapter *adapter = netdev_priv(netdev);
4951 struct e1000_hw *hw = &adapter->hw;
4952 u32 vfta, index;
4953
4954 if ((hw->mng_cookie.status &
4955 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4956 (vid == adapter->mng_vlan_id))
4957 return 0;
4958
4959 if (!e1000_vlan_used(adapter))
4960 e1000_vlan_filter_on_off(adapter, true);
4961
4962 /* add VID to filter table */
4963 index = (vid >> 5) & 0x7F;
4964 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4965 vfta |= (1 << (vid & 0x1F));
4966 e1000_write_vfta(hw, index, vfta);
4967
4968 set_bit(vid, adapter->active_vlans);
4969
4970 return 0;
4971 }
4972
4973 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4974 __be16 proto, u16 vid)
4975 {
4976 struct e1000_adapter *adapter = netdev_priv(netdev);
4977 struct e1000_hw *hw = &adapter->hw;
4978 u32 vfta, index;
4979
4980 if (!test_bit(__E1000_DOWN, &adapter->flags))
4981 e1000_irq_disable(adapter);
4982 if (!test_bit(__E1000_DOWN, &adapter->flags))
4983 e1000_irq_enable(adapter);
4984
4985 /* remove VID from filter table */
4986 index = (vid >> 5) & 0x7F;
4987 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4988 vfta &= ~(1 << (vid & 0x1F));
4989 e1000_write_vfta(hw, index, vfta);
4990
4991 clear_bit(vid, adapter->active_vlans);
4992
4993 if (!e1000_vlan_used(adapter))
4994 e1000_vlan_filter_on_off(adapter, false);
4995
4996 return 0;
4997 }
4998
4999 static void e1000_restore_vlan(struct e1000_adapter *adapter)
5000 {
5001 u16 vid;
5002
5003 if (!e1000_vlan_used(adapter))
5004 return;
5005
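/* Turn filtering back on and re-register every VLAN that was active before the reset. */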
5006 e1000_vlan_filter_on_off(adapter, true);
5007 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5008 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5009 }
5010
5011 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5012 {
5013 struct e1000_hw *hw = &adapter->hw;
5014
5015 hw->autoneg = 0;
5016
5017 /* Make sure dplx is at most 1 bit and lsb of speed is not set
5018  * for the switch() below to work
5019  */
5020 if ((spd & 1) || (dplx & ~1))
5021 goto err_inval;
5022
5023 /* Fiber NICs only allow 1000 Gbps full duplex */
5024 if ((hw->media_type == e1000_media_type_fiber) &&
5025 spd != SPEED_1000 &&
5026 dplx != DUPLEX_FULL)
5027 goto err_inval;
5028
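/* The sanity check above keeps dplx to a single bit and spd even, so spd + dplx is unique for each supported combination below. */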
5029 switch (spd + dplx) {
5030 case SPEED_10 + DUPLEX_HALF:
5031 hw->forced_speed_duplex = e1000_10_half;
5032 break;
5033 case SPEED_10 + DUPLEX_FULL:
5034 hw->forced_speed_duplex = e1000_10_full;
5035 break;
5036 case SPEED_100 + DUPLEX_HALF:
5037 hw->forced_speed_duplex = e1000_100_half;
5038 break;
5039 case SPEED_100 + DUPLEX_FULL:
5040 hw->forced_speed_duplex = e1000_100_full;
5041 break;
5042 case SPEED_1000 + DUPLEX_FULL:
5043 hw->autoneg = 1;
5044 hw->autoneg_advertised = ADVERTISE_1000_FULL;
5045 break;
5046 case SPEED_1000 + DUPLEX_HALF:
5047 default:
5048 goto err_inval;
5049 }
5050
5051 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5052 hw->mdix = AUTO_ALL_MODES;
5053
5054 return 0;
5055
5056 err_inval:
5057 e_err(probe, "Unsupported Speed/Duplex configuration\n");
5058 return -EINVAL;
5059 }
5060
5061 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5062 {
5063 struct net_device *netdev = pci_get_drvdata(pdev);
5064 struct e1000_adapter *adapter = netdev_priv(netdev);
5065 struct e1000_hw *hw = &adapter->hw;
5066 u32 ctrl, ctrl_ext, rctl, status;
5067 u32 wufc = adapter->wol;
5068
5069 netif_device_detach(netdev);
5070
5071 if (netif_running(netdev)) {
5072 int count = E1000_CHECK_RESET_COUNT;
5073
5074 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5075 usleep_range(10000, 20000);
5076
5077 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5078 e1000_down(adapter);
5079 }
5080
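/* If the link is already up, waking on a link-status change makes no sense, so drop that wake filter. */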
5081 status = er32(STATUS);
5082 if (status & E1000_STATUS_LU)
5083 wufc &= ~E1000_WUFC_LNKC;
5084
5085 if (wufc) {
5086 e1000_setup_rctl(adapter);
5087 e1000_set_rx_mode(netdev);
5088
5089 rctl = er32(RCTL);
5090
5091 /* turn on all-multi mode if wake on multicast is enabled */
5092 if (wufc & E1000_WUFC_MC)
5093 rctl |= E1000_RCTL_MPE;
5094
5095 /* enable receives in the hardware */
5096 ew32(RCTL, rctl | E1000_RCTL_EN);
5097
5098 if (hw->mac_type >= e1000_82540) {
5099 ctrl = er32(CTRL);
5100 /* advertise wake from D3Cold */
5101 #define E1000_CTRL_ADVD3WUC 0x00100000
5102 /* phy power management enable */
5103 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5104 ctrl |= E1000_CTRL_ADVD3WUC |
5105 E1000_CTRL_EN_PHY_PWR_MGMT;
5106 ew32(CTRL, ctrl);
5107 }
5108
5109 if (hw->media_type == e1000_media_type_fiber ||
5110 hw->media_type == e1000_media_type_internal_serdes) {
5111 /* keep the laser running in D3 */
5112 ctrl_ext = er32(CTRL_EXT);
5113 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5114 ew32(CTRL_EXT, ctrl_ext);
5115 }
5116
5117 ew32(WUC, E1000_WUC_PME_EN);
5118 ew32(WUFC, wufc);
5119 } else {
5120 ew32(WUC, 0);
5121 ew32(WUFC, 0);
5122 }
5123
5124 e1000_release_manageability(adapter);
5125
5126 *enable_wake = !!wufc;
5127
5128 /* make sure adapter isn't asleep if manageability is enabled */
5129 if (adapter->en_mng_pt)
5130 *enable_wake = true;
5131
5132 if (netif_running(netdev))
5133 e1000_free_irq(adapter);
5134
5135 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5136 pci_disable_device(pdev);
5137
5138 return 0;
5139 }
5140
5141 static int __maybe_unused e1000_suspend(struct device *dev)
5142 {
5143 int retval;
5144 struct pci_dev *pdev = to_pci_dev(dev);
5145 bool wake;
5146
5147 retval = __e1000_shutdown(pdev, &wake);
5148 device_set_wakeup_enable(dev, wake);
5149
5150 return retval;
5151 }
5152
5153 static int __maybe_unused e1000_resume(struct device *dev)
5154 {
5155 struct pci_dev *pdev = to_pci_dev(dev);
5156 struct net_device *netdev = pci_get_drvdata(pdev);
5157 struct e1000_adapter *adapter = netdev_priv(netdev);
5158 struct e1000_hw *hw = &adapter->hw;
5159 u32 err;
5160
5161 if (adapter->need_ioport)
5162 err = pci_enable_device(pdev);
5163 else
5164 err = pci_enable_device_mem(pdev);
5165 if (err) {
5166 pr_err("Cannot enable PCI device from suspend\n");
5167 return err;
5168 }
5169
5170 /* flush memory to make sure state is correct */
5171 smp_mb__before_atomic();
5172 clear_bit(__E1000_DISABLED, &adapter->flags);
5173 pci_set_master(pdev);
5174
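/* Clear any wake enables that were armed for suspend. */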
5175 pci_enable_wake(pdev, PCI_D3hot, 0);
5176 pci_enable_wake(pdev, PCI_D3cold, 0);
5177
5178 if (netif_running(netdev)) {
5179 err = e1000_request_irq(adapter);
5180 if (err)
5181 return err;
5182 }
5183
5184 e1000_power_up_phy(adapter);
5185 e1000_reset(adapter);
5186 ew32(WUS, ~0);
5187
5188 e1000_init_manageability(adapter);
5189
5190 if (netif_running(netdev))
5191 e1000_up(adapter);
5192
5193 netif_device_attach(netdev);
5194
5195 return 0;
5196 }
5197
5198 static void e1000_shutdown(struct pci_dev *pdev)
5199 {
5200 bool wake;
5201
5202 __e1000_shutdown(pdev, &wake);
5203
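/* Only arm PCI wake and enter D3hot when the system is really powering off; a plain reboot leaves the power state alone. */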
5204 if (system_state == SYSTEM_POWER_OFF) {
5205 pci_wake_from_d3(pdev, wake);
5206 pci_set_power_state(pdev, PCI_D3hot);
5207 }
5208 }
5209
5210 #ifdef CONFIG_NET_POLL_CONTROLLER
5211 /* Polling 'interrupt' - used by things like netconsole to send skbs
5212  * without having to re-enable interrupts. It's not called while
5213  * the interrupt routine is executing.
5214  */
5215 static void e1000_netpoll(struct net_device *netdev)
5216 {
5217 struct e1000_adapter *adapter = netdev_priv(netdev);
5218
5219 if (disable_hardirq(adapter->pdev->irq))
5220 e1000_intr(adapter->pdev->irq, netdev);
5221 enable_irq(adapter->pdev->irq);
5222 }
5223 #endif
5224
5225 /**
5226  * e1000_io_error_detected - called when PCI error is detected
5227  * @pdev: Pointer to PCI device
5228  * @state: The current pci connection state
5229  *
5230  * This function is called after a PCI bus error affecting
5231  * this device has been detected.
5232  */
5233 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5234 pci_channel_state_t state)
5235 {
5236 struct net_device *netdev = pci_get_drvdata(pdev);
5237 struct e1000_adapter *adapter = netdev_priv(netdev);
5238
5239 netif_device_detach(netdev);
5240
5241 if (state == pci_channel_io_perm_failure)
5242 return PCI_ERS_RESULT_DISCONNECT;
5243
5244 if (netif_running(netdev))
5245 e1000_down(adapter);
5246
5247 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5248 pci_disable_device(pdev);
5249
5250 /* Request a slot reset. */
5251 return PCI_ERS_RESULT_NEED_RESET;
5252 }
5253
5254 /**
5255  * e1000_io_slot_reset - called after the pci bus has been reset.
5256  * @pdev: Pointer to PCI device
5257  *
5258  * Restart the card from scratch, as if from a cold-boot. Implementation
5259  * resembles the first-half of the e1000_resume routine.
5260  */
5261 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5262 {
5263 struct net_device *netdev = pci_get_drvdata(pdev);
5264 struct e1000_adapter *adapter = netdev_priv(netdev);
5265 struct e1000_hw *hw = &adapter->hw;
5266 int err;
5267
5268 if (adapter->need_ioport)
5269 err = pci_enable_device(pdev);
5270 else
5271 err = pci_enable_device_mem(pdev);
5272 if (err) {
5273 pr_err("Cannot re-enable PCI device after reset.\n");
5274 return PCI_ERS_RESULT_DISCONNECT;
5275 }
5276
5277 /* flush memory to make sure state is correct */
5278 smp_mb__before_atomic();
5279 clear_bit(__E1000_DISABLED, &adapter->flags);
5280 pci_set_master(pdev);
5281
5282 pci_enable_wake(pdev, PCI_D3hot, 0);
5283 pci_enable_wake(pdev, PCI_D3cold, 0);
5284
5285 e1000_reset(adapter);
5286 ew32(WUS, ~0);
5287
5288 return PCI_ERS_RESULT_RECOVERED;
5289 }
5290
5291 /**
5292  * e1000_io_resume - called when traffic can start flowing again.
5293  * @pdev: Pointer to PCI device
5294  *
5295  * This callback is called when the error recovery driver tells us that
5296  * its OK to resume normal operation. Implementation resembles the
5297  * second-half of the e1000_resume routine.
5298  */
5299 static void e1000_io_resume(struct pci_dev *pdev)
5300 {
5301 struct net_device *netdev = pci_get_drvdata(pdev);
5302 struct e1000_adapter *adapter = netdev_priv(netdev);
5303
5304 e1000_init_manageability(adapter);
5305
5306 if (netif_running(netdev)) {
5307 if (e1000_up(adapter)) {
5308 pr_info("can't bring device back up after reset\n");
5309 return;
5310 }
5311 }
5312
5313 netif_device_attach(netdev);
5314 }
5315
5316