#include "e1000.h"
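
/**
 *  e1000e_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for a particular
 *  network interface.  The following bus information is determined and
 *  stored: bus width and PCIe function.
 **/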
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_bus_info *bus = &hw->bus;
	struct e1000_adapter *adapter = hw->adapter;
	u16 pcie_link_status, cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset) {
		bus->width = e1000_bus_width_unknown;
	} else {
		pci_read_config_word(adapter->pdev,
				     cap_offset + PCIE_LINK_STATUS,
				     &pcie_link_status);
		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCIE_LINK_WIDTH_MASK) >>
						    PCIE_LINK_WIDTH_SHIFT);
	}

	mac->ops.set_lan_id(hw);

	return 0;
}
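
/**
 *  e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 *  @hw: pointer to the HW structure
 *
 *  Determines the LAN function id by reading the Device Status register.
 **/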
void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	u32 reg;

	/* The status register reports the correct function number for the
	 * device regardless of function swap state.
	 */
	reg = er32(STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
}
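
/**
 *  e1000_set_lan_id_single_port - Set LAN id for a single port device
 *  @hw: pointer to the HW structure
 *
 *  Sets the LAN function id to zero for a single port device.
 **/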
void e1000_set_lan_id_single_port(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;

	bus->func = 0;
}
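
/**
 *  e1000_clear_vfta_generic - Clear VLAN filter table
 *  @hw: pointer to the HW structure
 *
 *  Clears the register array which contains the VLAN filter table by
 *  setting all the values to 0.
 **/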
void e1000_clear_vfta_generic(struct e1000_hw *hw)
{
	u32 offset;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
		e1e_flush();
	}
}
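
/**
 *  e1000_write_vfta_generic - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table.
 **/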
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
{
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
	e1e_flush();
}
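
/**
 *  e1000e_init_rx_addrs - Initialize receive addresses
 *  @hw: pointer to the HW structure
 *  @rar_count: receive address registers
 *
 *  Sets the receive address registers by programming the base receive
 *  address register with the device's MAC address and clearing all the
 *  other receive address registers to 0.
 **/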
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = { 0 };

	/* Setup the receive address */
	e_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_count - 1) receive addresses */
	e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}
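
/**
 *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the nvm for an alternate MAC address.  An alternate MAC address
 *  can be set up by pre-boot software and must be treated like a permanent
 *  address, overriding the actual permanent MAC address.  If an alternate
 *  MAC address is found, it is programmed into RAR0.
 **/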
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
	if (ret_val)
		return ret_val;

	/* Alternate MAC address is handled by the option ROM for 82573.
	 * Check for it before trying to use it.
	 */
	if (hw->mac.type == e1000_82573)
		return 0;

	ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				 &nvm_alt_mac_addr_offset);
	if (ret_val) {
		e_dbg("NVM Read Error\n");
		return ret_val;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		return 0;

	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
		if (ret_val) {
			e_dbg("NVM Read Error\n");
			return ret_val;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		return 0;
	}

	/* We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR.  Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

	return 0;
}
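
/**
 *  e1000e_rar_get_count_generic - Read number of RARs
 *  @hw: pointer to the HW structure
 *
 *  Reads the number of RAR entries.
 **/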
u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
{
	return hw->mac.rar_entry_count;
}
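
/**
 *  e1000e_rar_set_generic - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/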
int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));

	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into a single
	 * burst write, which will malfunction on some parts.  The flushes
	 * avoid this.
	 */
	ew32(RAL(index), rar_low);
	e1e_flush();
	ew32(RAH(index), rar_high);
	e1e_flush();

	return 0;
}
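
/**
 *  e1000_hash_mc_addr - Generate a multicast hash value
 *  @hw: pointer to the HW structure
 *  @mc_addr: pointer to a multicast address
 *
 *  Generates a multicast address hash value which is used to determine
 *  the multicast filter table array address and new table value.
 **/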
static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask.
	 */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.  There is a total
	 * of 8 bits of shifting between mc_addr[4] and mc_addr[5]: the
	 * bit_shift for a filter type of 0 keeps the MSB of mc_addr[5]
	 * within the hash mask, and mc_addr[4] contributes the remaining
	 * (8 - bit_shift) bits.  The other filter types shift mc_addr[5]
	 * further left while keeping the 8-bit shifting total.
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				   (((u16)mc_addr[5]) << bit_shift)));

	return hash_value;
}
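
/**
 *  e1000e_update_mc_addr_list_generic - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates the entire Multicast Table Array.
 *  The caller must have a packed mc_addr_list of multicast addresses.
 **/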
void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
					u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32)i < mc_addr_count; i++) {
		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);

		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
	e1e_flush();
}
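
/**
 *  e1000e_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers.
 **/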
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	er32(CRCERRS);
	er32(SYMERRS);
	er32(MPC);
	er32(SCC);
	er32(ECOL);
	er32(MCC);
	er32(LATECOL);
	er32(COLC);
	er32(DC);
	er32(SEC);
	er32(RLEC);
	er32(XONRXC);
	er32(XONTXC);
	er32(XOFFRXC);
	er32(XOFFTXC);
	er32(FCRUC);
	er32(GPRC);
	er32(BPRC);
	er32(MPRC);
	er32(GPTC);
	er32(GORCL);
	er32(GORCH);
	er32(GOTCL);
	er32(GOTCH);
	er32(RNBC);
	er32(RUC);
	er32(RFC);
	er32(ROC);
	er32(RJC);
	er32(TORL);
	er32(TORH);
	er32(TOTL);
	er32(TOTH);
	er32(TPR);
	er32(TPT);
	er32(MPTC);
	er32(BPTC);
}
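
/**
 *  e1000e_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, the collision distance and
 *  flow control are configured for the MAC.
 **/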
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return 0;
	mac->get_link_status = false;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val || !link)
		goto out;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000e_check_downshift(hw);

	/* If we are forced to speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val)
		e_dbg("Error configuring flow control\n");

	return ret_val;

out:
	mac->get_link_status = true;
	return ret_val;
}
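
/**
 *  e1000e_check_for_fiber_link - Check for link (Fiber)
 *  @hw: pointer to the HW structure
 *
 *  Checks for link up on the hardware.  If link is not up and we have
 *  a signal, then we need to force link up.
 **/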
s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/* If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), the cable is plugged in (we have signal),
	 * and our link partner is not trying to auto-negotiate with us (we
	 * are receiving idles or data), we need to force link up.  We also
	 * need to give auto-negotiation time to complete, in case the cable
	 * was just plugged in.  The autoneg_failed flag does this.
	 */
	/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
	if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
	    !(rxcw & E1000_RXCW_C)) {
		if (!mac->autoneg_failed) {
			mac->autoneg_failed = true;
			return 0;
		}
		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			e_dbg("Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/* If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = true;
	}

	return 0;
}
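
/**
 *  e1000e_check_for_serdes_link - Check for link (Serdes)
 *  @hw: pointer to the HW structure
 *
 *  Checks for link up on the hardware.  If link is not up and we have
 *  a signal, then we need to force link up.
 **/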
s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/* If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), and our link partner is not trying to
	 * auto-negotiate with us (we are receiving idles or data),
	 * we need to force link up.  We also need to give auto-negotiation
	 * time to complete.
	 */
	if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
		if (!mac->autoneg_failed) {
			mac->autoneg_failed = true;
			return 0;
		}
		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			e_dbg("Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/* If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = true;
	} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
		/* If we force link for non-auto-negotiation switch, check
		 * link status based on MAC synchronization for internal
		 * serdes media type.
		 */
		/* SYNCH bit and IV bit are sticky. */
		usleep_range(10, 20);
		rxcw = er32(RXCW);
		if (rxcw & E1000_RXCW_SYNCH) {
			if (!(rxcw & E1000_RXCW_IV)) {
				mac->serdes_has_link = true;
				e_dbg("SERDES: Link up - forced.\n");
			}
		} else {
			mac->serdes_has_link = false;
			e_dbg("SERDES: Link down - force failed.\n");
		}
	}

	if (E1000_TXCW_ANE & er32(TXCW)) {
		status = er32(STATUS);
		if (status & E1000_STATUS_LU) {
			/* SYNCH bit and IV bit are sticky, so reread rxcw. */
			usleep_range(10, 20);
			rxcw = er32(RXCW);
			if (rxcw & E1000_RXCW_SYNCH) {
				if (!(rxcw & E1000_RXCW_IV)) {
					mac->serdes_has_link = true;
					e_dbg("SERDES: Link up - autoneg completed successfully.\n");
				} else {
					mac->serdes_has_link = false;
					e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n");
				}
			} else {
				mac->serdes_has_link = false;
				e_dbg("SERDES: Link down - no sync.\n");
			}
		} else {
			mac->serdes_has_link = false;
			e_dbg("SERDES: Link down - autoneg failed\n");
		}
	}

	return 0;
}
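
/**
 *  e1000_set_default_fc_generic - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Read the EEPROM for the default values for flow control and store the
 *  values.
 **/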
static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 nvm_data;

	/* Read and store word 0x0F of the EEPROM.  This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
	 * disabling auto-negotiation, and the direction of the
	 * SW defined pins.  If there is no SW over-ride of the flow
	 * control setting, then the variable hw->fc will
	 * be initialized based on a value in the EEPROM.
	 */
	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);

	if (ret_val) {
		e_dbg("NVM Read Error\n");
		return ret_val;
	}

	if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

	return 0;
}
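
/**
 *  e1000e_setup_link_generic - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/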
s32 e1000e_setup_link_generic(struct e1000_hw *hw)
{
	s32 ret_val;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
		return 0;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = e1000_set_default_fc_generic(hw);
		if (ret_val)
			return ret_val;
	}

	/* Save off the requested flow control mode for use later.  Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		return ret_val;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	e_dbg("Initializing the Flow Control address, type and timer regs\n");
	ew32(FCT, FLOW_CONTROL_TYPE);
	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);

	ew32(FCTTV, hw->fc.pause_time);

	return e1000e_set_fc_watermarks(hw);
}
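
/**
 *  e1000_commit_fc_settings_generic - Configure flow control
 *  @hw: pointer to the HW structure
 *
 *  Write the flow control settings to the Transmit Config Word Register
 *  (TXCW) based on the flow control mode stored in hw->fc.current_mode.
 **/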
static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 txcw;

	/* Check for a software override of the flow control settings, and
	 * setup the device accordingly.  If auto-negotiation is enabled,
	 * then software will have to set the "PAUSE" bits to the correct
	 * value in the Transmit Config Word Register (TXCW) and re-start
	 * auto-negotiation.  However, if auto-negotiation is disabled, then
	 * software will have to manually configure the two flow control
	 * enable bits in the CTRL register.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause frames,
	 *          but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames but
	 *          we do not support receiving pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
	 */
	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		/* Flow control completely disabled by a software over-ride. */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
		break;
	case e1000_fc_rx_pause:
		/* Rx Flow control is enabled and Tx Flow control is disabled
		 * by a software over-ride.  Since there really isn't a way to
		 * advertise that we are capable of Rx Pause ONLY, we will
		 * advertise that we support both symmetric and asymmetric Rx
		 * PAUSE.  Later, we will disable the adapter's ability to
		 * send PAUSE frames.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
		break;
	case e1000_fc_tx_pause:
		/* Tx Flow control is enabled, and Rx Flow control is disabled,
		 * by a software over-ride.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
		break;
	case e1000_fc_full:
		/* Flow control (both Rx and Tx) is enabled by a software
		 * over-ride.
		 */
		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
		break;
	default:
		e_dbg("Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	ew32(TXCW, txcw);
	mac->txcw = txcw;

	return 0;
}
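
/**
 *  e1000_poll_fiber_serdes_link_generic - Poll for link up
 *  @hw: pointer to the HW structure
 *
 *  Polls for link up by reading the status register; if link fails to come
 *  up with auto-negotiation, then the link is forced if a signal is detected.
 **/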
static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 i, status;
	s32 ret_val;

	/* If we have a signal (the cable is plugged in, or assumed true for
	 * serdes media) then poll for a "Link-Up" indication in the Device
	 * Status Register.  Time-out if a link isn't seen in 500 milliseconds
	 * (Auto-negotiation should complete in less than 500 milliseconds
	 * even if the other end is doing it in SW).
	 */
	for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
		usleep_range(10000, 11000);
		status = er32(STATUS);
		if (status & E1000_STATUS_LU)
			break;
	}
	if (i == FIBER_LINK_UP_LIMIT) {
		e_dbg("Never got a valid link from auto-neg!!!\n");
		mac->autoneg_failed = true;
		/* AutoNeg failed to achieve a link, so we'll call
		 * mac->check_for_link.  This routine will force the
		 * link up if we detect a signal.  This will allow us to
		 * communicate with non-autonegotiating link partners.
		 */
		ret_val = mac->ops.check_for_link(hw);
		if (ret_val) {
			e_dbg("Error while checking for link\n");
			return ret_val;
		}
		mac->autoneg_failed = false;
	} else {
		mac->autoneg_failed = false;
		e_dbg("Valid Link Found\n");
	}

	return 0;
}
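
/**
 *  e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
 *  @hw: pointer to the HW structure
 *
 *  Configures collision distance and flow control for fiber and serdes
 *  links.  Upon successful setup, poll for link.
 **/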
s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	ctrl = er32(CTRL);

	/* Take the link out of reset */
	ctrl &= ~E1000_CTRL_LRST;

	hw->mac.ops.config_collision_dist(hw);

	ret_val = e1000_commit_fc_settings_generic(hw);
	if (ret_val)
		return ret_val;

	/* Since auto-negotiation is enabled, take the link out of reset (the
	 * link will be in reset, because we previously reset the chip).  This
	 * will restart auto-negotiation.  If auto-negotiation is successful
	 * then the link-up status bit will be set and the flow control enable
	 * bits (RFCE and TFCE) will be set according to their negotiated
	 * value.
	 */
	e_dbg("Auto-negotiation enabled\n");

	ew32(CTRL, ctrl);
	e1e_flush();
	usleep_range(1000, 2000);

	/* For these adapters, the SW definable pin 1 is set when the optics
	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
	 * indication.
	 */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
	} else {
		e_dbg("No signal detected\n");
	}

	return ret_val;
}
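
/**
 *  e1000e_config_collision_dist_generic - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Configures the collision distance to the default value and is used
 *  during link setup.
 **/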
void e1000e_config_collision_dist_generic(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = er32(TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	ew32(TCTL, tctl);
	e1e_flush();
}
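
/**
 *  e1000e_set_fc_watermarks - Set flow control high/low watermarks
 *  @hw: pointer to the HW structure
 *
 *  Sets the flow control high/low threshold (watermark) registers.  If
 *  flow control XON frame transmission is enabled, then set XON frame
 *  transmission as well.
 **/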
s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
{
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	ew32(FCRTL, fcrtl);
	ew32(FCRTH, fcrth);

	return 0;
}
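
/**
 *  e1000e_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in
 *  the device control register to reflect the adapter settings.  TFCE and
 *  RFCE need to be explicitly set by software when a copper PHY is used
 *  because autonegotiation is managed by the PHY rather than the MAC.
 *  Software must also configure these bits when link is forced on a fiber
 *  connection.
 **/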
s32 e1000e_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;

	ctrl = er32(CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disables flow control
	 * based on the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not receive pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		e_dbg("Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	ew32(CTRL, ctrl);

	return 0;
}
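
/**
 *  e1000e_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner.
 **/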
s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = e1000e_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = e1000e_force_mac_fc(hw);
	}

	if (ret_val) {
		e_dbg("Error forcing flow control settings\n");
		return ret_val;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
		if (ret_val)
			return ret_val;

		if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) {
			e_dbg("Copper PHY and Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg);
		if (ret_val)
			return ret_val;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 */
		if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
		    (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				e_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				e_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY:
		 * local PAUSE=0, ASM_DIR=1 and link partner PAUSE=1,
		 * ASM_DIR=1 resolves to Tx PAUSE only.
		 */
		else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
			 (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			e_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY:
		 * local PAUSE=1, ASM_DIR=1 and link partner PAUSE=0,
		 * ASM_DIR=1 resolves to Rx PAUSE only.
		 */
		else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
			 (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
			 !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			e_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			e_dbg("Flow Control = NONE.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
		if (ret_val) {
			e_dbg("Error getting link speed and duplex\n");
			return ret_val;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			e_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
	    mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = er32(PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			e_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto_Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = er32(PCS_ANADV);
		pcs_lp_ability_reg = er32(PCS_LPAB);

		/* The PAUSE and ASM_DIR bits advertised by the local device
		 * and the link partner determine flow control; the
		 * resolution follows the same table used for copper above.
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				e_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				e_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY:
		 * local PAUSE=0, ASM_DIR=1 and link partner PAUSE=1,
		 * ASM_DIR=1 resolves to Tx PAUSE only.
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			e_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY:
		 * local PAUSE=1, ASM_DIR=1 and link partner PAUSE=0,
		 * ASM_DIR=1 resolves to Rx PAUSE only.
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			e_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			e_dbg("Flow Control = NONE.\n");
		}

		/* Force flow control resolution on the PCS so the MAC
		 * reflects the negotiated settings.
		 */
		pcs_ctrl_reg = er32(PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		ew32(PCS_LCTL, pcs_ctrl_reg);

		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			e_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

	return 0;
}
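
/**
 *  e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Read the status register for the current speed/duplex and store the
 *  current speed and duplex for copper connections.
 **/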
s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
				       u16 *duplex)
{
	u32 status;

	status = er32(STATUS);
	if (status & E1000_STATUS_SPEED_1000)
		*speed = SPEED_1000;
	else if (status & E1000_STATUS_SPEED_100)
		*speed = SPEED_100;
	else
		*speed = SPEED_10;

	if (status & E1000_STATUS_FD)
		*duplex = FULL_DUPLEX;
	else
		*duplex = HALF_DUPLEX;

	e_dbg("%u Mbps, %s Duplex\n",
	      *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
	      *duplex == FULL_DUPLEX ? "Full" : "Half");

	return 0;
}
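
/**
 *  e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Sets the speed and duplex to gigabit full duplex (the only possible
 *  option) for fiber/serdes links.
 **/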
s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused
					     *hw, u16 *speed, u16 *duplex)
{
	*speed = SPEED_1000;
	*duplex = FULL_DUPLEX;

	return 0;
}
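
/**
 *  e1000e_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/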
s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = er32(SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(100);
		i++;
	}

	if (i == timeout) {
		e_dbg("Driver can't access device - SMBI bit is set.\n");
		return -E1000_ERR_NVM;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (er32(SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(100);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000e_put_hw_semaphore(hw);
		e_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}
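
/**
 *  e1000e_put_hw_semaphore - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Release hardware semaphore used to access the PHY or NVM
 **/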
void e1000e_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = er32(SWSM);
	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
	ew32(SWSM, swsm);
}
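
/**
 *  e1000e_get_auto_rd_done - Check for auto read completion
 *  @hw: pointer to the HW structure
 *
 *  Check EEPROM for Auto Read done bit.
 **/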
s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (er32(EECD) & E1000_EECD_AUTO_RD)
			break;
		usleep_range(1000, 2000);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		e_dbg("Auto read by HW from NVM has not completed.\n");
		return -E1000_ERR_RESET;
	}

	return 0;
}
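
/**
 *  e1000e_valid_led_default - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/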
s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		e_dbg("NVM Read Error\n");
		return ret_val;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
		*data = ID_LED_DEFAULT;

	return 0;
}
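
/**
 *  e1000e_id_led_init_generic - Initialize LED settings
 *  @hw: pointer to the HW structure
 *
 *  Reads the LED default settings from the NVM and builds the LEDCTL values
 *  used for the "on" and "off" LED modes.
 **/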
s32 e1000e_id_led_init_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	mac->ledctl_default = er32(LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return 0;
}
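
/**
 *  e1000e_setup_led_generic - Configures SW controllable LED
 *  @hw: pointer to the HW structure
 *
 *  This prepares the SW controllable LED for use and saves the current state
 *  of the LED so it can be later restored.
 **/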
s32 e1000e_setup_led_generic(struct e1000_hw *hw)
{
	u32 ledctl;

	if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
		return -E1000_ERR_CONFIG;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		ledctl = er32(LEDCTL);
		hw->mac.ledctl_default = ledctl;
		/* Turn off LED0 */
		ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
			    E1000_LEDCTL_LED0_MODE_MASK);
		ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
			   E1000_LEDCTL_LED0_MODE_SHIFT);
		ew32(LEDCTL, ledctl);
	} else if (hw->phy.media_type == e1000_media_type_copper) {
		ew32(LEDCTL, hw->mac.ledctl_mode1);
	}

	return 0;
}
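
/**
 *  e1000e_cleanup_led_generic - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED configuration
 *  to the default value, saved from the EEPROM.
 **/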
s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
{
	ew32(LEDCTL, hw->mac.ledctl_default);
	return 0;
}
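
/**
 *  e1000e_blink_led_generic - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the LEDs which are set to be on.
 **/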
s32 e1000e_blink_led_generic(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		    (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (0x0E)
		 * (or "off" if inverted) in ledctl_mode2.  The blink
		 * logic in hardware only works when mode is set to "on"
		 * so it must be changed prior to setting the blink bit.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	ew32(LEDCTL, ledctl_blink);

	return 0;
}
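
/**
 *  e1000e_led_on_generic - Turn LED on
 *  @hw: pointer to the HW structure
 *
 *  Turn LED on.
 **/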
s32 e1000e_led_on_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->phy.media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode2);
		break;
	default:
		break;
	}

	return 0;
}
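
/**
 *  e1000e_led_off_generic - Turn LED off
 *  @hw: pointer to the HW structure
 *
 *  Turn LED off.
 **/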
s32 e1000e_led_off_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->phy.media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}
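
/**
 *  e1000e_set_pcie_no_snoop - Set PCI-express capabilities
 *  @hw: pointer to the HW structure
 *  @no_snoop: bitmap of snoop events
 *
 *  Set the PCI-express register to snoop for events enabled in 'no_snoop'.
 **/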
void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
{
	u32 gcr;

	if (no_snoop) {
		gcr = er32(GCR);
		gcr &= ~(PCIE_NO_SNOOP_ALL);
		gcr |= no_snoop;
		ew32(GCR, gcr);
	}
}
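
/**
 *  e1000e_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.  Returns 0 if successful, else
 *  -E1000_ERR_MASTER_REQUESTS_PENDING if the master disable bit has not
 *  caused the master requests to be disabled.
 **/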
s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;

	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	ew32(CTRL, ctrl);

	while (timeout) {
		if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		usleep_range(100, 200);
		timeout--;
	}

	if (!timeout) {
		e_dbg("Master requests are pending.\n");
		return -E1000_ERR_MASTER_REQUESTS_PENDING;
	}

	return 0;
}
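
/**
 *  e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
 *  @hw: pointer to the HW structure
 *
 *  Reset the Adaptive Interframe Spacing throttle to default values.
 **/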
void e1000e_reset_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	if (!mac->adaptive_ifs) {
		e_dbg("Not in Adaptive IFS mode!\n");
		return;
	}

	mac->current_ifs_val = 0;
	mac->ifs_min_val = IFS_MIN;
	mac->ifs_max_val = IFS_MAX;
	mac->ifs_step_size = IFS_STEP;
	mac->ifs_ratio = IFS_RATIO;

	mac->in_ifs_mode = false;
	ew32(AIT, 0);
}
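
/**
 *  e1000e_update_adaptive - Update Adaptive Interframe Spacing
 *  @hw: pointer to the HW structure
 *
 *  Update the Adaptive Interframe Spacing Throttle value based on the
 *  time between transmitted packets and time between collisions.
 **/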
void e1000e_update_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	if (!mac->adaptive_ifs) {
		e_dbg("Not in Adaptive IFS mode!\n");
		return;
	}

	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
			mac->in_ifs_mode = true;
			if (mac->current_ifs_val < mac->ifs_max_val) {
				if (!mac->current_ifs_val)
					mac->current_ifs_val = mac->ifs_min_val;
				else
					mac->current_ifs_val +=
					    mac->ifs_step_size;
				ew32(AIT, mac->current_ifs_val);
			}
		}
	} else {
		if (mac->in_ifs_mode &&
		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
			mac->current_ifs_val = 0;
			mac->in_ifs_mode = false;
			ew32(AIT, 0);
		}
	}
}