0001
0002
0003
0004 #include <linux/if_ether.h>
0005 #include <linux/delay.h>
0006 #include <linux/pci.h>
0007 #include <linux/netdevice.h>
0008 #include <linux/etherdevice.h>
0009
0010 #include "e1000_mac.h"
0011
0012 #include "igb.h"
0013
0014 static s32 igb_set_default_fc(struct e1000_hw *hw);
0015 static void igb_set_fc_watermarks(struct e1000_hw *hw);
0016
0017
0018
0019
0020
0021
0022
0023
0024
/**
 *  igb_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for a particular
 *  network interface.  The following bus information is determined and
 *  stored: bus speed, bus width, type (PCIe), and PCIe function.
 *
 *  Always returns 0.
 **/
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	/* Read the PCIe Link Status register from config space; on failure
	 * fall back to "unknown" speed/width rather than erroring out.
	 */
	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		/* Current Link Speed field */
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		/* Negotiated Link Width field maps directly onto the
		 * e1000_bus_width enumeration values.
		 */
		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	/* PCI function number comes from the device STATUS register */
	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}
0063
0064
0065
0066
0067
0068
0069
0070
0071 void igb_clear_vfta(struct e1000_hw *hw)
0072 {
0073 u32 offset;
0074
0075 for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
0076 hw->mac.ops.write_vfta(hw, offset, 0);
0077 }
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table, then mirrors the value into the driver's
 *  shadow copy of the table.
 **/
void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	struct igb_adapter *adapter = hw->back;

	array_wr32(E1000_VFTA, offset, value);
	wrfl();	/* flush the register write before caching it */

	adapter->shadow_vfta[offset] = value;
}
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
/**
 *  igb_init_rx_addrs - Initialize receive address registers
 *  @hw: pointer to the HW structure
 *  @rar_count: number of receive address registers
 *
 *  Sets up the receive address registers by setting the base receive
 *  address register (RAR[0]) to the device's MAC address and clearing
 *  all the other receive address registers to 0.
 **/
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = {0};

	/* Setup the receive address */
	hw_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_count - 1) receive addresses */
	hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
/**
 *  igb_find_vlvf_slot - find the VLAN id or the first empty slot
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vlvf_bypass: true to find vlanid only, false returns first empty slot if
 *		  vlanid not found
 *
 *  Return the VLVF index where this VLAN id should be placed, or a negative
 *  error code if no slot is available.
 **/
static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case: VLAN 0 always lives in slot 0 */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;

	/* Search for the VLAN id in the VLVF entries.  Save off the first
	 * empty slot found along the way.  Slot 0 is skipped (reserved for
	 * VLAN 0), hence the pre-decrement loop covering
	 * (E1000_VLVF_ARRAY_SIZE - 1) .. 1.
	 */
	for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
		bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	return first_empty_slot ? : -E1000_ERR_NO_SPACE;
}
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
/**
 *  igb_vfta_set - enable or disable vlan in VLAN filter table
 *  @hw: pointer to the HW structure
 *  @vlan: VLAN id to add or remove
 *  @vind: VMDq output index that maps queue to VLAN id
 *  @vlan_on: if true add filter, if false remove
 *  @vlvf_bypass: skip VLVF if no match is found
 *
 *  Sets or clears a bit in the VLAN filter table array based on VLAN id
 *  and if we are adding or removing the filter.  When VFs are allocated
 *  the matching VLVF pool-select entry is updated as well.
 **/
s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
		 bool vlan_on, bool vlvf_bypass)
{
	struct igb_adapter *adapter = hw->back;
	u32 regidx, vfta_delta, vfta, bits;
	s32 vlvf_index;

	/* VLAN ids are 12 bits, pool indices are 3 bits */
	if ((vlan > 4095) || (vind > 7))
		return -E1000_ERR_PARAM;

	/* The VFTA is a bitstring made up of 128 32-bit registers.  Compute
	 * which register and which bit within it correspond to this VLAN,
	 * and read the current value from the driver's shadow copy.
	 */
	regidx = vlan / 32;
	vfta_delta = BIT(vlan % 32);
	vfta = adapter->shadow_vfta[regidx];

	/* vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is a single bit we can just XOR it with vfta to generate the
	 * new value.
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* If there are no VFs we can just go straight to updating the
	 * VFTA; otherwise the VLVF (VLAN Virtual Machine Filter) must be
	 * kept in sync as well.
	 */
	if (!adapter->vfs_allocated_count)
		goto vfta_update;

	vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0) {
		/* no slot; with bypass we still update the VFTA,
		 * otherwise propagate the error
		 */
		if (vlvf_bypass)
			goto vfta_update;
		return vlvf_index;
	}

	bits = rd32(E1000_VLVF(vlvf_index));

	/* set the pool bit for this pool/VF */
	bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
	if (vlan_on)
		goto vlvf_update;

	/* removing: clear the pool bit we just set */
	bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);

	if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
		/* No pools are using this VLAN any more.  Commit any
		 * pending VFTA change, then free the VLVF slot.
		 */
		if (vfta_delta)
			hw->mac.ops.write_vfta(hw, regidx, vfta);

		/* disable VLVF and clear remaining bit from pool */
		wr32(E1000_VLVF(vlvf_index), 0);

		return 0;
	}

	/* If there are still pools/VMs using this VLAN make sure the VLAN
	 * stays enabled in the VFTA: clear the delta so the VFTA bit is
	 * left set below.
	 */
	vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);

vfta_update:
	/* bit was set/cleared before we started */
	if (vfta_delta)
		hw->mac.ops.write_vfta(hw, regidx, vfta);

	return 0;
}
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the nvm for an alternate MAC address.  An alternate MAC address
 *  can be setup by pre-boot software and must be treated like a permanent
 *  address and must override the actual permanent MAC address.  If an
 *  alternate MAC address is found it is programmed into RAR0, replacing
 *  the permanent address that was installed into RAR0 by the reset routines.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Alternate MAC address is handled by the option ROM for 82580
	 * and newer. SW support not required.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	/* Each LAN function has its own block of alternate-address words;
	 * adjust the base offset for the function we are running on.
	 */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;

	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	/* NVM words hold two address bytes each (low byte first) */
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR. Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
/**
 *  igb_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6 bytes)
 *  @index: receive address array register to write
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV (address valid) bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into
	 * a single burst write, which will malfunction on some parts.
	 * The flushes avoid this: write low, flush, write high, flush.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392
/**
 *  igb_mta_set - Set multicast filter table address
 *  @hw: pointer to the HW structure
 *  @hash_value: determines the MTA register and bit to set
 *
 *  The multicast table address is a register array of 32-bit registers.
 *  The hash_value is used to determine what register the bit is in, the
 *  current value is read, the new bit is OR'd in and the new value is
 *  written back into the register.
 **/
void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg, mta;

	/* The MTA is a register array of 32-bit registers. It is
	 * treated like an array of (32*mta_reg_count) bits.  We want to
	 * set bit BitArray[hash_value]. So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
	 * mask to bits 31:5 of the hash value which gives us the
	 * register we're modifying.  The hash bit within that register
	 * is determined by the lower 5 bits of the hash value.
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = array_rd32(E1000_MTA, hash_reg);

	mta |= BIT(hash_bit);

	array_wr32(E1000_MTA, hash_reg, mta);
	wrfl();
}
0416
0417
0418
0419
0420
0421
0422
0423
0424
0425
0426 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
0427 {
0428 u32 hash_value, hash_mask;
0429 u8 bit_shift = 0;
0430
0431
0432 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
0433
0434
0435
0436
0437 while (hash_mask >> bit_shift != 0xFF)
0438 bit_shift++;
0439
0440
0441
0442
0443
0444
0445
0446
0447
0448
0449
0450
0451
0452
0453
0454
0455
0456
0457
0458
0459
0460
0461
0462
0463
0464
0465 switch (hw->mac.mc_filter_type) {
0466 default:
0467 case 0:
0468 break;
0469 case 1:
0470 bit_shift += 1;
0471 break;
0472 case 2:
0473 bit_shift += 2;
0474 break;
0475 case 3:
0476 bit_shift += 4;
0477 break;
0478 }
0479
0480 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
0481 (((u16) mc_addr[5]) << bit_shift)));
0482
0483 return hash_value;
0484 }
0485
0486
0487
0488
0489
0490
0491
0492
0493 static void igb_i21x_hw_doublecheck(struct e1000_hw *hw)
0494 {
0495 int failed_cnt = 3;
0496 bool is_failed;
0497 int i;
0498
0499 do {
0500 is_failed = false;
0501 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) {
0502 if (array_rd32(E1000_MTA, i) != hw->mac.mta_shadow[i]) {
0503 is_failed = true;
0504 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
0505 wrfl();
0506 }
0507 }
0508 if (is_failed && --failed_cnt <= 0) {
0509 hw_dbg("Failed to update MTA_REGISTER, too many retries");
0510 break;
0511 }
0512 } while (is_failed);
0513 }
0514
0515
0516
0517
0518
0519
0520
0521
0522
0523
/**
 *  igb_update_mc_addr_list - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates entire Multicast Table Array.
 *  The caller must have a packed mc_addr_list of multicast addresses
 *  (ETH_ALEN bytes each, back to back).
 **/
void igb_update_mc_addr_list(struct e1000_hw *hw,
			     u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32) i < mc_addr_count; i++) {
		hash_value = igb_hash_mc_addr(hw, mc_addr_list);

		/* upper bits select the register, lower 5 bits the bit */
		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
	wrfl();
	/* i210/i211 parts may need the MTA programming re-verified */
	if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211)
		igb_i21x_hw_doublecheck(hw);
}
0551
0552
0553
0554
0555
0556
0557
/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers.
 *  (These statistics registers are clear-on-read in hardware; the read
 *  values are intentionally discarded.)
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}
0598
0599
0600
0601
0602
0603
0604
0605
0606
/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists.
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igb_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
0667
0668
0669
0670
0671
0672
0673
0674
0675
0676
0677
/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	igb_set_fc_watermarks(hw);

out:

	return ret_val;
}
0728
0729
0730
0731
0732
0733
0734
0735
0736
0737 void igb_config_collision_dist(struct e1000_hw *hw)
0738 {
0739 u32 tctl;
0740
0741 tctl = rd32(E1000_TCTL);
0742
0743 tctl &= ~E1000_TCTL_COLD;
0744 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
0745
0746 wr32(E1000_TCTL, tctl);
0747 wrfl();
0748 }
0749
0750
0751
0752
0753
0754
0755
0756
0757
0758 static void igb_set_fc_watermarks(struct e1000_hw *hw)
0759 {
0760 u32 fcrtl = 0, fcrth = 0;
0761
0762
0763
0764
0765
0766
0767
0768 if (hw->fc.current_mode & e1000_fc_tx_pause) {
0769
0770
0771
0772
0773 fcrtl = hw->fc.low_water;
0774 if (hw->fc.send_xon)
0775 fcrtl |= E1000_FCRTL_XONE;
0776
0777 fcrth = hw->fc.high_water;
0778 }
0779 wr32(E1000_FCRTL, fcrtl);
0780 wr32(E1000_FCRTH, fcrth);
0781 }
0782
0783
0784
0785
0786
0787
0788
0789
0790 static s32 igb_set_default_fc(struct e1000_hw *hw)
0791 {
0792 s32 ret_val = 0;
0793 u16 lan_offset;
0794 u16 nvm_data;
0795
0796
0797
0798
0799
0800
0801
0802
0803
0804 if (hw->mac.type == e1000_i350)
0805 lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
0806 else
0807 lan_offset = 0;
0808
0809 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
0810 1, &nvm_data);
0811 if (ret_val) {
0812 hw_dbg("NVM Read Error\n");
0813 goto out;
0814 }
0815
0816 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
0817 hw->fc.requested_mode = e1000_fc_none;
0818 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
0819 hw->fc.requested_mode = e1000_fc_tx_pause;
0820 else
0821 hw->fc.requested_mode = e1000_fc_full;
0822
0823 out:
0824 return ret_val;
0825 }
0826
0827
0828
0829
0830
0831
0832
0833
0834
0835
0836
0837 s32 igb_force_mac_fc(struct e1000_hw *hw)
0838 {
0839 u32 ctrl;
0840 s32 ret_val = 0;
0841
0842 ctrl = rd32(E1000_CTRL);
0843
0844
0845
0846
0847
0848
0849
0850
0851
0852
0853
0854
0855
0856
0857
0858
0859
0860
0861 hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
0862
0863 switch (hw->fc.current_mode) {
0864 case e1000_fc_none:
0865 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
0866 break;
0867 case e1000_fc_rx_pause:
0868 ctrl &= (~E1000_CTRL_TFCE);
0869 ctrl |= E1000_CTRL_RFCE;
0870 break;
0871 case e1000_fc_tx_pause:
0872 ctrl &= (~E1000_CTRL_RFCE);
0873 ctrl |= E1000_CTRL_TFCE;
0874 break;
0875 case e1000_fc_full:
0876 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
0877 break;
0878 default:
0879 hw_dbg("Flow control param set incorrectly\n");
0880 ret_val = -E1000_ERR_CONFIG;
0881 goto out;
0882 }
0883
0884 wr32(E1000_CTRL, ctrl);
0885
0886 out:
0887 return ret_val;
0888 }
0889
0890
0891
0892
0893
0894
0895
0896
0897
0898
0899
/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner's advertised capabilities.
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in each register relate to flow control: the
		 * PAUSE bit and the ASM_DIR (asymmetric direction) bit.
		 * The resolution below follows the standard 802.3
		 * pause-resolution truth table:
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		/* Both sides advertise PAUSE: FULL if we asked for full,
		 * otherwise Rx-only (per the requested-mode preference).
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\n");
			}
		}
		/* Asymmetric case: partner can receive PAUSE, we only send */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* Asymmetric case: we can receive PAUSE, partner only sends */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely for two reasons:  If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway.  If the intent on the link partner was to have
		 * flow control enabled, then by us enabling RX only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more times than not we will
		 * be asked to delay transmission of packets than asking
		 * our link partner to pause transmission of frames.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none) ||
			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}

	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
		&& mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = rd32(E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			hw_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto_Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = rd32(E1000_PCS_ANADV);
		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);

		/* Same 802.3 pause-resolution logic as the copper path
		 * above, but using the PCS PAUSE/ASM_DIR bits.
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* Asymmetric case: partner receives, we only transmit */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			  (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			  (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			  (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* Asymmetric case: we receive, partner only transmits */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		}

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);

		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

out:
	return ret_val;
}
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230 s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1231 u16 *duplex)
1232 {
1233 u32 status;
1234
1235 status = rd32(E1000_STATUS);
1236 if (status & E1000_STATUS_SPEED_1000) {
1237 *speed = SPEED_1000;
1238 hw_dbg("1000 Mbs, ");
1239 } else if (status & E1000_STATUS_SPEED_100) {
1240 *speed = SPEED_100;
1241 hw_dbg("100 Mbs, ");
1242 } else {
1243 *speed = SPEED_10;
1244 hw_dbg("10 Mbs, ");
1245 }
1246
1247 if (status & E1000_STATUS_FD) {
1248 *duplex = FULL_DUPLEX;
1249 hw_dbg("Full Duplex\n");
1250 } else {
1251 *duplex = HALF_DUPLEX;
1252 hw_dbg("Half Duplex\n");
1253 }
1254
1255 return 0;
1256 }
1257
1258
1259
1260
1261
1262
1263
/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM: first wait for the
 *  hardware-owned SMBI bit to clear, then claim the software semaphore
 *  (SWESMBI) with a write-and-read-back handshake.  Returns 0 on success
 *  or -E1000_ERR_NVM on timeout.
 **/
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore: wait for SMBI to be released */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore: set SWESMBI and confirm it stuck */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched (firmware may clear it) */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}
1310
1311
1312
1313
1314
1315
1316
1317 void igb_put_hw_semaphore(struct e1000_hw *hw)
1318 {
1319 u32 swsm;
1320
1321 swsm = rd32(E1000_SWSM);
1322
1323 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1324
1325 wr32(E1000_SWSM, swsm);
1326 }
1327
1328
1329
1330
1331
1332
1333
1334 s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1335 {
1336 s32 i = 0;
1337 s32 ret_val = 0;
1338
1339
1340 while (i < AUTO_READ_DONE_TIMEOUT) {
1341 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1342 break;
1343 usleep_range(1000, 2000);
1344 i++;
1345 }
1346
1347 if (i == AUTO_READ_DONE_TIMEOUT) {
1348 hw_dbg("Auto read by HW from NVM has not completed.\n");
1349 ret_val = -E1000_ERR_RESET;
1350 goto out;
1351 }
1352
1353 out:
1354 return ret_val;
1355 }
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365 static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1366 {
1367 s32 ret_val;
1368
1369 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1370 if (ret_val) {
1371 hw_dbg("NVM Read Error\n");
1372 goto out;
1373 }
1374
1375 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1376 switch (hw->phy.media_type) {
1377 case e1000_media_type_internal_serdes:
1378 *data = ID_LED_DEFAULT_82575_SERDES;
1379 break;
1380 case e1000_media_type_copper:
1381 default:
1382 *data = ID_LED_DEFAULT;
1383 break;
1384 }
1385 }
1386 out:
1387 return ret_val;
1388 }
1389
1390
1391
1392
1393
1394
/**
 *  igb_id_led_init - Initialize identity LED settings
 *  @hw: pointer to the HW structure
 *
 *  Builds the LEDCTL values used for LED identification (mode1) and the
 *  alternate blink pattern (mode2) from the NVM's ID LED settings word.
 *  Each of the four LEDs is described by a 4-bit nibble in that word; for
 *  each nibble the matching byte of ledctl_mode1/ledctl_mode2 is set to
 *  LED-on or LED-off as requested, or left at the hardware default.
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	/* i210 and i211 devices have different LED mechanism */
	if ((hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211))
		ret_val = igb_valid_led_default_i210(hw, &data);
	else
		ret_val = igb_valid_led_default(hw, &data);

	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* One 4-bit setting per LED; one 8-bit LEDCTL field per LED */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		/* mode1 ("LED1" state of the pair) */
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing - keep hardware default */
			break;
		}
		/* mode2 ("LED2" state of the pair) */
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing - keep hardware default */
			break;
		}
	}

out:
	return ret_val;
}
1460
1461
1462
1463
1464
1465
1466
1467
/**
 *  igb_cleanup_led - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED config
 *  to the default value, saved from the EEPROM.
 **/
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}
1473
1474
1475
1476
1477
1478
1479
/**
 *  igb_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the LEDs which are set to be on.
 **/
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (0x0E)
		 * (or "off" if inverted) in ledctl_mode2.  The blink
		 * logic in hardware only works when mode is set to "on"
		 * so it must be changed accordingly when the mode is
		 * "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				/* this LED is "on": force mode to LED_ON
				 * and set its blink bit
				 */
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}
1518
1519
1520
1521
1522
1523
1524
1525 s32 igb_led_off(struct e1000_hw *hw)
1526 {
1527 switch (hw->phy.media_type) {
1528 case e1000_media_type_copper:
1529 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1530 break;
1531 default:
1532 break;
1533 }
1534
1535 return 0;
1536 }
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549 s32 igb_disable_pcie_master(struct e1000_hw *hw)
1550 {
1551 u32 ctrl;
1552 s32 timeout = MASTER_DISABLE_TIMEOUT;
1553 s32 ret_val = 0;
1554
1555 if (hw->bus.type != e1000_bus_type_pci_express)
1556 goto out;
1557
1558 ctrl = rd32(E1000_CTRL);
1559 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1560 wr32(E1000_CTRL, ctrl);
1561
1562 while (timeout) {
1563 if (!(rd32(E1000_STATUS) &
1564 E1000_STATUS_GIO_MASTER_ENABLE))
1565 break;
1566 udelay(100);
1567 timeout--;
1568 }
1569
1570 if (!timeout) {
1571 hw_dbg("Master requests are pending.\n");
1572 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1573 goto out;
1574 }
1575
1576 out:
1577 return ret_val;
1578 }
1579
1580
1581
1582
1583
1584
1585
1586
1587 s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1588 {
1589 s32 ret_val = 0;
1590
1591
1592 if (hw->mac.type >= e1000_82580)
1593 goto out;
1594
1595 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1596 hw_dbg("Invalid MDI setting detected\n");
1597 hw->phy.mdix = 1;
1598 ret_val = -E1000_ERR_CONFIG;
1599 goto out;
1600 }
1601
1602 out:
1603 return ret_val;
1604 }
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617 s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1618 u32 offset, u8 data)
1619 {
1620 u32 i, regvalue = 0;
1621 s32 ret_val = 0;
1622
1623
1624 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1625 wr32(reg, regvalue);
1626
1627
1628 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1629 udelay(5);
1630 regvalue = rd32(reg);
1631 if (regvalue & E1000_GEN_CTL_READY)
1632 break;
1633 }
1634 if (!(regvalue & E1000_GEN_CTL_READY)) {
1635 hw_dbg("Reg %08x did not indicate ready\n", reg);
1636 ret_val = -E1000_ERR_PHY;
1637 goto out;
1638 }
1639
1640 out:
1641 return ret_val;
1642 }
1643
1644
1645
1646
1647
1648
1649
1650
1651 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1652 {
1653 u32 manc;
1654 u32 fwsm, factps;
1655 bool ret_val = false;
1656
1657 if (!hw->mac.asf_firmware_present)
1658 goto out;
1659
1660 manc = rd32(E1000_MANC);
1661
1662 if (!(manc & E1000_MANC_RCV_TCO_EN))
1663 goto out;
1664
1665 if (hw->mac.arc_subsystem_valid) {
1666 fwsm = rd32(E1000_FWSM);
1667 factps = rd32(E1000_FACTPS);
1668
1669 if (!(factps & E1000_FACTPS_MNGCG) &&
1670 ((fwsm & E1000_FWSM_MODE_MASK) ==
1671 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1672 ret_val = true;
1673 goto out;
1674 }
1675 } else {
1676 if ((manc & E1000_MANC_SMBUS_EN) &&
1677 !(manc & E1000_MANC_ASF_EN)) {
1678 ret_val = true;
1679 goto out;
1680 }
1681 }
1682
1683 out:
1684 return ret_val;
1685 }