#include <linux/pci.h>
#include <linux/delay.h>

#include "igc_mac.h"
#include "igc_hw.h"
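
/**
 * igc_disable_pcie_master - Disables PCI-express master access
 * @hw: pointer to the HW structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests.  Returns 0 if successful, else -IGC_ERR_MASTER_REQUESTS_PENDING
 * if the master disable bit has not caused the master requests to be
 * disabled within the timeout.
 */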
s32 igc_disable_pcie_master(struct igc_hw *hw)
{
	s32 timeout = MASTER_DISABLE_TIMEOUT;
	s32 ret_val = 0;
	u32 ctrl;

	ctrl = rd32(IGC_CTRL);
	ctrl |= IGC_CTRL_GIO_MASTER_DISABLE;
	wr32(IGC_CTRL, ctrl);

	while (timeout) {
		if (!(rd32(IGC_STATUS) &
		      IGC_STATUS_GIO_MASTER_ENABLE))
			break;
		usleep_range(2000, 3000);
		timeout--;
	}

	if (!timeout) {
		hw_dbg("Master requests are pending.\n");
		ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING;
		goto out;
	}

out:
	return ret_val;
}
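
/**
 * igc_init_rx_addrs - Initialize receive addresses
 * @hw: pointer to the HW structure
 * @rar_count: receive address registers
 *
 * Sets the receive address array register at index 0 to the device's MAC
 * address and clears the remaining (rar_count - 1) receive address registers.
 */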
void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count)
{
	u8 mac_addr[ETH_ALEN] = {0};
	u32 i;

	/* Setup the receive address */
	hw_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_count - 1) RAR registers */
	hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}
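
/**
 * igc_set_fc_watermarks - Set flow control high/low watermarks
 * @hw: pointer to the HW structure
 *
 * Sets the flow control high/low threshold (watermark) registers.  If flow
 * control XON frame transmission is enabled, then set XON frame transmission
 * as well.
 */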
static s32 igc_set_fc_watermarks(struct igc_hw *hw)
{
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & igc_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= IGC_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	wr32(IGC_FCRTL, fcrtl);
	wr32(IGC_FCRTH, fcrth);

	return 0;
}
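
/**
 * igc_setup_link - Setup flow control and link settings
 * @hw: pointer to the HW structure
 *
 * Determines which flow control settings to use, then configures flow
 * control.  Calls the appropriate media-specific link configuration
 * function.  Assuming the adapter has a valid link partner, a valid link
 * should be established.  Assumes the hardware has previously been reset
 * and the transmitter and receiver are not enabled.
 */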
s32 igc_setup_link(struct igc_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igc_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * to both 'rx' and 'tx' pause frames.
	 */
	if (hw->fc.requested_mode == igc_fc_default)
		hw->fc.requested_mode = igc_fc_full;

	/* Save off the requested flow control mode for use later.  Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media-specific subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(IGC_FCT, FLOW_CONTROL_TYPE);
	wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(IGC_FCTTV, hw->fc.pause_time);

	ret_val = igc_set_fc_watermarks(hw);

out:
	return ret_val;
}
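
/**
 * igc_force_mac_fc - Force the MAC's flow control settings
 * @hw: pointer to the HW structure
 *
 * Forces the MAC's flow control settings.  Sets the TFCE and RFCE bits in
 * the device control register to reflect the adapter settings.  TFCE and
 * RFCE need to be explicitly set by software when a copper PHY is used
 * because autonegotiation is managed by the PHY rather than the MAC.
 */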
s32 igc_force_mac_fc(struct igc_hw *hw)
{
	s32 ret_val = 0;
	u32 ctrl;

	ctrl = rd32(IGC_CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY auto-neg),
	 * we have to manually enable/disable transmit and receive flow
	 * control.
	 *
	 * The "Case" statement below enables/disables flow control according
	 * to the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not receive pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *      other:  No other values should be possible at this point.
	 */
	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case igc_fc_none:
		ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE));
		break;
	case igc_fc_rx_pause:
		ctrl &= (~IGC_CTRL_TFCE);
		ctrl |= IGC_CTRL_RFCE;
		break;
	case igc_fc_tx_pause:
		ctrl &= (~IGC_CTRL_RFCE);
		ctrl |= IGC_CTRL_TFCE;
		break;
	case igc_fc_full:
		ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -IGC_ERR_CONFIG;
		goto out;
	}

	wr32(IGC_CTRL, ctrl);

out:
	return ret_val;
}
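
/**
 * igc_clear_hw_cntrs_base - Clear base hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the base hardware counters by reading the counter registers.
 */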
void igc_clear_hw_cntrs_base(struct igc_hw *hw)
{
	rd32(IGC_CRCERRS);
	rd32(IGC_MPC);
	rd32(IGC_SCC);
	rd32(IGC_ECOL);
	rd32(IGC_MCC);
	rd32(IGC_LATECOL);
	rd32(IGC_COLC);
	rd32(IGC_RERC);
	rd32(IGC_DC);
	rd32(IGC_RLEC);
	rd32(IGC_XONRXC);
	rd32(IGC_XONTXC);
	rd32(IGC_XOFFRXC);
	rd32(IGC_XOFFTXC);
	rd32(IGC_FCRUC);
	rd32(IGC_GPRC);
	rd32(IGC_BPRC);
	rd32(IGC_MPRC);
	rd32(IGC_GPTC);
	rd32(IGC_GORCL);
	rd32(IGC_GORCH);
	rd32(IGC_GOTCL);
	rd32(IGC_GOTCH);
	rd32(IGC_RNBC);
	rd32(IGC_RUC);
	rd32(IGC_RFC);
	rd32(IGC_ROC);
	rd32(IGC_RJC);
	rd32(IGC_TORL);
	rd32(IGC_TORH);
	rd32(IGC_TOTL);
	rd32(IGC_TOTH);
	rd32(IGC_TPR);
	rd32(IGC_TPT);
	rd32(IGC_MPTC);
	rd32(IGC_BPTC);

	rd32(IGC_PRC64);
	rd32(IGC_PRC127);
	rd32(IGC_PRC255);
	rd32(IGC_PRC511);
	rd32(IGC_PRC1023);
	rd32(IGC_PRC1522);
	rd32(IGC_PTC64);
	rd32(IGC_PTC127);
	rd32(IGC_PTC255);
	rd32(IGC_PTC511);
	rd32(IGC_PTC1023);
	rd32(IGC_PTC1522);

	rd32(IGC_ALGNERRC);
	rd32(IGC_RXERRC);
	rd32(IGC_TNCRS);
	rd32(IGC_HTDPMC);
	rd32(IGC_TSCTC);

	rd32(IGC_MGTPRC);
	rd32(IGC_MGTPDC);
	rd32(IGC_MGTPTC);

	rd32(IGC_IAC);

	rd32(IGC_RPTHC);
	rd32(IGC_TLPIC);
	rd32(IGC_RLPIC);
	rd32(IGC_HGPTC);
	rd32(IGC_RXDMTC);
	rd32(IGC_HGORCL);
	rd32(IGC_HGORCH);
	rd32(IGC_HGOTCL);
	rd32(IGC_HGOTCH);
	rd32(IGC_LENERRS);
}
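
/**
 * igc_rar_set - Set receive address register
 * @hw: pointer to the HW structure
 * @addr: pointer to the receive address
 * @index: receive address array register
 *
 * Sets the receive address array register at index to the address passed
 * in by addr.
 */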
void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] |
		   ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));

	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= IGC_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into a single
	 * burst write, which will malfunction on some parts.  The flushes
	 * avoid this.
	 */
	wr32(IGC_RAL(index), rar_low);
	wrfl();
	wr32(IGC_RAH(index), rar_high);
	wrfl();
}
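
/**
 * igc_check_for_copper_link - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to determine if the link state of the PHY has changed.  If a
 * change in link status has been detected, the flow control and collision
 * distance settings are (re)configured and the LTR thresholds are updated.
 */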
s32 igc_check_for_copper_link(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	bool link = false;
	s32 ret_val;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igc_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out;

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igc_check_downshift(hw);

	/* If we are forced to speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -IGC_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	igc_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igc_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	/* Now that we are aware of our link settings, we can set the LTR
	 * thresholds.
	 */
	ret_val = igc_set_ltr_i225(hw, link);

	return ret_val;
}
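
/**
 * igc_config_collision_dist - Configure collision distance
 * @hw: pointer to the HW structure
 *
 * Configures the collision distance to the default value and is used
 * during link setup.
 */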
void igc_config_collision_dist(struct igc_hw *hw)
{
	u32 tctl;

	tctl = rd32(IGC_TCTL);

	tctl &= ~IGC_TCTL_COLD;
	tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT;

	wr32(IGC_TCTL, tctl);
	wrfl();
}
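
/**
 * igc_config_fc_after_link_up - Configures flow control after link
 * @hw: pointer to the HW structure
 *
 * Checks the status of auto-negotiation after link up to ensure that the
 * speed and duplex were not forced.  If the link needed to be forced, then
 * flow control needs to be forced also.  If auto-negotiation is enabled
 * and did not fail, then we configure flow control based on our link
 * partner.
 */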
s32 igc_config_fc_after_link_up(struct igc_hw *hw)
{
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	struct igc_mac_info *mac = &hw->mac;
	u16 speed, duplex;
	s32 ret_val = 0;

	/* Check for the case where auto-negotiation failed and we had to
	 * force link.  In this case, we need to force the configuration
	 * of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed)
		ret_val = igc_force_mac_fc(hw);

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if (mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | igc_fc_none
		 *   0   |    1    |   0   |   DC    | igc_fc_none
		 *   0   |    1    |   1   |    0    | igc_fc_none
		 *   0   |    1    |   1   |    1    | igc_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | igc_fc_none
		 *   1   |   DC    |   1   |   DC    | igc_fc_full
		 *   1   |    1    |   0   |    0    | igc_fc_none
		 *   1   |    1    |   0   |    1    | igc_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == igc_fc_full) {
				hw->fc.current_mode = igc_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = igc_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | igc_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = igc_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | igc_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = igc_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could be
		 * connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely for two reasons:  If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway.  If the intention of the link partner was to
		 * have flow control enabled, then by us enabling Rx only,
		 * we can at least receive pause frames and process them.
		 */
		else if ((hw->fc.requested_mode == igc_fc_none) ||
			 (hw->fc.requested_mode == igc_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = igc_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = igc_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = igc_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igc_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}

out:
	return ret_val;
}
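
/**
 * igc_get_auto_rd_done - Check for auto read completion
 * @hw: pointer to the HW structure
 *
 * Check EEPROM for Auto Read done bit.
 */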
s32 igc_get_auto_rd_done(struct igc_hw *hw)
{
	s32 ret_val = 0;
	s32 i = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD)
			break;
		usleep_range(1000, 2000);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg("Auto read by HW from NVM has not completed.\n");
		ret_val = -IGC_ERR_RESET;
		goto out;
	}

out:
	return ret_val;
}
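
/**
 * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Read the status register for the current speed/duplex and store the
 * current speed and duplex for copper connections.
 */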
s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
				    u16 *duplex)
{
	u32 status;

	status = rd32(IGC_STATUS);
	if (status & IGC_STATUS_SPEED_1000) {
		/* For I225, STATUS will indicate 1G speed in both 1 Gbps
		 * and 2.5 Gbps link modes.  An additional bit is used
		 * to differentiate between 1 Gbps and 2.5 Gbps.
		 */
		if (hw->mac.type == igc_i225 &&
		    (status & IGC_STATUS_SPEED_2500)) {
			*speed = SPEED_2500;
			hw_dbg("2500 Mbs, ");
		} else {
			*speed = SPEED_1000;
			hw_dbg("1000 Mbs, ");
		}
	} else if (status & IGC_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg("100 Mbs, ");
	} else {
		*speed = SPEED_10;
		hw_dbg("10 Mbs, ");
	}

	if (status & IGC_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg("Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg("Half Duplex\n");
	}

	return 0;
}
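
/**
 * igc_put_hw_semaphore - Release hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Release hardware semaphore used to access the PHY or NVM
 */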
void igc_put_hw_semaphore(struct igc_hw *hw)
{
	u32 swsm;

	swsm = rd32(IGC_SWSM);

	swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI);

	wr32(IGC_SWSM, swsm);
}
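
/**
 * igc_enable_mng_pass_thru - Check if management passthrough is needed
 * @hw: pointer to the HW structure
 *
 * Verifies the hardware needs to leave the interface enabled so that frames
 * can be directed to and from the management interface.
 */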
bool igc_enable_mng_pass_thru(struct igc_hw *hw)
{
	bool ret_val = false;
	u32 fwsm, factps;
	u32 manc;

	if (!hw->mac.asf_firmware_present)
		goto out;

	manc = rd32(IGC_MANC);

	if (!(manc & IGC_MANC_RCV_TCO_EN))
		goto out;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = rd32(IGC_FWSM);
		factps = rd32(IGC_FACTPS);

		if (!(factps & IGC_FACTPS_MNGCG) &&
		    ((fwsm & IGC_FWSM_MODE_MASK) ==
		    (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) {
			ret_val = true;
			goto out;
		}
	} else {
		if ((manc & IGC_MANC_SMBUS_EN) &&
		    !(manc & IGC_MANC_ASF_EN)) {
			ret_val = true;
			goto out;
		}
	}

out:
	return ret_val;
}
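
/**
 * igc_hash_mc_addr - Generate a multicast hash value
 * @hw: pointer to the HW structure
 * @mc_addr: pointer to a multicast address
 *
 * Generates a multicast address hash value which is used to determine
 * the multicast filter table array address and new table value.
 */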
static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask.
	 */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.
	 * The algorithm is such that there is a total of 8 bits of shifting.
	 * The bit_shift for a mc_filter_type of 0 represents the number of
	 * left-shifts where the MSB of mc_addr[5] would still fall within
	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
	 * of 8 bits of shifting, then mc_addr[4] will shift right the
	 * remaining number of bits, thus 8 - bit_shift.  The other cases
	 * are variations of this algorithm, raising the number of bits to
	 * shift mc_addr[5] left while keeping the 8-bit shifting total.
	 *
	 * For example, given the following Destination MAC Address and an
	 * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask),
	 * the bit_shift for case 0 is 4.  These are the hash values
	 * resulting from each mc_filter_type...
	 * [0] [1] [2] [3] [4] [5]
	 * 01  AA  00  12  34  56
	 * LSB                 MSB
	 *
	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D
	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				   (((u16)mc_addr[5]) << bit_shift)));

	return hash_value;
}
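
/**
 * igc_update_mc_addr_list - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @mc_addr_list: array of multicast addresses to program
 * @mc_addr_count: number of multicast addresses to program
 *
 * Updates the entire Multicast Table Array.
 * The caller must have a packed mc_addr_list of multicast addresses.
 */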
void igc_update_mc_addr_list(struct igc_hw *hw,
			     u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32)i < mc_addr_count; i++) {
		hash_value = igc_hash_mc_addr(hw, mc_addr_list);

		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
		mc_addr_list += ETH_ALEN;
	}

	/* replace the entire MTA table */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]);
	wrfl();
}