0004 #include <linux/pci.h>
0005 #include <linux/delay.h>
0006 #include <linux/sched.h>
0007
0008 #include "ixgbe.h"
0009 #include "ixgbe_phy.h"
0010 #include "ixgbe_mbx.h"
0011
0012 #define IXGBE_82599_MAX_TX_QUEUES 128
0013 #define IXGBE_82599_MAX_RX_QUEUES 128
0014 #define IXGBE_82599_RAR_ENTRIES 128
0015 #define IXGBE_82599_MC_TBL_SIZE 128
0016 #define IXGBE_82599_VFT_TBL_SIZE 128
0017 #define IXGBE_82599_RX_PB_SIZE 512
0018
0019 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
0020 static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
0021 static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
0022 static void
0023 ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed);
0024 static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
0025 ixgbe_link_speed speed,
0026 bool autoneg_wait_to_complete);
0027 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
0028 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
0029 bool autoneg_wait_to_complete);
0030 static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
0031 ixgbe_link_speed speed,
0032 bool autoneg_wait_to_complete);
0033 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
0034 ixgbe_link_speed speed,
0035 bool autoneg_wait_to_complete);
0036 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
0037 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
0038 u8 dev_addr, u8 *data);
0039 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
0040 u8 dev_addr, u8 data);
0041 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
0042 static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
0043
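/**
 *  ixgbe_mng_enabled - Is the manageability engine enabled?
 *  @hw: pointer to hardware structure
 *
 *  Returns true if the manageability engine is enabled.
 **/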
0044 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
0045 {
0046 u32 fwsm, manc, factps;
0047
0048 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
0049 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
0050 return false;
0051
0052 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
0053 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
0054 return false;
0055
0056 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
0057 if (factps & IXGBE_FACTPS_MNGCG)
0058 return false;
0059
0060 return true;
0061 }
0062
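/**
 *  ixgbe_init_mac_link_ops_82599 - Set up MAC link function pointers
 *  @hw: pointer to hardware structure
 *
 *  Selects the laser-control and setup_link handlers based on the media
 *  type, multispeed fiber support and SmartSpeed/LESM configuration.
 **/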
0063 static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
0064 {
0065 struct ixgbe_mac_info *mac = &hw->mac;
0066
/* enable the laser control functions for SFP+ fiber
 * and MNG not enabled
 */
0070 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
0071 !ixgbe_mng_enabled(hw)) {
0072 mac->ops.disable_tx_laser =
0073 &ixgbe_disable_tx_laser_multispeed_fiber;
0074 mac->ops.enable_tx_laser =
0075 &ixgbe_enable_tx_laser_multispeed_fiber;
0076 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
0077 } else {
0078 mac->ops.disable_tx_laser = NULL;
0079 mac->ops.enable_tx_laser = NULL;
0080 mac->ops.flap_tx_laser = NULL;
0081 }
0082
0083 if (hw->phy.multispeed_fiber) {
0084
0085 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
0086 mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599;
0087 mac->ops.set_rate_select_speed =
0088 ixgbe_set_hard_rate_select_speed;
0089 } else {
0090 if ((mac->ops.get_media_type(hw) ==
0091 ixgbe_media_type_backplane) &&
0092 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
0093 hw->phy.smart_speed == ixgbe_smart_speed_on) &&
0094 !ixgbe_verify_lesm_fw_enabled_82599(hw))
0095 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
0096 else
0097 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
0098 }
0099 }
0100
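/**
 *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
 *  @hw: pointer to hardware structure
 *
 *  Reads the SFP init sequence from the EEPROM and writes it to CORECTL so
 *  that link can be established with the attached module.
 **/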
0101 static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
0102 {
0103 s32 ret_val;
0104 u16 list_offset, data_offset, data_value;
0105
0106 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
0107 ixgbe_init_mac_link_ops_82599(hw);
0108
0109 hw->phy.ops.reset = NULL;
0110
0111 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
0112 &data_offset);
0113 if (ret_val)
0114 return ret_val;
0115
0116
0117 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
0118 IXGBE_GSSR_MAC_CSR_SM);
0119 if (ret_val)
0120 return IXGBE_ERR_SWFW_SYNC;
0121
0122 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
0123 goto setup_sfp_err;
0124 while (data_value != 0xffff) {
0125 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
0126 IXGBE_WRITE_FLUSH(hw);
0127 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
0128 goto setup_sfp_err;
0129 }
0130
0131
0132 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
0133
0134
0135
0136
0137 usleep_range(hw->eeprom.semaphore_delay * 1000,
0138 hw->eeprom.semaphore_delay * 2000);
0139
0140
0141 ret_val = hw->mac.ops.prot_autoc_write(hw,
0142 hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
0143 false);
0144
0145 if (ret_val) {
0146 hw_dbg(hw, " sfp module setup not complete\n");
0147 return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
0148 }
0149 }
0150
0151 return 0;
0152
0153 setup_sfp_err:
0154
0155 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
0156
0157
0158
0159 usleep_range(hw->eeprom.semaphore_delay * 1000,
0160 hw->eeprom.semaphore_delay * 2000);
0161 hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
0162 return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
0163 }
0164
/**
 *  prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
 *  @hw: pointer to hardware structure
 *  @locked: Return true if the SW/FW lock was taken for this read
 *  @reg_val: Value we read from AUTOC
 *
 *  For this part (82599) we need to wrap read-modify-writes with a possible
 *  FW/SW lock.  It is assumed this lock will be freed with the next
 *  prot_autoc_write_82599().  Note that locked can only be true in cases
 *  where this function doesn't return an error.
 **/
0176 static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
0177 u32 *reg_val)
0178 {
0179 s32 ret_val;
0180
0181 *locked = false;
0182
0183 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
0184 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
0185 IXGBE_GSSR_MAC_CSR_SM);
0186 if (ret_val)
0187 return IXGBE_ERR_SWFW_SYNC;
0188
0189 *locked = true;
0190 }
0191
0192 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
0193 return 0;
0194 }
0195
/**
 *  prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
 *  @hw: pointer to hardware structure
 *  @autoc: value to write to AUTOC
 *  @locked: bool to indicate whether the SW/FW lock was already taken by
 *           a previous prot_autoc_read_82599()
 *
 *  This part (82599) may need to hold the SW/FW lock around all writes to
 *  AUTOC.  Likewise after a write we need to do a pipeline reset.
 **/
0206 static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
0207 {
0208 s32 ret_val = 0;
0209
0210
0211 if (ixgbe_check_reset_blocked(hw))
0212 goto out;
0213
0214
0215
0216
0217
0218 if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
0219 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
0220 IXGBE_GSSR_MAC_CSR_SM);
0221 if (ret_val)
0222 return IXGBE_ERR_SWFW_SYNC;
0223
0224 locked = true;
0225 }
0226
0227 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
0228 ret_val = ixgbe_reset_pipeline_82599(hw);
0229
0230 out:
0231
0232
0233
0234 if (locked)
0235 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
0236
0237 return ret_val;
0238 }
0239
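/**
 *  ixgbe_get_invariants_82599 - Fill in device-specific invariant values
 *  @hw: pointer to hardware structure
 *
 *  Initializes the MAC link function pointers and the queue, RAR, multicast
 *  and VLAN table sizes for the 82599.
 **/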
0240 static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
0241 {
0242 struct ixgbe_mac_info *mac = &hw->mac;
0243
0244 ixgbe_init_mac_link_ops_82599(hw);
0245
0246 mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
0247 mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
0248 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
0249 mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
0250 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
0251 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
0252 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
0253
0254 return 0;
0255 }
0256
/**
 *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during get_invariants because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
 **/
0266 static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
0267 {
0268 struct ixgbe_mac_info *mac = &hw->mac;
0269 struct ixgbe_phy_info *phy = &hw->phy;
0270 s32 ret_val;
0271 u32 esdp;
0272
0273 if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
0274
0275 hw->phy.qsfp_shared_i2c_bus = true;
0276
0277
0278 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
0279 esdp |= IXGBE_ESDP_SDP0_DIR;
0280 esdp &= ~IXGBE_ESDP_SDP1_DIR;
0281 esdp &= ~IXGBE_ESDP_SDP0;
0282 esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
0283 esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
0284 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
0285 IXGBE_WRITE_FLUSH(hw);
0286
0287 phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
0288 phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
0289 }
0290
0291
0292 ret_val = phy->ops.identify(hw);
0293
0294
0295 ixgbe_init_mac_link_ops_82599(hw);
0296
0297
0298 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
0299 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
0300 mac->ops.get_link_capabilities =
0301 &ixgbe_get_copper_link_capabilities_generic;
0302 }
0303
0304
0305 switch (hw->phy.type) {
0306 case ixgbe_phy_tn:
0307 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
0308 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
0309 break;
0310 default:
0311 break;
0312 }
0313
0314 return ret_val;
0315 }
0316
/**
 *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @autoneg: true when autoneg or autotry is enabled
 *
 *  Determines the link capabilities by reading the AUTOC register.
 **/
0325 static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
0326 ixgbe_link_speed *speed,
0327 bool *autoneg)
0328 {
0329 u32 autoc = 0;
0330
0331
0332 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
0333 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
0334 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
0335 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
0336 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
0337 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
0338 *speed = IXGBE_LINK_SPEED_1GB_FULL;
0339 *autoneg = true;
0340 return 0;
0341 }
0342
/*
 * Determine link capabilities based on the stored value of AUTOC,
 * which represents EEPROM defaults.  If AUTOC value has not been
 * stored, use the current register value.
 */
0348 if (hw->mac.orig_link_settings_stored)
0349 autoc = hw->mac.orig_autoc;
0350 else
0351 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
0352
0353 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
0354 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
0355 *speed = IXGBE_LINK_SPEED_1GB_FULL;
0356 *autoneg = false;
0357 break;
0358
0359 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
0360 *speed = IXGBE_LINK_SPEED_10GB_FULL;
0361 *autoneg = false;
0362 break;
0363
0364 case IXGBE_AUTOC_LMS_1G_AN:
0365 *speed = IXGBE_LINK_SPEED_1GB_FULL;
0366 *autoneg = true;
0367 break;
0368
0369 case IXGBE_AUTOC_LMS_10G_SERIAL:
0370 *speed = IXGBE_LINK_SPEED_10GB_FULL;
0371 *autoneg = false;
0372 break;
0373
0374 case IXGBE_AUTOC_LMS_KX4_KX_KR:
0375 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
0376 *speed = IXGBE_LINK_SPEED_UNKNOWN;
0377 if (autoc & IXGBE_AUTOC_KR_SUPP)
0378 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
0379 if (autoc & IXGBE_AUTOC_KX4_SUPP)
0380 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
0381 if (autoc & IXGBE_AUTOC_KX_SUPP)
0382 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
0383 *autoneg = true;
0384 break;
0385
0386 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
0387 *speed = IXGBE_LINK_SPEED_100_FULL;
0388 if (autoc & IXGBE_AUTOC_KR_SUPP)
0389 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
0390 if (autoc & IXGBE_AUTOC_KX4_SUPP)
0391 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
0392 if (autoc & IXGBE_AUTOC_KX_SUPP)
0393 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
0394 *autoneg = true;
0395 break;
0396
0397 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
0398 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
0399 *autoneg = false;
0400 break;
0401
0402 default:
0403 return IXGBE_ERR_LINK_SETUP;
0404 }
0405
0406 if (hw->phy.multispeed_fiber) {
0407 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
0408 IXGBE_LINK_SPEED_1GB_FULL;
0409
0410
0411 if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
0412 *autoneg = false;
0413 else
0414 *autoneg = true;
0415 }
0416
0417 return 0;
0418 }
0419
/**
 *  ixgbe_get_media_type_82599 - Get media type
 *  @hw: pointer to hardware structure
 *
 *  Returns the media type (fiber, copper, backplane)
 **/
0426 static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
0427 {
0428
0429 switch (hw->phy.type) {
0430 case ixgbe_phy_cu_unknown:
0431 case ixgbe_phy_tn:
0432 return ixgbe_media_type_copper;
0433
0434 default:
0435 break;
0436 }
0437
0438 switch (hw->device_id) {
0439 case IXGBE_DEV_ID_82599_KX4:
0440 case IXGBE_DEV_ID_82599_KX4_MEZZ:
0441 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
0442 case IXGBE_DEV_ID_82599_KR:
0443 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
0444 case IXGBE_DEV_ID_82599_XAUI_LOM:
0445
0446 return ixgbe_media_type_backplane;
0447
0448 case IXGBE_DEV_ID_82599_SFP:
0449 case IXGBE_DEV_ID_82599_SFP_FCOE:
0450 case IXGBE_DEV_ID_82599_SFP_EM:
0451 case IXGBE_DEV_ID_82599_SFP_SF2:
0452 case IXGBE_DEV_ID_82599_SFP_SF_QP:
0453 case IXGBE_DEV_ID_82599EN_SFP:
0454 return ixgbe_media_type_fiber;
0455
0456 case IXGBE_DEV_ID_82599_CX4:
0457 return ixgbe_media_type_cx4;
0458
0459 case IXGBE_DEV_ID_82599_T3_LOM:
0460 return ixgbe_media_type_copper;
0461
0462 case IXGBE_DEV_ID_82599_LS:
0463 return ixgbe_media_type_fiber_lco;
0464
0465 case IXGBE_DEV_ID_82599_QSFP_SF_QP:
0466 return ixgbe_media_type_fiber_qsfp;
0467
0468 default:
0469 return ixgbe_media_type_unknown;
0470 }
0471 }
0472
/**
 *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
 *  @hw: pointer to hardware structure
 *
 *  Disables link during the D3 power down sequence when manageability is
 *  not present, WoL is not enabled and the EEPROM allows it.
 **/
0480 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
0481 {
0482 u32 autoc2_reg;
0483 u16 ee_ctrl_2 = 0;
0484
0485 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
0486
0487 if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
0488 ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
0489 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
0490 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
0491 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
0492 }
0493 }
0494
/**
 *  ixgbe_start_mac_link_82599 - Setup MAC link settings
 *  @hw: pointer to hardware structure
 *  @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 *  Configures link settings based on values in the ixgbe_hw struct.
 *  Restarts the link.  Performs autonegotiation if needed.
 **/
0503 static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
0504 bool autoneg_wait_to_complete)
0505 {
0506 u32 autoc_reg;
0507 u32 links_reg;
0508 u32 i;
0509 s32 status = 0;
0510 bool got_lock = false;
0511
0512 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
0513 status = hw->mac.ops.acquire_swfw_sync(hw,
0514 IXGBE_GSSR_MAC_CSR_SM);
0515 if (status)
0516 return status;
0517
0518 got_lock = true;
0519 }
0520
0521
0522 ixgbe_reset_pipeline_82599(hw);
0523
0524 if (got_lock)
0525 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
0526
0527
0528 if (autoneg_wait_to_complete) {
0529 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
0530 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
0531 IXGBE_AUTOC_LMS_KX4_KX_KR ||
0532 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
0533 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
0534 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
0535 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
0536 links_reg = 0;
0537 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
0538 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
0539 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
0540 break;
0541 msleep(100);
0542 }
0543 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
0544 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
0545 hw_dbg(hw, "Autoneg did not complete.\n");
0546 }
0547 }
0548 }
0549
0550
0551 msleep(50);
0552
0553 return status;
0554 }
0555
/**
 *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 *  @hw: pointer to hardware structure
 *
 *  The base drivers may require better control over SFP+ module
 *  PHY states.  This includes selectively shutting down the Tx
 *  laser on the PHY, effectively halting physical link.
 **/
0564 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
0565 {
0566 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
0567
0568
0569 if (ixgbe_check_reset_blocked(hw))
0570 return;
0571
0572
0573 esdp_reg |= IXGBE_ESDP_SDP3;
0574 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
0575 IXGBE_WRITE_FLUSH(hw);
0576 udelay(100);
0577 }
0578
/**
 *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 *  @hw: pointer to hardware structure
 *
 *  The base drivers may require better control over SFP+ module
 *  PHY states.  This includes selectively turning on the Tx
 *  laser on the PHY, effectively starting physical link.
 **/
0587 static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
0588 {
0589 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
0590
0591
0592 esdp_reg &= ~IXGBE_ESDP_SDP3;
0593 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
0594 IXGBE_WRITE_FLUSH(hw);
0595 msleep(100);
0596 }
0597
/**
 *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 *  @hw: pointer to hardware structure
 *
 *  When the driver changes the link speeds that it can support,
 *  it sets autotry_restart to true to indicate that we need to
 *  initiate a new autotry session with the link partner.  To do
 *  so, we disable and then re-enable the Tx laser, to alert the
 *  link partner that it also needs to restart autotry on its end.
 *  This is consistent with true clause 37 autoneg, which also
 *  involves a loss of signal.
 **/
0610 static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
0611 {
0612
0613 if (ixgbe_check_reset_blocked(hw))
0614 return;
0615
0616 if (hw->mac.autotry_restart) {
0617 ixgbe_disable_tx_laser_multispeed_fiber(hw);
0618 ixgbe_enable_tx_laser_multispeed_fiber(hw);
0619 hw->mac.autotry_restart = false;
0620 }
0621 }
0622
/**
 *  ixgbe_set_hard_rate_select_speed - Set module link speed
 *  @hw: pointer to hardware structure
 *  @speed: link speed to set
 *
 *  Sets the module link speed via the SDP5 rate select pin.
 **/
0630 static void
0631 ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
0632 {
0633 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
0634
0635 switch (speed) {
0636 case IXGBE_LINK_SPEED_10GB_FULL:
0637 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
0638 break;
0639 case IXGBE_LINK_SPEED_1GB_FULL:
0640 esdp_reg &= ~IXGBE_ESDP_SDP5;
0641 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
0642 break;
0643 default:
0644 hw_dbg(hw, "Invalid fixed module speed\n");
0645 return;
0646 }
0647
0648 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
0649 IXGBE_WRITE_FLUSH(hw);
0650 }
0651
/**
 *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 *  Implements the Intel SmartSpeed algorithm.
 **/
0660 static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
0661 ixgbe_link_speed speed,
0662 bool autoneg_wait_to_complete)
0663 {
0664 s32 status = 0;
0665 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
0666 s32 i, j;
0667 bool link_up = false;
0668 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
0669
0670
0671 hw->phy.autoneg_advertised = 0;
0672
0673 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
0674 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
0675
0676 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
0677 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
0678
0679 if (speed & IXGBE_LINK_SPEED_100_FULL)
0680 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
0681
/*
 * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
 * autoneg advertisement if link is unable to be established at the
 * highest negotiated rate.  This can sometimes happen due to integrity
 * issues with the physical media connection.
 */
0690 hw->phy.smart_speed_active = false;
0691 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
0692 status = ixgbe_setup_mac_link_82599(hw, speed,
0693 autoneg_wait_to_complete);
0694 if (status != 0)
0695 goto out;
0696
/*
 * Wait for the controller to acquire link.  Per IEEE 802.3ap,
 * Section 73.10.2, we may have to wait up to 500ms if KR is
 * attempted, so poll five times at 100ms intervals.
 */
0703 for (i = 0; i < 5; i++) {
0704 mdelay(100);
0705
0706
0707 status = hw->mac.ops.check_link(hw, &link_speed,
0708 &link_up, false);
0709 if (status != 0)
0710 goto out;
0711
0712 if (link_up)
0713 goto out;
0714 }
0715 }
0716
/*
 * We didn't get link.  If we advertised KR plus one of KX4/KX
 * (or BX4/BX), then disable KR and try again.
 */
0721 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
0722 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
0723 goto out;
0724
0725
0726 hw->phy.smart_speed_active = true;
0727 status = ixgbe_setup_mac_link_82599(hw, speed,
0728 autoneg_wait_to_complete);
0729 if (status != 0)
0730 goto out;
0731
/*
 * Wait for the controller to acquire link.  600ms will allow for
 * the AN link_fail_inhibit_timer as well as for multiple cycles of
 * parallel detect, both 10g and 1g.
 */
0738 for (i = 0; i < 6; i++) {
0739 mdelay(100);
0740
0741
0742 status = hw->mac.ops.check_link(hw, &link_speed,
0743 &link_up, false);
0744 if (status != 0)
0745 goto out;
0746
0747 if (link_up)
0748 goto out;
0749 }
0750
0751
0752 hw->phy.smart_speed_active = false;
0753 status = ixgbe_setup_mac_link_82599(hw, speed,
0754 autoneg_wait_to_complete);
0755
0756 out:
0757 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
0758 hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n");
0759 return status;
0760 }
0761
/**
 *  ixgbe_setup_mac_link_82599 - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
 **/
0770 static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
0771 ixgbe_link_speed speed,
0772 bool autoneg_wait_to_complete)
0773 {
0774 bool autoneg = false;
0775 s32 status;
0776 u32 pma_pmd_1g, link_mode, links_reg, i;
0777 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
0778 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
0779 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
0780
0781
0782 u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
0783
0784 u32 orig_autoc = 0;
0785
0786 u32 autoc = current_autoc;
0787
0788
0789 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
0790 &autoneg);
0791 if (status)
0792 return status;
0793
0794 speed &= link_capabilities;
0795
0796 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
0797 return IXGBE_ERR_LINK_SETUP;
0798
0799
0800 if (hw->mac.orig_link_settings_stored)
0801 orig_autoc = hw->mac.orig_autoc;
0802 else
0803 orig_autoc = autoc;
0804
0805 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
0806 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
0807
0808 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
0809 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
0810 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
0811
0812 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
0813 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
0814 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
0815 autoc |= IXGBE_AUTOC_KX4_SUPP;
0816 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
0817 (hw->phy.smart_speed_active == false))
0818 autoc |= IXGBE_AUTOC_KR_SUPP;
0819 }
0820 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
0821 autoc |= IXGBE_AUTOC_KX_SUPP;
0822 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
0823 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
0824 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
0825
0826 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
0827 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
0828 autoc &= ~IXGBE_AUTOC_LMS_MASK;
0829 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
0830 }
0831 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
0832 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
0833
0834 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
0835 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
0836 autoc &= ~IXGBE_AUTOC_LMS_MASK;
0837 if (autoneg)
0838 autoc |= IXGBE_AUTOC_LMS_1G_AN;
0839 else
0840 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
0841 }
0842 }
0843
0844 if (autoc != current_autoc) {
0845
0846 status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
0847 if (status)
0848 return status;
0849
0850
0851 if (autoneg_wait_to_complete) {
0852 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
0853 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
0854 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
0855 links_reg = 0;
0856 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
0857 links_reg =
0858 IXGBE_READ_REG(hw, IXGBE_LINKS);
0859 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
0860 break;
0861 msleep(100);
0862 }
0863 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
0864 status =
0865 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
0866 hw_dbg(hw, "Autoneg did not complete.\n");
0867 }
0868 }
0869 }
0870
0871
0872 msleep(50);
0873 }
0874
0875 return status;
0876 }
0877
/**
 *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: true if waiting is needed to complete
 *
 *  Restarts link on PHY and MAC based on settings passed in.
 **/
0886 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
0887 ixgbe_link_speed speed,
0888 bool autoneg_wait_to_complete)
0889 {
0890 s32 status;
0891
0892
0893 status = hw->phy.ops.setup_link_speed(hw, speed,
0894 autoneg_wait_to_complete);
0895
0896 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
0897
0898 return status;
0899 }
0900
/**
 *  ixgbe_reset_hw_82599 - Perform hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks
 *  and clears all interrupts, performs a PHY reset, and performs a link (MAC)
 *  reset.
 **/
0909 static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
0910 {
0911 ixgbe_link_speed link_speed;
0912 s32 status;
0913 u32 ctrl, i, autoc, autoc2;
0914 u32 curr_lms;
0915 bool link_up = false;
0916
0917
0918 status = hw->mac.ops.stop_adapter(hw);
0919 if (status)
0920 return status;
0921
0922
0923 ixgbe_clear_tx_pending(hw);
0924
0925
0926
0927
0928 status = hw->phy.ops.init(hw);
0929
0930 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
0931 return status;
0932
0933
0934 if (hw->phy.sfp_setup_needed) {
0935 status = hw->mac.ops.setup_sfp(hw);
0936 hw->phy.sfp_setup_needed = false;
0937 }
0938
0939 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
0940 return status;
0941
0942
0943 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
0944 hw->phy.ops.reset(hw);
0945
0946
0947 curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
0948
0949 mac_reset_top:
/*
 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
 * If link reset is used when link is up, it might reset the PHY when
 * mng is using it.  If link is down or the flag to force full link
 * reset is set, then perform link reset.
 */
0956 ctrl = IXGBE_CTRL_LNK_RST;
0957 if (!hw->force_full_reset) {
0958 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
0959 if (link_up)
0960 ctrl = IXGBE_CTRL_RST;
0961 }
0962
0963 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
0964 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
0965 IXGBE_WRITE_FLUSH(hw);
0966 usleep_range(1000, 1200);
0967
0968
0969 for (i = 0; i < 10; i++) {
0970 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
0971 if (!(ctrl & IXGBE_CTRL_RST_MASK))
0972 break;
0973 udelay(1);
0974 }
0975
0976 if (ctrl & IXGBE_CTRL_RST_MASK) {
0977 status = IXGBE_ERR_RESET_FAILED;
0978 hw_dbg(hw, "Reset polling failed to complete.\n");
0979 }
0980
0981 msleep(50);
0982
/*
 * Double resets are required for recovery from certain error
 * conditions.  Between resets, it is necessary to stall to
 * allow time for any pending HW events to complete.
 */
0988 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
0989 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
0990 goto mac_reset_top;
0991 }
0992
/*
 * Store the original AUTOC/AUTOC2 values if they have not been
 * stored off yet.  Otherwise restore the stored original
 * values since the reset operation sets back to defaults.
 */
0998 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
0999 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1000
1001
1002 if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
1003 autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
1004 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1005 IXGBE_WRITE_FLUSH(hw);
1006 }
1007
1008 if (hw->mac.orig_link_settings_stored == false) {
1009 hw->mac.orig_autoc = autoc;
1010 hw->mac.orig_autoc2 = autoc2;
1011 hw->mac.orig_link_settings_stored = true;
1012 } else {
/* If MNG FW is running on a multi-speed device that
 * doesn't autoneg with our driver support we need to
 * leave LMS in the state it was before the MAC reset.
 * Likewise if we support WoL we don't want to change
 * the LMS state either.
 */
1020 if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
1021 hw->wol_enabled)
1022 hw->mac.orig_autoc =
1023 (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
1024 curr_lms;
1025
1026 if (autoc != hw->mac.orig_autoc) {
1027 status = hw->mac.ops.prot_autoc_write(hw,
1028 hw->mac.orig_autoc,
1029 false);
1030 if (status)
1031 return status;
1032 }
1033
1034 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1035 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1036 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1037 autoc2 |= (hw->mac.orig_autoc2 &
1038 IXGBE_AUTOC2_UPPER_MASK);
1039 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1040 }
1041 }
1042
1043
1044 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1045
/*
 * Store MAC address from RAR0, clear receive address registers, and
 * clear the multicast table.  Also reset num_rar_entries to 128,
 * since we modify this value when programming the SAN MAC address.
 */
1051 hw->mac.num_rar_entries = IXGBE_82599_RAR_ENTRIES;
1052 hw->mac.ops.init_rx_addrs(hw);
1053
1054
1055 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1056
1057
1058 if (is_valid_ether_addr(hw->mac.san_addr)) {
1059
1060 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1061
1062 hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
1063 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1064
1065
1066 hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
1067 IXGBE_CLEAR_VMDQ_ALL);
1068
1069
1070 hw->mac.num_rar_entries--;
1071 }
1072
1073
1074 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1075 &hw->mac.wwpn_prefix);
1076
1077 return status;
1078 }
1079
/**
 *  ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 *  @hw: pointer to hardware structure
 *  @fdircmd: current value of FDIRCMD register
 **/
1085 static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
1086 {
1087 int i;
1088
1089 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1090 *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1091 if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1092 return 0;
1093 udelay(10);
1094 }
1095
1096 return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
1097 }
1098
/**
 *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 *  @hw: pointer to hardware structure
 **/
1103 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1104 {
1105 int i;
1106 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1107 u32 fdircmd;
1108 s32 err;
1109
1110 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1111
/*
 * Before starting the reinitialization process,
 * FDIRCMD.CMD must be zero.
 */
1116 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1117 if (err) {
1118 hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n");
1119 return err;
1120 }
1121
1122 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1123 IXGBE_WRITE_FLUSH(hw);
1124
/*
 * The 82599 flow director init flow cannot simply be restarted.
 * Work around this by performing the following steps before
 * re-writing the FDIRCTRL control register with the same value:
 * - write 1 to bit 8 of the FDIRCMD register, then
 * - write 0 to bit 8 of the FDIRCMD register
 */
1131 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1132 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1133 IXGBE_FDIRCMD_CLEARHT));
1134 IXGBE_WRITE_FLUSH(hw);
1135 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1136 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1137 ~IXGBE_FDIRCMD_CLEARHT));
1138 IXGBE_WRITE_FLUSH(hw);
1139
1140
1141
1142
1143 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1144 IXGBE_WRITE_FLUSH(hw);
1145
1146 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1147 IXGBE_WRITE_FLUSH(hw);
1148
1149
1150 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1151 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1152 IXGBE_FDIRCTRL_INIT_DONE)
1153 break;
1154 usleep_range(1000, 2000);
1155 }
1156 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1157 hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
1158 return IXGBE_ERR_FDIR_REINIT_FAILED;
1159 }
1160
1161
1162 IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1163 IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1164 IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1165 IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1166 IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1167
1168 return 0;
1169 }
1170
/**
 *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register
 **/
1176 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1177 {
1178 int i;
1179
1180
1181 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1182 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1183
/*
 * Poll init-done after we write the register.  The initialization time
 * depends on link speed and Rx load, so poll up to
 * IXGBE_FDIR_INIT_DONE_POLL times, sleeping roughly 1 msec between
 * polls.  If the poll time is exceeded we only log a debug message.
 */
1197 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1198 IXGBE_WRITE_FLUSH(hw);
1199 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1200 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1201 IXGBE_FDIRCTRL_INIT_DONE)
1202 break;
1203 usleep_range(1000, 2000);
1204 }
1205
1206 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1207 hw_dbg(hw, "Flow Director poll time exceeded!\n");
1208 }
1209
/**
 *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register, initially
 *             contains just the value of the Rx packet buffer allocation
 **/
1216 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1217 {
/*
 * Continue setup of fdirctrl register bits:
 *  Move the flexible bytes to use the 1st dword
 *  Set the maximum length per hash bucket to 0xA filters
 *  Set the maximum number of filters to wait for before sending
 *  an interrupt
 */
1224 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1225 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1226 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1227
1228
1229 ixgbe_fdir_enable_82599(hw, fdirctrl);
1230
1231 return 0;
1232 }
1233
/**
 *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 *  @hw: pointer to hardware structure
 *  @fdirctrl: value to write to flow director control register, initially
 *             contains just the value of the Rx packet buffer allocation
 **/
1240 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1241 {
/*
 * Continue setup of fdirctrl register bits:
 *  Turn perfect match filtering on
 *  Initialize the drop queue
 *  Move the flexible bytes to use the 1st dword
 *  Set the maximum length per hash bucket to 0xA filters
 *  Set the maximum number of filters to wait for before sending
 *  an interrupt
 */
1250 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1251 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1252 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1253 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1254 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1255
1256
1257 ixgbe_fdir_enable_82599(hw, fdirctrl);
1258
1259 return 0;
1260 }
1261
/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
1267 #define IXGBE_ATR_COMMON_HASH_KEY \
1268 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1269 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1270 do { \
1271 u32 n = (_n); \
1272 if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
1273 common_hash ^= lo_hash_dword >> n; \
1274 else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
1275 bucket_hash ^= lo_hash_dword >> n; \
1276 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
1277 sig_hash ^= lo_hash_dword << (16 - n); \
1278 if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
1279 common_hash ^= hi_hash_dword >> n; \
1280 else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
1281 bucket_hash ^= hi_hash_dword >> n; \
1282 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
1283 sig_hash ^= hi_hash_dword << (16 - n); \
1284 } while (0)
1285
/**
 *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 *  @input: input bitstream to compute the hash on
 *  @common: compressed common input dword
 *
 *  Computes both the bucket and signature hashes in a single pass and
 *  returns them combined, with the signature hash in the upper 16 bits
 *  and the bucket hash in the lower 16 bits of the result.
 **/
1297 static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1298 union ixgbe_atr_hash_dword common)
1299 {
1300 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1301 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1302
1303
1304 flow_vm_vlan = ntohl(input.dword);
1305
1306
1307 hi_hash_dword = ntohl(common.dword);
1308
1309
1310 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1311
1312
1313 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1314
1315
1316 IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1317
/*
 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
 * delay this because bit 0 of the stream should not be processed
 * so we do not add the VLAN until after bit 0 was processed
 */
1323 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1324
1325
1326 IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1327 IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1328 IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1329 IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1330 IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1331 IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1332 IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1333 IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1334 IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1335 IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1336 IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1337 IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1338 IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1339 IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1340 IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1341
1342
1343 bucket_hash ^= common_hash;
1344 bucket_hash &= IXGBE_ATR_HASH_MASK;
1345
1346 sig_hash ^= common_hash << 16;
1347 sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1348
1349
1350 return sig_hash ^ bucket_hash;
1351 }
1352
/**
 *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 *  @hw: pointer to hardware structure
 *  @input: unique input dword
 *  @common: compressed common input dword
 *  @queue: queue index to direct traffic to
 *
 *  Note that the tunnel bit in input must not be set when the hardware
 *  tunneling support does not exist.
 **/
1363 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1364 union ixgbe_atr_hash_dword input,
1365 union ixgbe_atr_hash_dword common,
1366 u8 queue)
1367 {
1368 u64 fdirhashcmd;
1369 u8 flow_type;
1370 bool tunnel;
1371 u32 fdircmd;
1372
1373
1374
1375
1376
1377 tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
1378 flow_type = input.formatted.flow_type &
1379 (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
1380 switch (flow_type) {
1381 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1382 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1383 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1384 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1385 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1386 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1387 break;
1388 default:
1389 hw_dbg(hw, " Error on flow type input\n");
1390 return IXGBE_ERR_CONFIG;
1391 }
1392
1393
1394 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1395 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1396 fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1397 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1398 if (tunnel)
1399 fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
1400
/*
 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
 */
1405 fdirhashcmd = (u64)fdircmd << 32;
1406 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1407 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1408
1409 hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1410
1411 return 0;
1412 }
1413
1414 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1415 do { \
1416 u32 n = (_n); \
1417 if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
1418 bucket_hash ^= lo_hash_dword >> n; \
1419 if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
1420 bucket_hash ^= hi_hash_dword >> n; \
1421 } while (0)
1422
/**
 *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 *  @input: input bitstream to compute the hash on
 *  @input_mask: mask for the input bitstream
 *
 *  This function serves two main purposes.  First it applies the input_mask
 *  to the atr_input resulting in a cleaned up atr_input data stream.
 *  Secondly it computes the hash and stores it in the bkt_hash field at
 *  the end of the input byte stream.  This way it will be available for
 *  future use without needing to recompute the hash.
 **/
1434 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1435 union ixgbe_atr_input *input_mask)
1436 {
1437
1438 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1439 u32 bucket_hash = 0;
1440 __be32 hi_dword = 0;
1441 int i;
1442
1443
1444 for (i = 0; i <= 10; i++)
1445 input->dword_stream[i] &= input_mask->dword_stream[i];
1446
1447
1448 flow_vm_vlan = ntohl(input->dword_stream[0]);
1449
1450
1451 for (i = 1; i <= 10; i++)
1452 hi_dword ^= input->dword_stream[i];
1453 hi_hash_dword = ntohl(hi_dword);
1454
1455
1456 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1457
1458
1459 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1460
1461
1462 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1463
1464
1465
1466
1467
1468
1469 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1470
1471
1472 for (i = 1; i <= 15; i++)
1473 IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
1474
1475
1476
1477
1478
1479 input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF);
1480 }
1481
/**
 *  ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks
 *  @input_mask: mask to be bit swapped
 *
 *  The source and destination port masks for flow director are bit swapped
 *  in that bit 15 effects bit 0, 14 effects 1, 13 effects 2 etc.  In order to
 *  generate a correctly swapped value we need to bit swap the mask and that
 *  is what is accomplished by this function.
 **/
1491 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1492 {
1493 u32 mask = ntohs(input_mask->formatted.dst_port);
1494
1495 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1496 mask |= ntohs(input_mask->formatted.src_port);
1497 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1498 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1499 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1500 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1501 }
1502
/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
1510 #define IXGBE_STORE_AS_BE32(_value) \
1511 (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1512 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1513
1514 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1515 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
1516
1517 #define IXGBE_STORE_AS_BE16(_value) __swab16(ntohs((_value)))
1518
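/**
 *  ixgbe_fdir_set_input_mask_82599 - Program the Flow Director input masks
 *  @hw: pointer to hardware structure
 *  @input_mask: mask to apply to incoming filter fields
 **/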
1519 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1520 union ixgbe_atr_input *input_mask)
1521 {
1522
1523 u32 fdirm = IXGBE_FDIRM_DIPv6;
1524 u32 fdirtcpm;
1525
1526
/*
 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
 * are zero, then assume a full mask for that field.  Also assume that
 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
 * cannot be masked out in this implementation.
 *
 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
 * point in time.
 */
1537 if (input_mask->formatted.bkt_hash)
1538 hw_dbg(hw, " bucket hash should always be 0 in mask\n");
1539
1540
1541 switch (input_mask->formatted.vm_pool & 0x7F) {
1542 case 0x0:
1543 fdirm |= IXGBE_FDIRM_POOL;
1544 break;
1545 case 0x7F:
1546 break;
1547 default:
1548 hw_dbg(hw, " Error on vm pool mask\n");
1549 return IXGBE_ERR_CONFIG;
1550 }
1551
1552 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1553 case 0x0:
1554 fdirm |= IXGBE_FDIRM_L4P;
1555 if (input_mask->formatted.dst_port ||
1556 input_mask->formatted.src_port) {
1557 hw_dbg(hw, " Error on src/dst port mask\n");
1558 return IXGBE_ERR_CONFIG;
1559 }
1560 break;
1561 case IXGBE_ATR_L4TYPE_MASK:
1562 break;
1563 default:
1564 hw_dbg(hw, " Error on flow type mask\n");
1565 return IXGBE_ERR_CONFIG;
1566 }
1567
1568 switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
1569 case 0x0000:
1570
1571 fdirm |= IXGBE_FDIRM_VLANID;
1572 fallthrough;
1573 case 0x0FFF:
1574
1575 fdirm |= IXGBE_FDIRM_VLANP;
1576 break;
1577 case 0xE000:
1578
1579 fdirm |= IXGBE_FDIRM_VLANID;
1580 fallthrough;
1581 case 0xEFFF:
1582
1583 break;
1584 default:
1585 hw_dbg(hw, " Error on VLAN mask\n");
1586 return IXGBE_ERR_CONFIG;
1587 }
1588
1589 switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
1590 case 0x0000:
1591
1592 fdirm |= IXGBE_FDIRM_FLEX;
1593 fallthrough;
1594 case 0xFFFF:
1595 break;
1596 default:
1597 hw_dbg(hw, " Error on flexible byte mask\n");
1598 return IXGBE_ERR_CONFIG;
1599 }
1600
1601
1602 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1603
1604
1605 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1606
1607
1608 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1609 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1610
1611
1612 switch (hw->mac.type) {
1613 case ixgbe_mac_X550:
1614 case ixgbe_mac_X550EM_x:
1615 case ixgbe_mac_x550em_a:
1616 IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
1617 break;
1618 default:
1619 break;
1620 }
1621
1622
1623 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1624 ~input_mask->formatted.src_ip[0]);
1625 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1626 ~input_mask->formatted.dst_ip[0]);
1627
1628 return 0;
1629 }
1630
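/**
 *  ixgbe_fdir_write_perfect_filter_82599 - Program a perfect filter
 *  @hw: pointer to hardware structure
 *  @input: filter fields to program (already masked)
 *  @soft_id: software index used to track the filter
 *  @queue: Rx queue index to direct matching traffic to
 **/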
1631 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1632 union ixgbe_atr_input *input,
1633 u16 soft_id, u8 queue)
1634 {
1635 u32 fdirport, fdirvlan, fdirhash, fdircmd;
1636 s32 err;
1637
1638
1639 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1640 input->formatted.src_ip[0]);
1641 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1642 input->formatted.src_ip[1]);
1643 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1644 input->formatted.src_ip[2]);
1645
1646
1647 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1648
1649
1650 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1651
1652
1653 fdirport = be16_to_cpu(input->formatted.dst_port);
1654 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1655 fdirport |= be16_to_cpu(input->formatted.src_port);
1656 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1657
1658
1659 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1660 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1661 fdirvlan |= ntohs(input->formatted.vlan_id);
1662 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1663
1664
1665 fdirhash = (__force u32)input->formatted.bkt_hash;
1666 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1667 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1668
1669
1670
1671
1672
1673 IXGBE_WRITE_FLUSH(hw);
1674
1675
1676 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1677 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1678 if (queue == IXGBE_FDIR_DROP_QUEUE)
1679 fdircmd |= IXGBE_FDIRCMD_DROP;
1680 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1681 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1682 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1683
1684 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1685 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1686 if (err) {
1687 hw_dbg(hw, "Flow Director command did not complete!\n");
1688 return err;
1689 }
1690
1691 return 0;
1692 }
1693
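/**
 *  ixgbe_fdir_erase_perfect_filter_82599 - Remove a perfect filter
 *  @hw: pointer to hardware structure
 *  @input: filter fields (only bkt_hash is used)
 *  @soft_id: software index of the filter to remove
 **/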
1694 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1695 union ixgbe_atr_input *input,
1696 u16 soft_id)
1697 {
1698 u32 fdirhash;
1699 u32 fdircmd;
1700 s32 err;
1701
1702
1703 fdirhash = (__force u32)input->formatted.bkt_hash;
1704 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1705 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1706
1707
1708 IXGBE_WRITE_FLUSH(hw);
1709
1710
1711 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1712
1713 err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
1714 if (err) {
1715 hw_dbg(hw, "Flow Director command did not complete!\n");
1716 return err;
1717 }
1718
1719
1720 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1721 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1722 IXGBE_WRITE_FLUSH(hw);
1723 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1724 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1725 }
1726
1727 return 0;
1728 }
1729
/**
 *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: analog register to read
 *  @val: read value
 *
 *  Performs read operation to Atlas analog register specified.
 **/
1738 static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
1739 {
1740 u32 core_ctl;
1741
1742 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
1743 (reg << 8));
1744 IXGBE_WRITE_FLUSH(hw);
1745 udelay(10);
1746 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
1747 *val = (u8)core_ctl;
1748
1749 return 0;
1750 }
1751
/**
 *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Atlas analog register
 *  @hw: pointer to hardware structure
 *  @reg: atlas register to write
 *  @val: value to write
 *
 *  Performs write operation to Atlas analog register specified.
 **/
1760 static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
1761 {
1762 u32 core_ctl;
1763
1764 core_ctl = (reg << 8) | val;
1765 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
1766 IXGBE_WRITE_FLUSH(hw);
1767 udelay(10);
1768
1769 return 0;
1770 }
1771
/**
 *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware using the generic start_hw function
 *  and the generation 2 start_hw function, then verifies the
 *  firmware version.
 **/
1780 static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1781 {
1782 s32 ret_val = 0;
1783
1784 ret_val = ixgbe_start_hw_generic(hw);
1785 if (ret_val)
1786 return ret_val;
1787
1788 ret_val = ixgbe_start_hw_gen2(hw);
1789 if (ret_val)
1790 return ret_val;
1791
1792
1793 hw->mac.autotry_restart = true;
1794
1795 return ixgbe_verify_fw_version_82599(hw);
1796 }
1797
/**
 *  ixgbe_identify_phy_82599 - Get physical layer module
 *  @hw: pointer to hardware structure
 *
 *  Determines the physical layer module found on the current adapter.
 *  If a PHY is not found, falls back to SFP module identification.
 **/
1806 static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1807 {
1808 s32 status;
1809
1810
1811 status = ixgbe_identify_phy_generic(hw);
1812 if (status) {
1813
1814 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1815 return status;
1816 status = ixgbe_identify_module_generic(hw);
1817 }
1818
1819
1820 if (hw->phy.type == ixgbe_phy_unknown) {
1821 hw->phy.type = ixgbe_phy_none;
1822 status = 0;
1823 }
1824
1825
1826 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1827 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1828
1829 return status;
1830 }
1831
/**
 *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 *  @hw: pointer to hardware structure
 *  @regval: register value to write to RXCTRL
 *
 *  Enables the Rx DMA unit for 82599
 **/
1839 static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
1840 {
/*
 * Disable the Rx buffer (security engine path) before touching RXCTRL
 * and re-enable it afterwards, so that no traffic reaches the Rx DMA
 * unit while the enable/disable transition is in progress.
 */
1847 hw->mac.ops.disable_rx_buff(hw);
1848
1849 if (regval & IXGBE_RXCTRL_RXEN)
1850 hw->mac.ops.enable_rx(hw);
1851 else
1852 hw->mac.ops.disable_rx(hw);
1853
1854 hw->mac.ops.enable_rx_buff(hw);
1855
1856 return 0;
1857 }
1858
/**
 *  ixgbe_verify_fw_version_82599 - verify FW version for 82599
 *  @hw: pointer to hardware structure
 *
 *  Verifies that the installed firmware version is 0.6 or higher
 *  for SFI devices.  All 82599 SFI devices should have version 0.6 or higher.
 *
 *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
 *  if the FW version is not supported.
 **/
1869 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
1870 {
1871 s32 status = IXGBE_ERR_EEPROM_VERSION;
1872 u16 fw_offset, fw_ptp_cfg_offset;
1873 u16 offset;
1874 u16 fw_version = 0;
1875
1876
1877 if (hw->phy.media_type != ixgbe_media_type_fiber)
1878 return 0;
1879
1880
1881 offset = IXGBE_FW_PTR;
1882 if (hw->eeprom.ops.read(hw, offset, &fw_offset))
1883 goto fw_version_err;
1884
1885 if (fw_offset == 0 || fw_offset == 0xFFFF)
1886 return IXGBE_ERR_EEPROM_VERSION;
1887
1888
1889 offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
1890 if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
1891 goto fw_version_err;
1892
1893 if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
1894 return IXGBE_ERR_EEPROM_VERSION;
1895
1896
1897 offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
1898 if (hw->eeprom.ops.read(hw, offset, &fw_version))
1899 goto fw_version_err;
1900
1901 if (fw_version > 0x5)
1902 status = 0;
1903
1904 return status;
1905
1906 fw_version_err:
1907 hw_err(hw, "eeprom read at offset %d failed\n", offset);
1908 return IXGBE_ERR_EEPROM_VERSION;
1909 }
1910
/**
 *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
 *  @hw: pointer to hardware structure
 *
 *  Returns true if the LESM FW module is present and enabled.  Otherwise
 *  returns false.  Smart Speed must be disabled if LESM FW module is enabled.
 **/
1918 static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
1919 {
1920 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
1921 s32 status;
1922
1923
1924 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
1925
1926 if (status || fw_offset == 0 || fw_offset == 0xFFFF)
1927 return false;
1928
1929
1930 status = hw->eeprom.ops.read(hw, (fw_offset +
1931 IXGBE_FW_LESM_PARAMETERS_PTR),
1932 &fw_lesm_param_offset);
1933
1934 if (status ||
1935 fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF)
1936 return false;
1937
1938
1939 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
1940 IXGBE_FW_LESM_STATE_1),
1941 &fw_lesm_state);
1942
1943 if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
1944 return true;
1945
1946 return false;
1947 }
1948
/**
 *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
 *  fastest available method
 *
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in EEPROM to read
 *  @words: number of words
 *  @data: word(s) read from the EEPROM
 *
 *  Retrieves 16 bit word(s) read from EEPROM
 **/
1960 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
1961 u16 words, u16 *data)
1962 {
1963 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1964
1965
1966
1967
1968 if (eeprom->type == ixgbe_eeprom_spi &&
1969 offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)
1970 return ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
1971
1972 return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words,
1973 data);
1974 }
1975
/**
 *  ixgbe_read_eeprom_82599 - Read EEPROM word using
 *  fastest available method
 *
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM
 **/
1986 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
1987 u16 offset, u16 *data)
1988 {
1989 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1990
1991
1992
1993
1994
1995 if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR)
1996 return ixgbe_read_eerd_generic(hw, offset, data);
1997
1998 return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
1999 }
2000
/**
 *  ixgbe_reset_pipeline_82599 - perform pipeline reset
 *  @hw: pointer to hardware structure
 *
 *  Reset pipeline by asserting Restart_AN together with LMS change to ensure
 *  full pipeline reset.  Note - We must hold the SW/FW semaphore before
 *  writing to AUTOC, so this function assumes the semaphore is held.
 **/
2010 static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2011 {
2012 s32 ret_val;
2013 u32 anlp1_reg = 0;
2014 u32 i, autoc_reg, autoc2_reg;
2015
2016
2017 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2018 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2019 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2020 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2021 IXGBE_WRITE_FLUSH(hw);
2022 }
2023
2024 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2025 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2026
2027
2028 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2029 autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2030
2031
2032 for (i = 0; i < 10; i++) {
2033 usleep_range(4000, 8000);
2034 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2035 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2036 break;
2037 }
2038
2039 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2040 hw_dbg(hw, "auto negotiation not completed\n");
2041 ret_val = IXGBE_ERR_RESET_FAILED;
2042 goto reset_pipeline_out;
2043 }
2044
2045 ret_val = 0;
2046
2047 reset_pipeline_out:
2048
2049 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2050 IXGBE_WRITE_FLUSH(hw);
2051
2052 return ret_val;
2053 }
2054
/**
 *  ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to read
 *  @dev_addr: address to read from
 *  @data: value read
 *
 *  Performs byte read operation to SFP module's EEPROM over I2C interface at
 *  a specified device address.
 **/
2065 static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2066 u8 dev_addr, u8 *data)
2067 {
2068 u32 esdp;
2069 s32 status;
2070 s32 timeout = 200;
2071
2072 if (hw->phy.qsfp_shared_i2c_bus == true) {
2073
2074 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2075 esdp |= IXGBE_ESDP_SDP0;
2076 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2077 IXGBE_WRITE_FLUSH(hw);
2078
2079 while (timeout) {
2080 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2081 if (esdp & IXGBE_ESDP_SDP1)
2082 break;
2083
2084 usleep_range(5000, 10000);
2085 timeout--;
2086 }
2087
2088 if (!timeout) {
2089 hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
2090 status = IXGBE_ERR_I2C;
2091 goto release_i2c_access;
2092 }
2093 }
2094
2095 status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2096
2097 release_i2c_access:
2098 if (hw->phy.qsfp_shared_i2c_bus == true) {
2099
2100 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2101 esdp &= ~IXGBE_ESDP_SDP0;
2102 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2103 IXGBE_WRITE_FLUSH(hw);
2104 }
2105
2106 return status;
2107 }
2108
/**
 *  ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
 *  @hw: pointer to hardware structure
 *  @byte_offset: byte offset to write
 *  @dev_addr: address to write to
 *  @data: value to write
 *
 *  Performs byte write operation to SFP module's EEPROM over I2C interface at
 *  a specified device address.
 **/
2119 static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
2120 u8 dev_addr, u8 data)
2121 {
2122 u32 esdp;
2123 s32 status;
2124 s32 timeout = 200;
2125
2126 if (hw->phy.qsfp_shared_i2c_bus == true) {
2127
2128 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2129 esdp |= IXGBE_ESDP_SDP0;
2130 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2131 IXGBE_WRITE_FLUSH(hw);
2132
2133 while (timeout) {
2134 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2135 if (esdp & IXGBE_ESDP_SDP1)
2136 break;
2137
2138 usleep_range(5000, 10000);
2139 timeout--;
2140 }
2141
2142 if (!timeout) {
2143 hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
2144 status = IXGBE_ERR_I2C;
2145 goto release_i2c_access;
2146 }
2147 }
2148
2149 status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
2150
2151 release_i2c_access:
2152 if (hw->phy.qsfp_shared_i2c_bus == true) {
2153
2154 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2155 esdp &= ~IXGBE_ESDP_SDP0;
2156 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
2157 IXGBE_WRITE_FLUSH(hw);
2158 }
2159
2160 return status;
2161 }
2162
2163 static const struct ixgbe_mac_operations mac_ops_82599 = {
2164 .init_hw = &ixgbe_init_hw_generic,
2165 .reset_hw = &ixgbe_reset_hw_82599,
2166 .start_hw = &ixgbe_start_hw_82599,
2167 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
2168 .get_media_type = &ixgbe_get_media_type_82599,
2169 .enable_rx_dma = &ixgbe_enable_rx_dma_82599,
2170 .disable_rx_buff = &ixgbe_disable_rx_buff_generic,
2171 .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
2172 .get_mac_addr = &ixgbe_get_mac_addr_generic,
2173 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
2174 .get_device_caps = &ixgbe_get_device_caps_generic,
2175 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
2176 .stop_adapter = &ixgbe_stop_adapter_generic,
2177 .get_bus_info = &ixgbe_get_bus_info_generic,
2178 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
2179 .read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
2180 .write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
2181 .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599,
2182 .setup_link = &ixgbe_setup_mac_link_82599,
2183 .set_rxpba = &ixgbe_set_rxpba_generic,
2184 .check_link = &ixgbe_check_mac_link_generic,
2185 .get_link_capabilities = &ixgbe_get_link_capabilities_82599,
2186 .led_on = &ixgbe_led_on_generic,
2187 .led_off = &ixgbe_led_off_generic,
2188 .init_led_link_act = ixgbe_init_led_link_act_generic,
2189 .blink_led_start = &ixgbe_blink_led_start_generic,
2190 .blink_led_stop = &ixgbe_blink_led_stop_generic,
2191 .set_rar = &ixgbe_set_rar_generic,
2192 .clear_rar = &ixgbe_clear_rar_generic,
2193 .set_vmdq = &ixgbe_set_vmdq_generic,
2194 .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
2195 .clear_vmdq = &ixgbe_clear_vmdq_generic,
2196 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
2197 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
2198 .enable_mc = &ixgbe_enable_mc_generic,
2199 .disable_mc = &ixgbe_disable_mc_generic,
2200 .clear_vfta = &ixgbe_clear_vfta_generic,
2201 .set_vfta = &ixgbe_set_vfta_generic,
2202 .fc_enable = &ixgbe_fc_enable_generic,
2203 .setup_fc = ixgbe_setup_fc_generic,
2204 .fc_autoneg = ixgbe_fc_autoneg,
2205 .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
2206 .init_uta_tables = &ixgbe_init_uta_tables_generic,
2207 .setup_sfp = &ixgbe_setup_sfp_modules_82599,
2208 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
2209 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2210 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
2211 .release_swfw_sync = &ixgbe_release_swfw_sync,
2212 .init_swfw_sync = NULL,
2213 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
2214 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
2215 .prot_autoc_read = &prot_autoc_read_82599,
2216 .prot_autoc_write = &prot_autoc_write_82599,
2217 .enable_rx = &ixgbe_enable_rx_generic,
2218 .disable_rx = &ixgbe_disable_rx_generic,
2219 };
2220
2221 static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2222 .init_params = &ixgbe_init_eeprom_params_generic,
2223 .read = &ixgbe_read_eeprom_82599,
2224 .read_buffer = &ixgbe_read_eeprom_buffer_82599,
2225 .write = &ixgbe_write_eeprom_generic,
2226 .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
2227 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
2228 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
2229 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
2230 };
2231
2232 static const struct ixgbe_phy_operations phy_ops_82599 = {
2233 .identify = &ixgbe_identify_phy_82599,
2234 .identify_sfp = &ixgbe_identify_module_generic,
2235 .init = &ixgbe_init_phy_ops_82599,
2236 .reset = &ixgbe_reset_phy_generic,
2237 .read_reg = &ixgbe_read_phy_reg_generic,
2238 .write_reg = &ixgbe_write_phy_reg_generic,
2239 .setup_link = &ixgbe_setup_phy_link_generic,
2240 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
2241 .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
2242 .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
2243 .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic,
2244 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
2245 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
2246 .check_overtemp = &ixgbe_tn_check_overtemp,
2247 };
2248
2249 const struct ixgbe_info ixgbe_82599_info = {
2250 .mac = ixgbe_mac_82599EB,
2251 .get_invariants = &ixgbe_get_invariants_82599,
2252 .mac_ops = &mac_ops_82599,
2253 .eeprom_ops = &eeprom_ops_82599,
2254 .phy_ops = &phy_ops_82599,
2255 .mbx_ops = &mbx_ops_generic,
2256 .mvals = ixgbe_mvals_8259X,
2257 };