0001
0002
0003
0004 #include "ixgbe_x540.h"
0005 #include "ixgbe_type.h"
0006 #include "ixgbe_common.h"
0007 #include "ixgbe_phy.h"
0008
0009 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
0010 static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
0011 static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *);
0012 static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *);
0013 static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);
0014
0015 static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
0016 {
0017 struct ixgbe_mac_info *mac = &hw->mac;
0018 struct ixgbe_phy_info *phy = &hw->phy;
0019 struct ixgbe_link_info *link = &hw->link;
0020
0021
0022 ixgbe_get_invariants_X540(hw);
0023
0024 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
0025 phy->ops.set_phy_power = NULL;
0026
0027 link->addr = IXGBE_CS4227;
0028
0029 return 0;
0030 }
0031
0032 static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
0033 {
0034 struct ixgbe_phy_info *phy = &hw->phy;
0035
0036
0037 ixgbe_get_invariants_X540(hw);
0038
0039 phy->ops.set_phy_power = NULL;
0040
0041 return 0;
0042 }
0043
0044 static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
0045 {
0046 struct ixgbe_mac_info *mac = &hw->mac;
0047 struct ixgbe_phy_info *phy = &hw->phy;
0048
0049
0050 ixgbe_get_invariants_X540(hw);
0051
0052 if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
0053 phy->ops.set_phy_power = NULL;
0054
0055 return 0;
0056 }
0057
0058 static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
0059 {
0060 struct ixgbe_phy_info *phy = &hw->phy;
0061
0062
0063 ixgbe_get_invariants_X540(hw);
0064
0065 phy->ops.set_phy_power = NULL;
0066
0067 return 0;
0068 }
0069
0070
0071
0072
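/* Configure the SDP pins in the ESDP register used for SFP mux control on
 * this port.
 */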
0073 static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
0074 {
0075 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
0076
0077 if (hw->bus.lan_id) {
0078 esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
0079 esdp |= IXGBE_ESDP_SDP1_DIR;
0080 }
0081 esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
0082 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
0083 IXGBE_WRITE_FLUSH(hw);
0084 }
0085
0086
0087
0088
0089
0090
0091
0092
0093
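/* Read a CS4227 register through the unlocked link interface. */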
0094 static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
0095 {
0096 return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
0097 }
0098
0099
0100
0101
0102
0103
0104
0105
0106
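/* Write a CS4227 register through the unlocked link interface. */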
0107 static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
0108 {
0109 return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
0110 }
0111
0112
0113
0114
0115
0116
0117
0118
0119
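/* Read a register of the SFP port expander over unlocked I2C. */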
0120 static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
0121 {
0122 s32 status;
0123
0124 status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value);
0125 if (status)
0126 hw_err(hw, "port expander access failed with %d\n", status);
0127 return status;
0128 }
0129
0130
0131
0132
0133
0134
0135
0136
0137
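/* Write a register of the SFP port expander over unlocked I2C. */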
0138 static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
0139 {
0140 s32 status;
0141
0142 status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE,
0143 value);
0144 if (status)
0145 hw_err(hw, "port expander access failed with %d\n", status);
0146 return status;
0147 }
0148
0149
0150
0151
0152
0153
0154
0155
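/* Toggle the CS4227 reset line through the port expander, then poll until
 * the CS4227 reports that its EEPROM image has loaded.
 */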
0156 static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
0157 {
0158 s32 status;
0159 u32 retry;
0160 u16 value;
0161 u8 reg;
0162
0163
0164 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
0165 if (status)
0166 return status;
0167 reg |= IXGBE_PE_BIT1;
0168 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
0169 if (status)
0170 return status;
0171
0172 status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
0173 if (status)
0174 return status;
0175 reg &= ~IXGBE_PE_BIT1;
0176 status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
0177 if (status)
0178 return status;
0179
0180 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
0181 if (status)
0182 return status;
0183 reg &= ~IXGBE_PE_BIT1;
0184 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
0185 if (status)
0186 return status;
0187
0188 usleep_range(IXGBE_CS4227_RESET_HOLD, IXGBE_CS4227_RESET_HOLD + 100);
0189
0190 status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
0191 if (status)
0192 return status;
0193 reg |= IXGBE_PE_BIT1;
0194 status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
0195 if (status)
0196 return status;
0197
0198
0199 msleep(IXGBE_CS4227_RESET_DELAY);
0200 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
0201 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
0202 &value);
0203 if (!status && value == IXGBE_CS4227_EEPROM_LOAD_OK)
0204 break;
0205 msleep(IXGBE_CS4227_CHECK_DELAY);
0206 }
0207 if (retry == IXGBE_CS4227_RETRIES) {
0208 hw_err(hw, "CS4227 reset did not complete\n");
0209 return IXGBE_ERR_PHY;
0210 }
0211
0212 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
0213 if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
0214 hw_err(hw, "CS4227 EEPROM did not load successfully\n");
0215 return IXGBE_ERR_PHY;
0216 }
0217
0218 return 0;
0219 }
0220
0221
0222
0223
0224
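/* Check the CS4227 scratch register to see whether another function has
 * already reset the part; if not, perform the reset and record completion.
 */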
0225 static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
0226 {
0227 u32 swfw_mask = hw->phy.phy_semaphore_mask;
0228 s32 status;
0229 u16 value;
0230 u8 retry;
0231
0232 for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
0233 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
0234 if (status) {
0235 hw_err(hw, "semaphore failed with %d\n", status);
0236 msleep(IXGBE_CS4227_CHECK_DELAY);
0237 continue;
0238 }
0239
0240
0241 status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
0242 if (!status && value == IXGBE_CS4227_RESET_COMPLETE)
0243 goto out;
0244
0245 if (status || value != IXGBE_CS4227_RESET_PENDING)
0246 break;
0247
0248
0249 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
0250 msleep(IXGBE_CS4227_CHECK_DELAY);
0251 }
0252
0253 if (retry == IXGBE_CS4227_RETRIES) {
0254 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
0255 if (status) {
0256 hw_err(hw, "semaphore failed with %d\n", status);
0257 return;
0258 }
0259 }
0260
0261
0262 status = ixgbe_reset_cs4227(hw);
0263 if (status) {
0264 hw_err(hw, "CS4227 reset failed: %d", status);
0265 goto out;
0266 }
0267
0268
0269
0270
0271 ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
0272 IXGBE_CS4227_RESET_PENDING);
0273 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
0274 usleep_range(10000, 12000);
0275 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
0276 if (status) {
0277 hw_err(hw, "semaphore failed with %d", status);
0278 return;
0279 }
0280
0281
0282 status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
0283 IXGBE_CS4227_RESET_COMPLETE);
0284
0285 out:
0286 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
0287 msleep(hw->eeprom.semaphore_delay);
0288 }
0289
0290
0291
0292
0293
0294
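/* Determine the PHY type (or identify the SFP module) and set the PHY
 * semaphore mask based on the device ID.
 */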
0295 static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
0296 {
0297 switch (hw->device_id) {
0298 case IXGBE_DEV_ID_X550EM_A_SFP:
0299 if (hw->bus.lan_id)
0300 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
0301 else
0302 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
0303 return ixgbe_identify_module_generic(hw);
0304 case IXGBE_DEV_ID_X550EM_X_SFP:
0305
0306 hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
0307 ixgbe_setup_mux_ctl(hw);
0308 ixgbe_check_cs4227(hw);
0309 fallthrough;
0310 case IXGBE_DEV_ID_X550EM_A_SFP_N:
0311 return ixgbe_identify_module_generic(hw);
0312 case IXGBE_DEV_ID_X550EM_X_KX4:
0313 hw->phy.type = ixgbe_phy_x550em_kx4;
0314 break;
0315 case IXGBE_DEV_ID_X550EM_X_XFI:
0316 hw->phy.type = ixgbe_phy_x550em_xfi;
0317 break;
0318 case IXGBE_DEV_ID_X550EM_X_KR:
0319 case IXGBE_DEV_ID_X550EM_A_KR:
0320 case IXGBE_DEV_ID_X550EM_A_KR_L:
0321 hw->phy.type = ixgbe_phy_x550em_kr;
0322 break;
0323 case IXGBE_DEV_ID_X550EM_A_10G_T:
0324 if (hw->bus.lan_id)
0325 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
0326 else
0327 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
0328 fallthrough;
0329 case IXGBE_DEV_ID_X550EM_X_10G_T:
0330 return ixgbe_identify_phy_generic(hw);
0331 case IXGBE_DEV_ID_X550EM_X_1G_T:
0332 hw->phy.type = ixgbe_phy_ext_1g_t;
0333 break;
0334 case IXGBE_DEV_ID_X550EM_A_1G_T:
0335 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
0336 hw->phy.type = ixgbe_phy_fw;
0337 hw->phy.ops.read_reg = NULL;
0338 hw->phy.ops.write_reg = NULL;
0339 if (hw->bus.lan_id)
0340 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
0341 else
0342 hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
0343 break;
0344 default:
0345 break;
0346 }
0347 return 0;
0348 }
0349
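/* Direct PHY register reads and writes are not implemented for these devices. */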
0350 static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
0351 u32 device_type, u16 *phy_data)
0352 {
0353 return IXGBE_NOT_IMPLEMENTED;
0354 }
0355
0356 static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
0357 u32 device_type, u16 phy_data)
0358 {
0359 return IXGBE_NOT_IMPLEMENTED;
0360 }
0361
0362
0363
0364
0365
0366
0367
0368
0369
0370
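/* Combined-format I2C read, taking the I2C bus lock. */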
0371 static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
0372 u16 reg, u16 *val)
0373 {
0374 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
0375 }
0376
0377
0378
0379
0380
0381
0382
0383
0384
0385
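/* Combined-format I2C read without taking the I2C bus lock. */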
0386 static s32
0387 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
0388 u16 reg, u16 *val)
0389 {
0390 return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
0391 }
0392
0393
0394
0395
0396
0397
0398
0399
0400
0401
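/* Combined-format I2C write, taking the I2C bus lock. */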
0402 static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
0403 u8 addr, u16 reg, u16 val)
0404 {
0405 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
0406 }
0407
0408
0409
0410
0411
0412
0413
0414
0415
0416
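/* Combined-format I2C write without taking the I2C bus lock. */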
0417 static s32
0418 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
0419 u8 addr, u16 reg, u16 val)
0420 {
0421 return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
0422 }
0423
0424
0425
0426
0427
0428
0429
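/* Send a PHY activity command to firmware, retrying on failure; on success
 * the response words are copied back into *data.
 */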
0430 s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
0431 u32 (*data)[FW_PHY_ACT_DATA_COUNT])
0432 {
0433 union {
0434 struct ixgbe_hic_phy_activity_req cmd;
0435 struct ixgbe_hic_phy_activity_resp rsp;
0436 } hic;
0437 u16 retries = FW_PHY_ACT_RETRIES;
0438 s32 rc;
0439 u32 i;
0440
0441 do {
0442 memset(&hic, 0, sizeof(hic));
0443 hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
0444 hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
0445 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
0446 hic.cmd.port_number = hw->bus.lan_id;
0447 hic.cmd.activity_id = cpu_to_le16(activity);
0448 for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i)
0449 hic.cmd.data[i] = cpu_to_be32((*data)[i]);
0450
0451 rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
0452 IXGBE_HI_COMMAND_TIMEOUT,
0453 true);
0454 if (rc)
0455 return rc;
0456 if (hic.rsp.hdr.cmd_or_resp.ret_status ==
0457 FW_CEM_RESP_STATUS_SUCCESS) {
0458 for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
0459 (*data)[i] = be32_to_cpu(hic.rsp.data[i]);
0460 return 0;
0461 }
0462 usleep_range(20, 30);
0463 --retries;
0464 } while (retries > 0);
0465
0466 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
0467 }
0468
0469 static const struct {
0470 u16 fw_speed;
0471 ixgbe_link_speed phy_speed;
0472 } ixgbe_fw_map[] = {
0473 { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
0474 { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
0475 { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
0476 { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
0477 { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
0478 { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
0479 };
0480
0481
0482
0483
0484
0485
0486
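/* Query firmware for the PHY ID, supported speeds, and EEE capabilities of
 * a firmware-controlled PHY.
 */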
0487 static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
0488 {
0489 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
0490 u16 phy_speeds;
0491 u16 phy_id_lo;
0492 s32 rc;
0493 u16 i;
0494
0495 if (hw->phy.id)
0496 return 0;
0497
0498 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
0499 if (rc)
0500 return rc;
0501
0502 hw->phy.speeds_supported = 0;
0503 phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
0504 for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) {
0505 if (phy_speeds & ixgbe_fw_map[i].fw_speed)
0506 hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
0507 }
0508
0509 hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
0510 phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
0511 hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
0512 hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
0513 if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
0514 return IXGBE_ERR_PHY_ADDR_INVALID;
0515
0516 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
0517 hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
0518 IXGBE_LINK_SPEED_1GB_FULL;
0519 hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
0520 return 0;
0521 }
0522
0523
0524
0525
0526
0527
0528
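/* Identify a firmware-controlled PHY and set the per-port PHY semaphore mask. */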
0529 static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
0530 {
0531 if (hw->bus.lan_id)
0532 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
0533 else
0534 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
0535
0536 hw->phy.type = ixgbe_phy_fw;
0537 hw->phy.ops.read_reg = NULL;
0538 hw->phy.ops.write_reg = NULL;
0539 return ixgbe_get_phy_id_fw(hw);
0540 }
0541
0542
0543
0544
0545
0546
0547
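/* Shut down a firmware-controlled PHY by requesting a forced link down. */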
0548 static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
0549 {
0550 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
0551
0552 setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
0553 return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
0554 }
0555
0556
0557
0558
0559
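/* Build and send the firmware link-setup request from the requested pause
 * mode, advertised speeds, and EEE settings.
 */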
0560 static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
0561 {
0562 u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
0563 s32 rc;
0564 u16 i;
0565
0566 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
0567 return 0;
0568
0569 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
0570 hw_err(hw, "rx_pause not valid in strict IEEE mode\n");
0571 return IXGBE_ERR_INVALID_LINK_SETTINGS;
0572 }
0573
0574 switch (hw->fc.requested_mode) {
0575 case ixgbe_fc_full:
0576 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
0577 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
0578 break;
0579 case ixgbe_fc_rx_pause:
0580 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
0581 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
0582 break;
0583 case ixgbe_fc_tx_pause:
0584 setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
0585 FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
0586 break;
0587 default:
0588 break;
0589 }
0590
0591 for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) {
0592 if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
0593 setup[0] |= ixgbe_fw_map[i].fw_speed;
0594 }
0595 setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
0596
0597 if (hw->phy.eee_speeds_advertised)
0598 setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
0599
0600 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
0601 if (rc)
0602 return rc;
0603 if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
0604 return IXGBE_ERR_OVERTEMP;
0605 return 0;
0606 }
0607
0608
0609
0610
0611
0612
0613
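/* Set up flow control for a firmware-controlled PHY, defaulting to full pause. */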
0614 static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
0615 {
0616 if (hw->fc.requested_mode == ixgbe_fc_default)
0617 hw->fc.requested_mode = ixgbe_fc_full;
0618
0619 return ixgbe_setup_fw_link(hw);
0620 }
0621
0622
0623
0624
0625
0626
0627
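/* Initialize EEPROM parameters (type and word size) from the EEC register. */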
0628 static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
0629 {
0630 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
0631 u32 eec;
0632 u16 eeprom_size;
0633
0634 if (eeprom->type == ixgbe_eeprom_uninitialized) {
0635 eeprom->semaphore_delay = 10;
0636 eeprom->type = ixgbe_flash;
0637
0638 eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
0639 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
0640 IXGBE_EEC_SIZE_SHIFT);
0641 eeprom->word_size = BIT(eeprom_size +
0642 IXGBE_EEPROM_WORD_SIZE_SHIFT);
0643
0644 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
0645 eeprom->type, eeprom->word_size);
0646 }
0647
0648 return 0;
0649 }
0650
0651
0652
0653
0654
0655
0656
0657
0658
0659
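/* Wait for the IOSF sideband mailbox to go idle, optionally returning the
 * last control value read.
 */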
0660 static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
0661 {
0662 u32 i, command;
0663
0664
0665
0666
0667
0668 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
0669 command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
0670 if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
0671 break;
0672 udelay(10);
0673 }
0674 if (ctrl)
0675 *ctrl = command;
0676 if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
0677 hw_dbg(hw, "IOSF wait timed out\n");
0678 return IXGBE_ERR_PHY;
0679 }
0680
0681 return 0;
0682 }
0683
0684
0685
0686
0687
0688
0689
0690
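/* Read a device register over the IOSF sideband interface. */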
0691 static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
0692 u32 device_type, u32 *data)
0693 {
0694 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
0695 u32 command, error;
0696 s32 ret;
0697
0698 ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
0699 if (ret)
0700 return ret;
0701
0702 ret = ixgbe_iosf_wait(hw, NULL);
0703 if (ret)
0704 goto out;
0705
0706 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
0707 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
0708
0709
0710 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
0711
0712 ret = ixgbe_iosf_wait(hw, &command);
0713
0714 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
0715 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
0716 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
0717 hw_dbg(hw, "Failed to read, error %x\n", error);
0718 return IXGBE_ERR_PHY;
0719 }
0720
0721 if (!ret)
0722 *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
0723
0724 out:
0725 hw->mac.ops.release_swfw_sync(hw, gssr);
0726 return ret;
0727 }
0728
0729
0730
0731
0732
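/* Request the token for shared PHY access from firmware. */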
0733 static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
0734 {
0735 struct ixgbe_hic_phy_token_req token_cmd;
0736 s32 status;
0737
0738 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
0739 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
0740 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
0741 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
0742 token_cmd.port_number = hw->bus.lan_id;
0743 token_cmd.command_type = FW_PHY_TOKEN_REQ;
0744 token_cmd.pad = 0;
0745 status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
0746 IXGBE_HI_COMMAND_TIMEOUT,
0747 true);
0748 if (status)
0749 return status;
0750 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
0751 return 0;
0752 if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
0753 return IXGBE_ERR_FW_RESP_INVALID;
0754
0755 return IXGBE_ERR_TOKEN_RETRY;
0756 }
0757
0758
0759
0760
0761
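/* Release the shared PHY access token back to firmware. */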
0762 static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
0763 {
0764 struct ixgbe_hic_phy_token_req token_cmd;
0765 s32 status;
0766
0767 token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
0768 token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
0769 token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
0770 token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
0771 token_cmd.port_number = hw->bus.lan_id;
0772 token_cmd.command_type = FW_PHY_TOKEN_REL;
0773 token_cmd.pad = 0;
0774 status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
0775 IXGBE_HI_COMMAND_TIMEOUT,
0776 true);
0777 if (status)
0778 return status;
0779 if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
0780 return 0;
0781 return IXGBE_ERR_FW_RESP_INVALID;
0782 }
0783
0784
0785
0786
0787
0788
0789
0790
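/* Write an IOSF sideband register by proxying the request through firmware. */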
0791 static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
0792 __always_unused u32 device_type,
0793 u32 data)
0794 {
0795 struct ixgbe_hic_internal_phy_req write_cmd;
0796
0797 memset(&write_cmd, 0, sizeof(write_cmd));
0798 write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
0799 write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
0800 write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
0801 write_cmd.port_number = hw->bus.lan_id;
0802 write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
0803 write_cmd.address = cpu_to_be16(reg_addr);
0804 write_cmd.write_data = cpu_to_be32(data);
0805
0806 return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd),
0807 IXGBE_HI_COMMAND_TIMEOUT, false);
0808 }
0809
0810
0811
0812
0813
0814
0815
0816
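/* Read an IOSF sideband register by proxying the request through firmware. */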
0817 static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
0818 __always_unused u32 device_type,
0819 u32 *data)
0820 {
0821 union {
0822 struct ixgbe_hic_internal_phy_req cmd;
0823 struct ixgbe_hic_internal_phy_resp rsp;
0824 } hic;
0825 s32 status;
0826
0827 memset(&hic, 0, sizeof(hic));
0828 hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
0829 hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
0830 hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
0831 hic.cmd.port_number = hw->bus.lan_id;
0832 hic.cmd.command_type = FW_INT_PHY_REQ_READ;
0833 hic.cmd.address = cpu_to_be16(reg_addr);
0834
0835 status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
0836 IXGBE_HI_COMMAND_TIMEOUT, true);
0837
0838
0839 *data = be32_to_cpu(hic.rsp.read_data);
0840
0841 return status;
0842 }
0843
0844
0845
0846
0847
0848
0849
0850
0851
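/* Read a buffer of EEPROM words through the host interface, in chunks
 * limited by the firmware read buffer size.
 */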
0852 static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
0853 u16 offset, u16 words, u16 *data)
0854 {
0855 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
0856 struct ixgbe_hic_read_shadow_ram buffer;
0857 u32 current_word = 0;
0858 u16 words_to_read;
0859 s32 status;
0860 u32 i;
0861
0862
0863 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
0864 if (status) {
0865 hw_dbg(hw, "EEPROM read buffer - semaphore failed\n");
0866 return status;
0867 }
0868
0869 while (words) {
0870 if (words > FW_MAX_READ_BUFFER_SIZE / 2)
0871 words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
0872 else
0873 words_to_read = words;
0874
0875 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
0876 buffer.hdr.req.buf_lenh = 0;
0877 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
0878 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
0879
0880
0881 buffer.address = (__force u32)cpu_to_be32((offset +
0882 current_word) * 2);
0883 buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);
0884 buffer.pad2 = 0;
0885 buffer.pad3 = 0;
0886
0887 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
0888 IXGBE_HI_COMMAND_TIMEOUT);
0889 if (status) {
0890 hw_dbg(hw, "Host interface command failed\n");
0891 goto out;
0892 }
0893
0894 for (i = 0; i < words_to_read; i++) {
0895 u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
0896 2 * i;
0897 u32 value = IXGBE_READ_REG(hw, reg);
0898
0899 data[current_word] = (u16)(value & 0xffff);
0900 current_word++;
0901 i++;
0902 if (i < words_to_read) {
0903 value >>= 16;
0904 data[current_word] = (u16)(value & 0xffff);
0905 current_word++;
0906 }
0907 }
0908 words -= words_to_read;
0909 }
0910
0911 out:
0912 hw->mac.ops.release_swfw_sync(hw, mask);
0913 return status;
0914 }
0915
0916
0917
0918
0919
0920
0921
0922
0923
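/* Add the EEPROM region referenced by a pointer word to the running
 * checksum, reading either from the supplied buffer or from the device.
 */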
0924 static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
0925 u16 size, u16 *csum, u16 *buffer,
0926 u32 buffer_size)
0927 {
0928 u16 buf[256];
0929 s32 status;
0930 u16 length, bufsz, i, start;
0931 u16 *local_buffer;
0932
0933 bufsz = ARRAY_SIZE(buf);
0934
0935
0936 if (!buffer) {
0937 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
0938 if (status) {
0939 hw_dbg(hw, "Failed to read EEPROM image\n");
0940 return status;
0941 }
0942 local_buffer = buf;
0943 } else {
0944 if (buffer_size < ptr)
0945 return IXGBE_ERR_PARAM;
0946 local_buffer = &buffer[ptr];
0947 }
0948
0949 if (size) {
0950 start = 0;
0951 length = size;
0952 } else {
0953 start = 1;
0954 length = local_buffer[0];
0955
0956
0957 if (length == 0xFFFF || length == 0 ||
0958 (ptr + length) >= hw->eeprom.word_size)
0959 return 0;
0960 }
0961
0962 if (buffer && ((u32)start + (u32)length > buffer_size))
0963 return IXGBE_ERR_PARAM;
0964
0965 for (i = start; length; i++, length--) {
0966 if (i == bufsz && !buffer) {
0967 ptr += bufsz;
0968 i = 0;
0969 if (length < bufsz)
0970 bufsz = length;
0971
0972
0973 status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
0974 bufsz, buf);
0975 if (status) {
0976 hw_dbg(hw, "Failed to read EEPROM image\n");
0977 return status;
0978 }
0979 }
0980 *csum += local_buffer[i];
0981 }
0982 return 0;
0983 }
0984
0985
0986
0987
0988
0989
0990
0991
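/* Sum the fixed EEPROM words and the regions referenced by their pointers
 * (skipping the PHY and option ROM pointers) and return the checksum.
 */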
0992 static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
0993 u32 buffer_size)
0994 {
0995 u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
0996 u16 *local_buffer;
0997 s32 status;
0998 u16 checksum = 0;
0999 u16 pointer, i, size;
1000
1001 hw->eeprom.ops.init_params(hw);
1002
1003 if (!buffer) {
1004
1005 status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
1006 IXGBE_EEPROM_LAST_WORD + 1,
1007 eeprom_ptrs);
1008 if (status) {
1009 hw_dbg(hw, "Failed to read EEPROM image\n");
1010 return status;
1011 }
1012 local_buffer = eeprom_ptrs;
1013 } else {
1014 if (buffer_size < IXGBE_EEPROM_LAST_WORD)
1015 return IXGBE_ERR_PARAM;
1016 local_buffer = buffer;
1017 }
1018
1019
1020
1021
1022 for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
1023 if (i != IXGBE_EEPROM_CHECKSUM)
1024 checksum += local_buffer[i];
1025
1026
1027
1028
1029 for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
1030 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
1031 continue;
1032
1033 pointer = local_buffer[i];
1034
1035
1036 if (pointer == 0xFFFF || pointer == 0 ||
1037 pointer >= hw->eeprom.word_size)
1038 continue;
1039
1040 switch (i) {
1041 case IXGBE_PCIE_GENERAL_PTR:
1042 size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
1043 break;
1044 case IXGBE_PCIE_CONFIG0_PTR:
1045 case IXGBE_PCIE_CONFIG1_PTR:
1046 size = IXGBE_PCIE_CONFIG_SIZE;
1047 break;
1048 default:
1049 size = 0;
1050 break;
1051 }
1052
1053 status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
1054 buffer, buffer_size);
1055 if (status)
1056 return status;
1057 }
1058
1059 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
1060
1061 return (s32)checksum;
1062 }
1063
1064
1065
1066
1067
1068
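/* Calculate the EEPROM checksum by reading directly from the device. */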
1069 static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
1070 {
1071 return ixgbe_calc_checksum_X550(hw, NULL, 0);
1072 }
1073
1074
1075
1076
1077
1078
1079
1080
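/* Read a single EEPROM word through the host interface. */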
1081 static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
1082 {
1083 const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
1084 struct ixgbe_hic_read_shadow_ram buffer;
1085 s32 status;
1086
1087 buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
1088 buffer.hdr.req.buf_lenh = 0;
1089 buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
1090 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
1091
1092
1093 buffer.address = (__force u32)cpu_to_be32(offset * 2);
1094
1095 buffer.length = (__force u16)cpu_to_be16(sizeof(u16));
1096
1097 status = hw->mac.ops.acquire_swfw_sync(hw, mask);
1098 if (status)
1099 return status;
1100
1101 status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
1102 IXGBE_HI_COMMAND_TIMEOUT);
1103 if (!status) {
1104 *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
1105 FW_NVM_DATA_OFFSET);
1106 }
1107
1108 hw->mac.ops.release_swfw_sync(hw, mask);
1109 return status;
1110 }
1111
1112
1113
1114
1115
1116
1117
1118
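/* Recompute the EEPROM checksum and compare it against the stored value. */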
1119 static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
1120 u16 *checksum_val)
1121 {
1122 s32 status;
1123 u16 checksum;
1124 u16 read_checksum = 0;
1125
1126
1127
1128
1129
1130 status = hw->eeprom.ops.read(hw, 0, &checksum);
1131 if (status) {
1132 hw_dbg(hw, "EEPROM read failed\n");
1133 return status;
1134 }
1135
1136 status = hw->eeprom.ops.calc_checksum(hw);
1137 if (status < 0)
1138 return status;
1139
1140 checksum = (u16)(status & 0xffff);
1141
1142 status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
1143 &read_checksum);
1144 if (status)
1145 return status;
1146
1147
1148
1149
1150 if (read_checksum != checksum) {
1151 status = IXGBE_ERR_EEPROM_CHECKSUM;
1152 hw_dbg(hw, "Invalid EEPROM checksum");
1153 }
1154
1155
1156 if (checksum_val)
1157 *checksum_val = checksum;
1158
1159 return status;
1160 }
1161
1162
1163
1164
1165
1166
1167
1168
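/* Write a single EEPROM word through the host interface; the caller must
 * already hold the EEPROM semaphore.
 */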
1169 static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
1170 u16 data)
1171 {
1172 s32 status;
1173 struct ixgbe_hic_write_shadow_ram buffer;
1174
1175 buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
1176 buffer.hdr.req.buf_lenh = 0;
1177 buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
1178 buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
1179
1180
1181 buffer.length = cpu_to_be16(sizeof(u16));
1182 buffer.data = data;
1183 buffer.address = cpu_to_be32(offset * 2);
1184
1185 status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
1186 IXGBE_HI_COMMAND_TIMEOUT, false);
1187 return status;
1188 }
1189
1190
1191
1192
1193
1194
1195
1196
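/* Write a single EEPROM word, acquiring the EEPROM semaphore first. */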
1197 static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
1198 {
1199 s32 status = 0;
1200
1201 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
1202 status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
1203 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1204 } else {
1205 hw_dbg(hw, "write ee hostif failed to get semaphore");
1206 status = IXGBE_ERR_SWFW_SYNC;
1207 }
1208
1209 return status;
1210 }
1211
1212
1213
1214
1215
1216
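/* Request that firmware dump the shadow RAM to flash. */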
1217 static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
1218 {
1219 s32 status = 0;
1220 union ixgbe_hic_hdr2 buffer;
1221
1222 buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
1223 buffer.req.buf_lenh = 0;
1224 buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
1225 buffer.req.checksum = FW_DEFAULT_CHECKSUM;
1226
1227 status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
1228 IXGBE_HI_COMMAND_TIMEOUT, false);
1229 return status;
1230 }
1231
1232
1233
1234
1235
1236
1237
1238
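/* Record the internal bus type and latch the LAN ID for this port. */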
1239 static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
1240 {
1241 hw->bus.type = ixgbe_bus_type_internal;
1242 hw->bus.width = ixgbe_bus_width_unknown;
1243 hw->bus.speed = ixgbe_bus_speed_unknown;
1244
1245 hw->mac.ops.set_lan_id(hw);
1246
1247 return 0;
1248 }
1249
1250
1251
1252
1253
1254
1255
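/* Report whether firmware is in NVM recovery mode. */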
1256 static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
1257 {
1258 u32 fwsm;
1259
1260 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
1261 return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
1262 }
1263
1264
1265
1266
1267
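/* Disable receive through a firmware command, saving the loopback state
 * first and clearing RXCTRL.RXEN directly if the command fails.
 */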
1268 static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
1269 {
1270 u32 rxctrl, pfdtxgswc;
1271 s32 status;
1272 struct ixgbe_hic_disable_rxen fw_cmd;
1273
1274 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1275 if (rxctrl & IXGBE_RXCTRL_RXEN) {
1276 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
1277 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
1278 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
1279 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
1280 hw->mac.set_lben = true;
1281 } else {
1282 hw->mac.set_lben = false;
1283 }
1284
1285 fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
1286 fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
1287 fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1288 fw_cmd.port_number = hw->bus.lan_id;
1289
1290 status = ixgbe_host_interface_command(hw, &fw_cmd,
1291 sizeof(struct ixgbe_hic_disable_rxen),
1292 IXGBE_HI_COMMAND_TIMEOUT, true);
1293
1294
1295 if (status) {
1296 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1297 if (rxctrl & IXGBE_RXCTRL_RXEN) {
1298 rxctrl &= ~IXGBE_RXCTRL_RXEN;
1299 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
1300 }
1301 }
1302 }
1303 }
1304
1305
1306
1307
1308
1309
1310
1311
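/* Recalculate the EEPROM checksum, write it back, and flush the shadow RAM
 * to flash.
 */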
1312 static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
1313 {
1314 s32 status;
1315 u16 checksum = 0;
1316
1317
1318
1319
1320
1321 status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
1322 if (status) {
1323 hw_dbg(hw, "EEPROM read failed\n");
1324 return status;
1325 }
1326
1327 status = ixgbe_calc_eeprom_checksum_X550(hw);
1328 if (status < 0)
1329 return status;
1330
1331 checksum = (u16)(status & 0xffff);
1332
1333 status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
1334 checksum);
1335 if (status)
1336 return status;
1337
1338 status = ixgbe_update_flash_X550(hw);
1339
1340 return status;
1341 }
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
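/* Write a buffer of EEPROM words one word at a time under the EEPROM
 * semaphore.
 */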
1352 static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
1353 u16 offset, u16 words,
1354 u16 *data)
1355 {
1356 s32 status = 0;
1357 u32 i = 0;
1358
1359
1360 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1361 if (status) {
1362 hw_dbg(hw, "EEPROM write buffer - semaphore failed\n");
1363 return status;
1364 }
1365
1366 for (i = 0; i < words; i++) {
1367 status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
1368 data[i]);
1369 if (status) {
1370 hw_dbg(hw, "Eeprom buffered write failed\n");
1371 break;
1372 }
1373 }
1374
1375 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1376
1377 return status;
1378 }
1379
1380
1381
1382
1383
1384
1385
1386
1387
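/* Write a device register over the IOSF sideband interface. */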
1388 static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1389 u32 device_type, u32 data)
1390 {
1391 u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1392 u32 command, error;
1393 s32 ret;
1394
1395 ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
1396 if (ret)
1397 return ret;
1398
1399 ret = ixgbe_iosf_wait(hw, NULL);
1400 if (ret)
1401 goto out;
1402
1403 command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1404 (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1405
1406
1407 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1408
1409
1410 IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1411
1412 ret = ixgbe_iosf_wait(hw, &command);
1413
1414 if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1415 error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1416 IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1417 hw_dbg(hw, "Failed to write, error %x\n", error);
1418 return IXGBE_ERR_PHY;
1419 }
1420
1421 out:
1422 hw->mac.ops.release_swfw_sync(hw, gssr);
1423 return ret;
1424 }
1425
1426
1427
1428
1429
1430
1431
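/* Apply the X550EM_x iXFI PHY workarounds: enable RX link-up conversion
 * without protocol, disable TX FFE adaptation, and force the TX coefficient
 * overrides.
 */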
1432 static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
1433 {
1434 s32 status;
1435 u32 reg_val;
1436
1437
1438 status = ixgbe_read_iosf_sb_reg_x550(hw,
1439 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1440 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1441 if (status)
1442 return status;
1443
1444 reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
1445 status = ixgbe_write_iosf_sb_reg_x550(hw,
1446 IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1447 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1448 if (status)
1449 return status;
1450
1451
1452 status = ixgbe_read_iosf_sb_reg_x550(hw,
1453 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
1454 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1455 if (status)
1456 return status;
1457
1458 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
1459 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
1460 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
1461 status = ixgbe_write_iosf_sb_reg_x550(hw,
1462 IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
1463 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1464 if (status)
1465 return status;
1466
1467 status = ixgbe_read_iosf_sb_reg_x550(hw,
1468 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
1469 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1470 if (status)
1471 return status;
1472
1473 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
1474 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
1475 reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
1476 status = ixgbe_write_iosf_sb_reg_x550(hw,
1477 IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
1478 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1479 if (status)
1480 return status;
1481
1482
1483 status = ixgbe_read_iosf_sb_reg_x550(hw,
1484 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
1485 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1486 if (status)
1487 return status;
1488
1489 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
1490 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
1491 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
1492 reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
1493 status = ixgbe_write_iosf_sb_reg_x550(hw,
1494 IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
1495 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1496 return status;
1497 }
1498
1499
1500
1501
1502
1503
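/* Restart auto-negotiation on the internal PHY; on x550em_a also request a
 * firmware auto-negotiation restart.
 */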
1504 static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1505 {
1506 s32 status;
1507 u32 link_ctrl;
1508
1509
1510 status = hw->mac.ops.read_iosf_sb_reg(hw,
1511 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1512 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1513
1514 if (status) {
1515 hw_dbg(hw, "Auto-negotiation did not complete\n");
1516 return status;
1517 }
1518
1519 link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1520 status = hw->mac.ops.write_iosf_sb_reg(hw,
1521 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1522 IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1523
1524 if (hw->mac.type == ixgbe_mac_x550em_a) {
1525 u32 flx_mask_st20;
1526
1527
1528 status = hw->mac.ops.read_iosf_sb_reg(hw,
1529 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1530 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1531
1532 if (status) {
1533 hw_dbg(hw, "Auto-negotiation did not complete\n");
1534 return status;
1535 }
1536
1537 flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1538 status = hw->mac.ops.write_iosf_sb_reg(hw,
1539 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1540 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1541 }
1542
1543 return status;
1544 }
1545
1546
1547
1548
1549
1550
1551
1552
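/* Configure the internal PHY for iXFI at a forced 1G or 10G speed (only
 * supported on X550EM_x) and restart auto-negotiation.
 */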
1553 static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
1554 {
1555 struct ixgbe_mac_info *mac = &hw->mac;
1556 s32 status;
1557 u32 reg_val;
1558
1559
1560 if (mac->type != ixgbe_mac_X550EM_x)
1561 return IXGBE_ERR_LINK_SETUP;
1562
1563
1564 status = ixgbe_read_iosf_sb_reg_x550(hw,
1565 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1566 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1567 if (status)
1568 return status;
1569
1570 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1571 reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1572
1573
1574 switch (*speed) {
1575 case IXGBE_LINK_SPEED_10GB_FULL:
1576 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
1577 break;
1578 case IXGBE_LINK_SPEED_1GB_FULL:
1579 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1580 break;
1581 default:
1582
1583 return IXGBE_ERR_LINK_SETUP;
1584 }
1585
1586 status = ixgbe_write_iosf_sb_reg_x550(hw,
1587 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1588 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1589 if (status)
1590 return status;
1591
1592
1593 if (hw->mac.type == ixgbe_mac_X550EM_x) {
1594 status = ixgbe_setup_ixfi_x550em_x(hw);
1595 if (status)
1596 return status;
1597 }
1598
1599
1600 status = ixgbe_restart_an_internal_phy_x550em(hw);
1601
1602 return status;
1603 }
1604
1605
1606
1607
1608
1609
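/* Check whether the detected SFP module is supported and report whether it
 * requires linear (DA copper) EDC mode.
 */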
1610 static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1611 {
1612 switch (hw->phy.sfp_type) {
1613 case ixgbe_sfp_type_not_present:
1614 return IXGBE_ERR_SFP_NOT_PRESENT;
1615 case ixgbe_sfp_type_da_cu_core0:
1616 case ixgbe_sfp_type_da_cu_core1:
1617 *linear = true;
1618 break;
1619 case ixgbe_sfp_type_srlr_core0:
1620 case ixgbe_sfp_type_srlr_core1:
1621 case ixgbe_sfp_type_da_act_lmt_core0:
1622 case ixgbe_sfp_type_da_act_lmt_core1:
1623 case ixgbe_sfp_type_1g_sx_core0:
1624 case ixgbe_sfp_type_1g_sx_core1:
1625 case ixgbe_sfp_type_1g_lx_core0:
1626 case ixgbe_sfp_type_1g_lx_core1:
1627 *linear = false;
1628 break;
1629 case ixgbe_sfp_type_unknown:
1630 case ixgbe_sfp_type_1g_cu_core0:
1631 case ixgbe_sfp_type_1g_cu_core1:
1632 default:
1633 return IXGBE_ERR_SFP_NOT_SUPPORTED;
1634 }
1635
1636 return 0;
1637 }
1638
1639
1640
1641
1642
1643
1644
1645
1646
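/* Set up the MAC link for an SFP module on X550EM_x: program the KR speed
 * and select the CS4227 EDC mode for this lane.
 */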
1647 static s32
1648 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
1649 ixgbe_link_speed speed,
1650 __always_unused bool autoneg_wait_to_complete)
1651 {
1652 s32 status;
1653 u16 reg_slice, reg_val;
1654 bool setup_linear = false;
1655
1656
1657 status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
1658
1659
1660
1661
1662
1663 if (status == IXGBE_ERR_SFP_NOT_PRESENT)
1664 return 0;
1665
1666 if (status)
1667 return status;
1668
1669
1670 ixgbe_setup_kr_speed_x550em(hw, speed);
1671
1672
1673 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12);
1674 if (setup_linear)
1675 reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
1676 else
1677 reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
1678
1679 status = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
1680 reg_val);
1681
1682 return status;
1683 }
1684
1685
1686
1687
1688
1689
1690
1691
1692
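/* Configure the internal PHY for SFI at a forced 1G or 10G speed and
 * restart auto-negotiation.
 */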
1693 static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
1694 {
1695 struct ixgbe_mac_info *mac = &hw->mac;
1696 s32 status;
1697 u32 reg_val;
1698
1699
1700 status = mac->ops.read_iosf_sb_reg(hw,
1701 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1702 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1703 if (status)
1704 return status;
1705
1706 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1707 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1708 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1709 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1710
1711
1712 switch (*speed) {
1713 case IXGBE_LINK_SPEED_10GB_FULL:
1714 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
1715 break;
1716 case IXGBE_LINK_SPEED_1GB_FULL:
1717 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1718 break;
1719 default:
1720
1721 return IXGBE_ERR_LINK_SETUP;
1722 }
1723
1724 status = mac->ops.write_iosf_sb_reg(hw,
1725 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1726 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1727
1728
1729 status = ixgbe_restart_an_internal_phy_x550em(hw);
1730
1731 return status;
1732 }
1733
1734
1735
1736
1737
1738
1739
1740
1741
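/* Set up the MAC link for a native SFP port: select DA or SR mode in the
 * PMD flex mask, then configure the SFI link.
 */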
1742 static s32
1743 ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1744 __always_unused bool autoneg_wait_to_complete)
1745 {
1746 bool setup_linear = false;
1747 u32 reg_phy_int;
1748 s32 ret_val;
1749
1750
1751 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
1752
1753
1754
1755
1756 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
1757 return 0;
1758
1759 if (ret_val)
1760 return ret_val;
1761
1762
1763 ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
1764 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1765 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
1766 if (ret_val)
1767 return ret_val;
1768
1769 reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
1770 if (!setup_linear)
1771 reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
1772
1773 ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
1774 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1775 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
1776 if (ret_val)
1777 return ret_val;
1778
1779
1780 return ixgbe_setup_sfi_x550a(hw, &speed);
1781 }
1782
1783
1784
1785
1786
1787
1788
1789
1790
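/* Set up the MAC link for an SFP module behind the external CS4227/CS4223:
 * program the KR speed and write the EDC mode for the appropriate slice.
 */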
1791 static s32
1792 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1793 __always_unused bool autoneg_wait_to_complete)
1794 {
1795 u32 reg_slice, slice_offset;
1796 bool setup_linear = false;
1797 u16 reg_phy_ext;
1798 s32 ret_val;
1799
1800
1801 ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
1802
1803
1804
1805
1806 if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
1807 return 0;
1808
1809 if (ret_val)
1810 return ret_val;
1811
1812
1813 ixgbe_setup_kr_speed_x550em(hw, speed);
1814
1815 if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
1816 return IXGBE_ERR_PHY_ADDR_INVALID;
1817
1818
1819 ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
1820 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
1821 if (ret_val)
1822 return ret_val;
1823
1824
1825
1826
1827 if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
1828 slice_offset = (hw->bus.lan_id +
1829 (hw->bus.instance_id << 1)) << 12;
1830 else
1831 slice_offset = hw->bus.lan_id << 12;
1832
1833
1834 reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
1835
1836 ret_val = hw->phy.ops.read_reg(hw, reg_slice,
1837 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
1838 if (ret_val)
1839 return ret_val;
1840
1841 reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
1842 (IXGBE_CS4227_EDC_MODE_SR << 1));
1843
1844 if (setup_linear)
1845 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
1846 else
1847 reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
1848
1849 ret_val = hw->phy.ops.write_reg(hw, reg_slice,
1850 IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
1851 if (ret_val)
1852 return ret_val;
1853
1854
1855 return hw->phy.ops.read_reg(hw, reg_slice,
1856 IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
1857 }
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
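/* Set up link on copper ports: configure the internal iXFI link to the
 * forced speed when required, then set the external PHY link speed.
 */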
1870 static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
1871 ixgbe_link_speed speed,
1872 bool autoneg_wait)
1873 {
1874 s32 status;
1875 ixgbe_link_speed force_speed;
1876
1877
1878
1879
1880 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
1881 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
1882 else
1883 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
1884
1885
1886
1887 if (hw->mac.type == ixgbe_mac_X550EM_x &&
1888 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
1889 status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
1890
1891 if (status)
1892 return status;
1893 }
1894
1895 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1896 }
1897
1898
1899
1900
1901
1902
1903
1904
1905
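/* Check MAC link, then confirm it against the external PHY's
 * auto-negotiation link status.
 */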
1906 static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
1907 ixgbe_link_speed *speed,
1908 bool *link_up,
1909 bool link_up_wait_to_complete)
1910 {
1911 u32 status;
1912 u16 i, autoneg_status;
1913
1914 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
1915 return IXGBE_ERR_CONFIG;
1916
1917 status = ixgbe_check_mac_link_generic(hw, speed, link_up,
1918 link_up_wait_to_complete);
1919
1920
1921 if (status || !(*link_up))
1922 return status;
1923
1924
1925
1926
1927
1928
1929 for (i = 0; i < 2; i++) {
1930 status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
1931 &autoneg_status);
1932
1933 if (status)
1934 return status;
1935 }
1936
1937
1938 if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
1939 *link_up = false;
1940
1941 return 0;
1942 }
1943
1944
1945
1946
1947
1948
1949
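/* Set up the internal PHY for 1G SGMII operation with clause 37
 * auto-negotiation.
 */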
1950 static s32
1951 ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
1952 __always_unused bool autoneg_wait_to_complete)
1953 {
1954 struct ixgbe_mac_info *mac = &hw->mac;
1955 u32 lval, sval, flx_val;
1956 s32 rc;
1957
1958 rc = mac->ops.read_iosf_sb_reg(hw,
1959 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1960 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1961 if (rc)
1962 return rc;
1963
1964 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1965 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1966 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1967 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1968 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1969 rc = mac->ops.write_iosf_sb_reg(hw,
1970 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1971 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1972 if (rc)
1973 return rc;
1974
1975 rc = mac->ops.read_iosf_sb_reg(hw,
1976 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1977 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1978 if (rc)
1979 return rc;
1980
1981 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1982 sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1983 rc = mac->ops.write_iosf_sb_reg(hw,
1984 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1985 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1986 if (rc)
1987 return rc;
1988
1989 rc = mac->ops.read_iosf_sb_reg(hw,
1990 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1991 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1992 if (rc)
1993 return rc;
1994
1995 rc = mac->ops.read_iosf_sb_reg(hw,
1996 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1997 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1998 if (rc)
1999 return rc;
2000
2001 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2002 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2003 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2004 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2005 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2006
2007 rc = mac->ops.write_iosf_sb_reg(hw,
2008 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2009 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
2010 if (rc)
2011 return rc;
2012
2013 rc = ixgbe_restart_an_internal_phy_x550em(hw);
2014 return rc;
2015 }
2016
2017
2018
2019
2020
2021
2022
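/* Set up SGMII for a firmware-controlled 1G PHY, leaving the speed to
 * auto-negotiation, then configure the PHY link.
 */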
2023 static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
2024 bool autoneg_wait)
2025 {
2026 struct ixgbe_mac_info *mac = &hw->mac;
2027 u32 lval, sval, flx_val;
2028 s32 rc;
2029
2030 rc = mac->ops.read_iosf_sb_reg(hw,
2031 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2032 IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
2033 if (rc)
2034 return rc;
2035
2036 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2037 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
2038 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
2039 lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
2040 lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
2041 rc = mac->ops.write_iosf_sb_reg(hw,
2042 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2043 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
2044 if (rc)
2045 return rc;
2046
2047 rc = mac->ops.read_iosf_sb_reg(hw,
2048 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
2049 IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
2050 if (rc)
2051 return rc;
2052
2053 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
2054 sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
2055 rc = mac->ops.write_iosf_sb_reg(hw,
2056 IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
2057 IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
2058 if (rc)
2059 return rc;
2060
2061 rc = mac->ops.write_iosf_sb_reg(hw,
2062 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2063 IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
2064 if (rc)
2065 return rc;
2066
2067 rc = mac->ops.read_iosf_sb_reg(hw,
2068 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2069 IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
2070 if (rc)
2071 return rc;
2072
2073 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2074 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2075 flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2076 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2077 flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2078
2079 rc = mac->ops.write_iosf_sb_reg(hw,
2080 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2081 IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
2082 if (rc)
2083 return rc;
2084
2085 ixgbe_restart_an_internal_phy_x550em(hw);
2086
2087 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
2088 }
2089
2090
2091
2092
2093
2094
2095
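/* Resolve flow control on SGMII ports from the firmware link information
 * once auto-negotiation has completed.
 */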
2096 static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
2097 {
2098 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
2099 u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
2100 ixgbe_link_speed speed;
2101 bool link_up;
2102
2103
2104
2105
2106
2107
2108 if (hw->fc.disable_fc_autoneg)
2109 goto out;
2110
2111 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2112 if (!link_up)
2113 goto out;
2114
2115
2116 status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
2117 if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
2118 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
2119 goto out;
2120 }
2121
2122
2123 status = ixgbe_negotiate_fc(hw, info[0], info[0],
2124 FW_PHY_ACT_GET_LINK_INFO_FC_RX,
2125 FW_PHY_ACT_GET_LINK_INFO_FC_TX,
2126 FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
2127 FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
2128
2129 out:
2130 if (!status) {
2131 hw->fc.fc_was_autonegged = true;
2132 } else {
2133 hw->fc.fc_was_autonegged = false;
2134 hw->fc.current_mode = hw->fc.requested_mode;
2135 }
2136 }
2137
2138
2139
2140
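/* Override flow-control and link ops for x550em_a based on media type. */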
2141 static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw)
2142 {
2143 struct ixgbe_mac_info *mac = &hw->mac;
2144
2145 switch (mac->ops.get_media_type(hw)) {
2146 case ixgbe_media_type_fiber:
2147 mac->ops.setup_fc = NULL;
2148 mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
2149 break;
2150 case ixgbe_media_type_copper:
2151 if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T &&
2152 hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2153 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2154 break;
2155 }
2156 mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
2157 mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
2158 mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2159 mac->ops.check_link = ixgbe_check_mac_link_generic;
2160 break;
2161 case ixgbe_media_type_backplane:
2162 mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
2163 mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
2164 break;
2165 default:
2166 break;
2167 }
2168 }
2169
2170
2171
2172
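/* Select MAC link, flow-control, and rate-select ops based on media type. */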
2173 static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
2174 {
2175 struct ixgbe_mac_info *mac = &hw->mac;
2176
2177 mac->ops.setup_fc = ixgbe_setup_fc_x550em;
2178
2179 switch (mac->ops.get_media_type(hw)) {
2180 case ixgbe_media_type_fiber:
2181
2182
2183
2184 mac->ops.disable_tx_laser = NULL;
2185 mac->ops.enable_tx_laser = NULL;
2186 mac->ops.flap_tx_laser = NULL;
2187 mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2188 switch (hw->device_id) {
2189 case IXGBE_DEV_ID_X550EM_A_SFP_N:
2190 mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n;
2191 break;
2192 case IXGBE_DEV_ID_X550EM_A_SFP:
2193 mac->ops.setup_mac_link =
2194 ixgbe_setup_mac_link_sfp_x550a;
2195 break;
2196 default:
2197 mac->ops.setup_mac_link =
2198 ixgbe_setup_mac_link_sfp_x550em;
2199 break;
2200 }
2201 mac->ops.set_rate_select_speed =
2202 ixgbe_set_soft_rate_select_speed;
2203 break;
2204 case ixgbe_media_type_copper:
2205 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2206 break;
2207 mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2208 mac->ops.setup_fc = ixgbe_setup_fc_generic;
2209 mac->ops.check_link = ixgbe_check_link_t_X550em;
2210 break;
2211 case ixgbe_media_type_backplane:
2212 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2213 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2214 mac->ops.setup_link = ixgbe_setup_sgmii;
2215 break;
2216 default:
2217 break;
2218 }
2219
2220
2221 if (hw->mac.type == ixgbe_mac_x550em_a)
2222 ixgbe_init_mac_link_ops_X550em_a(hw);
2223 }
2224
2225
2226
2227
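/* Validate the SFP module and refresh the link ops; the PHY reset op is
 * not used for SFP.
 */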
2228 static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
2229 {
2230 s32 status;
2231 bool linear;
2232
2233
2234 status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
2235 if (status)
2236 return status;
2237
2238 ixgbe_init_mac_link_ops_X550em(hw);
2239 hw->phy.ops.reset = NULL;
2240
2241 return 0;
2242 }
2243
2244
2245
2246
2247
2248
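/* Report the supported link speeds and whether auto-negotiation applies,
 * based on the PHY and SFP type.
 */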
2249 static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2250 ixgbe_link_speed *speed,
2251 bool *autoneg)
2252 {
2253 if (hw->phy.type == ixgbe_phy_fw) {
2254 *autoneg = true;
2255 *speed = hw->phy.speeds_supported;
2256 return 0;
2257 }
2258
2259
2260 if (hw->phy.media_type == ixgbe_media_type_fiber) {
2261
2262 *autoneg = false;
2263
2264 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2265 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
2266 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2267 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2268 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2269 return 0;
2270 }
2271
2272
2273 if (hw->phy.multispeed_fiber)
2274 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2275 IXGBE_LINK_SPEED_1GB_FULL;
2276 else
2277 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2278 } else {
2279 switch (hw->phy.type) {
2280 case ixgbe_phy_x550em_kx4:
2281 *speed = IXGBE_LINK_SPEED_1GB_FULL |
2282 IXGBE_LINK_SPEED_2_5GB_FULL |
2283 IXGBE_LINK_SPEED_10GB_FULL;
2284 break;
2285 case ixgbe_phy_x550em_xfi:
2286 *speed = IXGBE_LINK_SPEED_1GB_FULL |
2287 IXGBE_LINK_SPEED_10GB_FULL;
2288 break;
2289 case ixgbe_phy_ext_1g_t:
2290 case ixgbe_phy_sgmii:
2291 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2292 break;
2293 case ixgbe_phy_x550em_kr:
2294 if (hw->mac.type == ixgbe_mac_x550em_a) {
2295
2296 if (hw->phy.nw_mng_if_sel &
2297 IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2298 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2299 break;
2300 } else if (hw->device_id ==
2301 IXGBE_DEV_ID_X550EM_A_KR_L) {
2302 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2303 break;
2304 }
2305 }
2306 fallthrough;
2307 default:
2308 *speed = IXGBE_LINK_SPEED_10GB_FULL |
2309 IXGBE_LINK_SPEED_1GB_FULL;
2310 break;
2311 }
2312 *autoneg = true;
2313 }
2314 return 0;
2315 }
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
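/* Decode the external PHY's alarm and LASI flags: power the PHY down and
 * return an overtemperature error on a thermal failure, and report link
 * status change events through *lsc.
 */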
2329 static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2330 {
2331 u32 status;
2332 u16 reg;
2333
2334 *lsc = false;
2335
2336
2337 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2338 MDIO_MMD_VEND1,
2339 &reg);
2340
2341 if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2342 return status;
2343
2344
2345 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2346 MDIO_MMD_VEND1,
2347 &reg);
2348
2349 if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2350 IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2351 return status;
2352
2353
2354 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2355 MDIO_MMD_VEND1,
2356 &reg);
2357
2358 if (status)
2359 return status;
2360
2361
2362 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2363
2364 ixgbe_set_copper_phy_power(hw, false);
2365 return IXGBE_ERR_OVERTEMP;
2366 }
2367 if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2368
2369 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2370 MDIO_MMD_VEND1,
2371 &reg);
2372 if (status)
2373 return status;
2374
2375
2376 if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2377
2378 ixgbe_set_copper_phy_power(hw, false);
2379 return IXGBE_ERR_OVERTEMP;
2380 }
2381 }
2382
2383
2384 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2385 MDIO_MMD_AN, &reg);
2386
2387 if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2388 return status;
2389
2390
2391 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2392 MDIO_MMD_AN, &reg);
2393
2394 if (status)
2395 return status;
2396
2397
2398 if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2399 *lsc = true;
2400
2401 return 0;
2402 }
2403
2404 /**
2405  * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2406  * @hw: pointer to hardware structure
2407  *
2408  * Enable link status change and temperature failure alarm for the external
2409  * Base T PHY.
2410  *
2411  * Returns PHY access status.
2412  **/
2413 static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2414 {
2415 u32 status;
2416 u16 reg;
2417 bool lsc;
2418
2419
2420 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432 if (hw->mac.type != ixgbe_mac_x550em_a) {
2433 status = hw->phy.ops.read_reg(hw,
2434 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2435 MDIO_MMD_AN, &reg);
2436 if (status)
2437 return status;
2438
2439 reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2440
2441 status = hw->phy.ops.write_reg(hw,
2442 IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2443 MDIO_MMD_AN, reg);
2444 if (status)
2445 return status;
2446 }
2447
2448
2449 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2450 MDIO_MMD_VEND1,
2451 &reg);
2452 if (status)
2453 return status;
2454
2455 reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2456 IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2457
2458 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2459 MDIO_MMD_VEND1,
2460 reg);
2461 if (status)
2462 return status;
2463
2464
2465 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2466 MDIO_MMD_VEND1,
2467 &reg);
2468 if (status)
2469 return status;
2470
2471 reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2472 IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2473
2474 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2475 MDIO_MMD_VEND1,
2476 reg);
2477 if (status)
2478 return status;
2479
2480
2481 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2482 MDIO_MMD_VEND1,
2483 &reg);
2484 if (status)
2485 return status;
2486
2487 reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2488
2489 status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2490 MDIO_MMD_VEND1,
2491 reg);
2492
2493 return status;
2494 }
2495
2496 /**
2497  * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
2498  * @hw: pointer to hardware structure
2499  *
2500  * Handle external Base T PHY interrupt. If high temperature
2501  * failure alarm then return error, else if link status change
2502  * then setup the internal/external PHY link.
2503  *
2504  * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2505  * failure alarm, else return PHY access status.
2506  **/
2507 static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2508 {
2509 struct ixgbe_phy_info *phy = &hw->phy;
2510 bool lsc;
2511 u32 status;
2512
2513 status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2514 if (status)
2515 return status;
2516
2517 if (lsc && phy->ops.setup_internal_link)
2518 return phy->ops.setup_internal_link(hw);
2519
2520 return 0;
2521 }
2522
2523 /**
2524  * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed
2525  * @hw: pointer to hardware structure
2526  * @speed: link speed
2527  *
2528  * Configures the integrated KR PHY.
2529  **/
2530 static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2531 ixgbe_link_speed speed)
2532 {
2533 s32 status;
2534 u32 reg_val;
2535
2536 status = hw->mac.ops.read_iosf_sb_reg(hw,
2537 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2538 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2539 if (status)
2540 return status;
2541
2542 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2543 reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2544 IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2545
2546
2547 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2548 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2549
2550
2551 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2552 reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2553
2554 status = hw->mac.ops.write_iosf_sb_reg(hw,
2555 IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2556 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2557
2558 if (hw->mac.type == ixgbe_mac_x550em_a) {
2559
2560 status = hw->mac.ops.read_iosf_sb_reg(hw,
2561 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2562 IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2563
2564 if (status)
2565 return status;
2566
2567 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2568 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2569 reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2570 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2571 reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2572
2573 status = hw->mac.ops.write_iosf_sb_reg(hw,
2574 IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2575 IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2576 }
2577
2578 return ixgbe_restart_an_internal_phy_x550em(hw);
2579 }
2580
2581 /**
2582  * ixgbe_setup_kr_x550em - Configure the KR PHY
2583  * @hw: pointer to hardware structure
2584  **/
2585 static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2586 {
2587
2588 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2589 return 0;
2590
2591 if (ixgbe_check_reset_blocked(hw))
2592 return 0;
2593
2594 return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2595 }
2596
2597 /** ixgbe_ext_phy_t_x550em_get_link - Get external Base T PHY link status
2598  *  @hw: address of hardware structure
2599  *  @link_up: address of boolean to indicate link status
2600  *
2601  *  Returns error code if unable to get link status.
2602  **/
2603 static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
2604 {
2605 u32 ret;
2606 u16 autoneg_status;
2607
2608 *link_up = false;
2609
2610
2611 ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
2612 &autoneg_status);
2613 if (ret)
2614 return ret;
2615
2616 ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
2617 &autoneg_status);
2618 if (ret)
2619 return ret;
2620
2621 *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
2622
2623 return 0;
2624 }
2625
2626 /**
2627  * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
2628  * @hw: pointer to hardware structure
2629  *
2630  * Configures the link between the integrated KR PHY and the external X557
2631  * PHY. The driver calls this on a link status change interrupt from the
2632  * X557 PHY and sets the KR PHY speed to match the speed of the BASE-T link.
2633  *
2634  * A non-zero return value indicates an error, and the base driver should
2635  * not report link up.
2636  **/
2637 static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
2638 {
2639 ixgbe_link_speed force_speed;
2640 bool link_up;
2641 u32 status;
2642 u16 speed;
2643
2644 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2645 return IXGBE_ERR_CONFIG;
2646
2647 if (!(hw->mac.type == ixgbe_mac_X550EM_x &&
2648 !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) {
2649 speed = IXGBE_LINK_SPEED_10GB_FULL |
2650 IXGBE_LINK_SPEED_1GB_FULL;
2651 return ixgbe_setup_kr_speed_x550em(hw, speed);
2652 }
2653
2654
2655 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2656 if (status)
2657 return status;
2658
2659 if (!link_up)
2660 return 0;
2661
2662 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
2663 MDIO_MMD_AN,
2664 &speed);
2665 if (status)
2666 return status;
2667
2668
2669 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2670 if (status)
2671 return status;
2672
2673 if (!link_up)
2674 return 0;
2675
2676
2677 speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
2678
2679 switch (speed) {
2680 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
2681 force_speed = IXGBE_LINK_SPEED_10GB_FULL;
2682 break;
2683 case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
2684 force_speed = IXGBE_LINK_SPEED_1GB_FULL;
2685 break;
2686 default:
2687
2688 return IXGBE_ERR_INVALID_LINK_SETTINGS;
2689 }
2690
2691 return ixgbe_setup_ixfi_x550em(hw, &force_speed);
2692 }
2693
2694 /** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
2695  *  @hw: pointer to hardware structure
2696  **/
2697 static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
2698 {
2699 s32 status;
2700
2701 status = ixgbe_reset_phy_generic(hw);
2702
2703 if (status)
2704 return status;
2705
2706
2707 return ixgbe_enable_lasi_ext_t_x550em(hw);
2708 }
2709
2710 /**
2711  * ixgbe_led_on_t_x550em - Turn on the software controllable LED
2712  * @hw: pointer to hardware structure
2713  * @led_idx: LED number to turn on
2714  **/
2715 static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
2716 {
2717 u16 phy_data;
2718
2719 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
2720 return IXGBE_ERR_PARAM;
2721
2722
2723 hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
2724 MDIO_MMD_VEND1, &phy_data);
2725 phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
2726 hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
2727 MDIO_MMD_VEND1, phy_data);
2728
2729 return 0;
2730 }
2731
2732 /**
2733  * ixgbe_led_off_t_x550em - Turn off the software controllable LED
2734  * @hw: pointer to hardware structure
2735  * @led_idx: LED number to turn off
2736  **/
2737 static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
2738 {
2739 u16 phy_data;
2740
2741 if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
2742 return IXGBE_ERR_PARAM;
2743
2744
2745 hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
2746 MDIO_MMD_VEND1, &phy_data);
2747 phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
2748 hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
2749 MDIO_MMD_VEND1, phy_data);
2750
2751 return 0;
2752 }
2753
2754 /**
2755  * ixgbe_set_fw_drv_ver_x550 - Send driver version to firmware
2756  * @hw: pointer to the HW structure
2757  * @maj: driver version major number
2758  * @min: driver version minor number
2759  * @build: driver version build number
2760  * @sub: driver version sub build number
2761  * @len: length of driver_ver string
2762  * @driver_ver: driver version string
2763  *
2764  * Sends the driver version to firmware through the manageability block,
2765  * retrying the host interface command on failure. Returns 0 on success,
2766  * IXGBE_ERR_HOST_INTERFACE_COMMAND if the firmware rejects the command,
2767  * or the host interface command status otherwise.
2768  **/
2769 static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
2770 u8 build, u8 sub, u16 len,
2771 const char *driver_ver)
2772 {
2773 struct ixgbe_hic_drv_info2 fw_cmd;
2774 s32 ret_val;
2775 int i;
2776
2777 if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
2778 return IXGBE_ERR_INVALID_ARGUMENT;
2779
2780 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
2781 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
2782 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
2783 fw_cmd.port_num = (u8)hw->bus.func;
2784 fw_cmd.ver_maj = maj;
2785 fw_cmd.ver_min = min;
2786 fw_cmd.ver_build = build;
2787 fw_cmd.ver_sub = sub;
2788 fw_cmd.hdr.checksum = 0;
2789 memcpy(fw_cmd.driver_string, driver_ver, len);
2790 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
2791 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
2792
2793 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
2794 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
2795 sizeof(fw_cmd),
2796 IXGBE_HI_COMMAND_TIMEOUT,
2797 true);
2798 if (ret_val)
2799 continue;
2800
2801 if (fw_cmd.hdr.cmd_or_resp.ret_status !=
2802 FW_CEM_RESP_STATUS_SUCCESS)
2803 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
2804 return 0;
2805 }
2806
2807 return ret_val;
2808 }
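/* Editor's note: illustrative usage sketch, not part of the driver. A caller
 * reporting a driver version through the MAC op would look roughly like the
 * hypothetical helper below; the version numbers and string are examples only.
 *
 *	static void example_report_drv_ver(struct ixgbe_hw *hw)
 *	{
 *		static const char ver[] = "example 1.0.0";
 *
 *		if (hw->mac.ops.set_fw_drv_ver)
 *			hw->mac.ops.set_fw_drv_ver(hw, 1, 0, 0, 0,
 *						   sizeof(ver), ver);
 *	}
 *
 * len must not exceed sizeof(fw_cmd.driver_string), otherwise
 * ixgbe_set_fw_drv_ver_x550() returns IXGBE_ERR_INVALID_ARGUMENT.
 */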
2809
2810 /** ixgbe_get_lcd_t_x550em - Determine lowest common denominator
2811  *  @hw: pointer to hardware structure
2812  *  @lcd_speed: pointer to lowest common link speed
2813  *
2814  *  Determine lowest common link speed with link partner.
2815  **/
2816 static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
2817 ixgbe_link_speed *lcd_speed)
2818 {
2819 u16 an_lp_status;
2820 s32 status;
2821 u16 word = hw->eeprom.ctrl_word_3;
2822
2823 *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
2824
2825 status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
2826 MDIO_MMD_AN,
2827 &an_lp_status);
2828 if (status)
2829 return status;
2830
2831
2832 if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
2833 *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
2834 return status;
2835 }
2836
2837
2838 if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
2839 (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
2840 return status;
2841
2842
2843 *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
2844 return status;
2845 }
2846
2847 /**
2848  * ixgbe_setup_fc_x550em - Set up flow control
2849  * @hw: pointer to hardware structure
2850  **/
2851 static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
2852 {
2853 bool pause, asm_dir;
2854 u32 reg_val;
2855 s32 rc = 0;
2856
2857
2858 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
2859 hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
2860 return IXGBE_ERR_INVALID_LINK_SETTINGS;
2861 }
2862
2863
2864
2865
2866 if (hw->fc.requested_mode == ixgbe_fc_default)
2867 hw->fc.requested_mode = ixgbe_fc_full;
2868
2869
2870 switch (hw->fc.requested_mode) {
2871 case ixgbe_fc_none:
2872 pause = false;
2873 asm_dir = false;
2874 break;
2875 case ixgbe_fc_tx_pause:
2876 pause = false;
2877 asm_dir = true;
2878 break;
2879 case ixgbe_fc_rx_pause:
2880 /* Rx Flow control is enabled and Tx Flow control is
2881  * disabled by software override. Since there really
2882  * isn't a way to advertise that we are capable of RX
2883  * Pause ONLY, we will advertise that we support both
2884  * symmetric and asymmetric Rx PAUSE, as such we fall
2885  * through to the fc_full statement.  Later, we will
2886  * disable the adapter's ability to send PAUSE frames.
2887  */
2888 fallthrough;
2889 case ixgbe_fc_full:
2890 pause = true;
2891 asm_dir = true;
2892 break;
2893 default:
2894 hw_err(hw, "Flow control param set incorrectly\n");
2895 return IXGBE_ERR_CONFIG;
2896 }
2897
2898 switch (hw->device_id) {
2899 case IXGBE_DEV_ID_X550EM_X_KR:
2900 case IXGBE_DEV_ID_X550EM_A_KR:
2901 case IXGBE_DEV_ID_X550EM_A_KR_L:
2902 rc = hw->mac.ops.read_iosf_sb_reg(hw,
2903 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
2904 IXGBE_SB_IOSF_TARGET_KR_PHY,
2905 &reg_val);
2906 if (rc)
2907 return rc;
2908
2909 reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
2910 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
2911 if (pause)
2912 reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
2913 if (asm_dir)
2914 reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
2915 rc = hw->mac.ops.write_iosf_sb_reg(hw,
2916 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
2917 IXGBE_SB_IOSF_TARGET_KR_PHY,
2918 reg_val);
2919
2920
2921 hw->fc.disable_fc_autoneg = true;
2922 break;
2923 case IXGBE_DEV_ID_X550EM_X_XFI:
2924 hw->fc.disable_fc_autoneg = true;
2925 break;
2926 default:
2927 break;
2928 }
2929 return rc;
2930 }
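/* Editor's note: summary of the advertisement chosen in ixgbe_setup_fc_x550em()
 * above, derived directly from the switch on hw->fc.requested_mode
 * (SYM = IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, ASM = IXGBE_KRM_AN_CNTL_1_ASM_PAUSE):
 *
 *	requested_mode		SYM	ASM
 *	ixgbe_fc_none		 0	 0
 *	ixgbe_fc_tx_pause	 0	 1
 *	ixgbe_fc_rx_pause	 1	 1	(advertised as full, see above)
 *	ixgbe_fc_full		 1	 1
 */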
2931
2932 /**
2933  * ixgbe_fc_autoneg_backplane_x550em_a - Resolve flow control from backplane AN
2934  * @hw: pointer to hardware structure
2935  **/
2936 static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
2937 {
2938 u32 link_s1, lp_an_page_low, an_cntl_1;
2939 s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
2940 ixgbe_link_speed speed;
2941 bool link_up;
2942
2943
2944
2945
2946
2947
2948 if (hw->fc.disable_fc_autoneg) {
2949 hw_err(hw, "Flow control autoneg is disabled");
2950 goto out;
2951 }
2952
2953 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2954 if (!link_up) {
2955 hw_err(hw, "The link is down");
2956 goto out;
2957 }
2958
2959
2960 status = hw->mac.ops.read_iosf_sb_reg(hw,
2961 IXGBE_KRM_LINK_S1(hw->bus.lan_id),
2962 IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
2963
2964 if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
2965 hw_dbg(hw, "Auto-Negotiation did not complete\n");
2966 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
2967 goto out;
2968 }
2969
2970
2971
2972
2973 status = hw->mac.ops.read_iosf_sb_reg(hw,
2974 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
2975 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
2976
2977 if (status) {
2978 hw_dbg(hw, "Auto-Negotiation did not complete\n");
2979 goto out;
2980 }
2981
2982 status = hw->mac.ops.read_iosf_sb_reg(hw,
2983 IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
2984 IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
2985
2986 if (status) {
2987 hw_dbg(hw, "Auto-Negotiation did not complete\n");
2988 goto out;
2989 }
2990
2991 status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
2992 IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
2993 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
2994 IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
2995 IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
2996
2997 out:
2998 if (!status) {
2999 hw->fc.fc_was_autonegged = true;
3000 } else {
3001 hw->fc.fc_was_autonegged = false;
3002 hw->fc.current_mode = hw->fc.requested_mode;
3003 }
3004 }
3005
3006 /**
3007  * ixgbe_fc_autoneg_fiber_x550em_a - Pass through requested flow control
3008  * @hw: pointer to hardware structure
3009  **/
3010 static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
3011 {
3012 hw->fc.fc_was_autonegged = false;
3013 hw->fc.current_mode = hw->fc.requested_mode;
3014 }
3015
3016 /** ixgbe_enter_lplu_t_x550em - Transition to low power states
3017  *  @hw: pointer to hardware structure
3018  *
3019  *  Configures Low Power Link Up on transition to low power states
3020  *  (from D0 to non-D0). Link is required to enter LPLU, so avoid resetting
3021  *  the X557 PHY immediately prior to entering LPLU.
3022  **/
3023 static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3024 {
3025 u16 an_10g_cntl_reg, autoneg_reg, speed;
3026 s32 status;
3027 ixgbe_link_speed lcd_speed;
3028 u32 save_autoneg;
3029 bool link_up;
3030
3031
3032 if (ixgbe_check_reset_blocked(hw))
3033 return 0;
3034
3035 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3036 if (status)
3037 return status;
3038
3039 status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3,
3040 &hw->eeprom.ctrl_word_3);
3041 if (status)
3042 return status;
3043
3044
3045
3046
3047
3048 if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3049 !(hw->wol_enabled || ixgbe_mng_present(hw)))
3050 return ixgbe_set_copper_phy_power(hw, false);
3051
3052
3053 status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3054 if (status)
3055 return status;
3056
3057
3058 if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3059 return ixgbe_set_copper_phy_power(hw, false);
3060
3061 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3062 MDIO_MMD_AN,
3063 &speed);
3064 if (status)
3065 return status;
3066
3067
3068 status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3069 if (status)
3070 return ixgbe_set_copper_phy_power(hw, false);
3071
3072
3073 speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3074
3075
3076 if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3077 (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
3078 ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
3079 (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
3080 return status;
3081
3082
3083 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
3084 MDIO_MMD_AN,
3085 &autoneg_reg);
3086 if (status)
3087 return status;
3088
3089 status = hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
3090 MDIO_MMD_AN,
3091 &an_10g_cntl_reg);
3092 if (status)
3093 return status;
3094
3095 status = hw->phy.ops.read_reg(hw,
3096 IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
3097 MDIO_MMD_AN,
3098 &autoneg_reg);
3099 if (status)
3100 return status;
3101
3102 save_autoneg = hw->phy.autoneg_advertised;
3103
3104
3105 status = hw->mac.ops.setup_link(hw, lcd_speed, false);
3106
3107
3108 hw->phy.autoneg_advertised = save_autoneg;
3109
3110 return status;
3111 }
3112
3113 /**
3114  * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
3115  * @hw: pointer to hardware structure
3116  **/
3117 static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
3118 {
3119 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
3120 s32 rc;
3121
3122 if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
3123 return 0;
3124
3125 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
3126 if (rc)
3127 return rc;
3128 memset(store, 0, sizeof(store));
3129
3130 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
3131 if (rc)
3132 return rc;
3133
3134 return ixgbe_setup_fw_link(hw);
3135 }
3136
3137 /**
3138  * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
3139  * @hw: pointer to hardware structure
3140  **/
3141 static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
3142 {
3143 u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
3144 s32 rc;
3145
3146 rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
3147 if (rc)
3148 return rc;
3149
3150 if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
3151 ixgbe_shutdown_fw_phy(hw);
3152 return IXGBE_ERR_OVERTEMP;
3153 }
3154 return 0;
3155 }
3156
3157 /** ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
3158  *  @hw: pointer to hardware structure
3159  *
3160  *  Read NW_MNG_IF_SEL register and cache the external PHY MDIO address when
3161  *  firmware owns the MDIO interface (x550em_a).
3162  **/
3163 static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
3164 {
3165
3166
3167
3168 hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
3169
3170
3171
3172
3173 if (hw->mac.type == ixgbe_mac_x550em_a &&
3174 hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
3175 hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
3176 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
3177 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
3178 }
3179 }
3180
3181 /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
3182  *  @hw: pointer to hardware structure
3183  *
3184  *  Initialize any function pointers that were not able to be
3185  *  set during init_shared_code because the PHY/SFP type was
3186  *  not known.  Perform the SFP init if necessary.
3187  **/
3188 static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
3189 {
3190 struct ixgbe_phy_info *phy = &hw->phy;
3191 s32 ret_val;
3192
3193 hw->mac.ops.set_lan_id(hw);
3194
3195 ixgbe_read_mng_if_sel_x550em(hw);
3196
3197 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
3198 phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
3199 ixgbe_setup_mux_ctl(hw);
3200 }
3201
3202
3203 ret_val = phy->ops.identify(hw);
3204 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
3205 ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
3206 return ret_val;
3207
3208
3209 ixgbe_init_mac_link_ops_X550em(hw);
3210 if (phy->sfp_type != ixgbe_sfp_type_unknown)
3211 phy->ops.reset = NULL;
3212
3213
3214 switch (hw->phy.type) {
3215 case ixgbe_phy_x550em_kx4:
3216 phy->ops.setup_link = NULL;
3217 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
3218 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
3219 break;
3220 case ixgbe_phy_x550em_kr:
3221 phy->ops.setup_link = ixgbe_setup_kr_x550em;
3222 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
3223 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
3224 break;
3225 case ixgbe_phy_x550em_xfi:
3226
3227 phy->ops.setup_link = NULL;
3228 phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
3229 phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
3230 break;
3231 case ixgbe_phy_x550em_ext_t:
3232
3233
3234
3235 phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
3236
3237
3238
3239
3240 phy->ops.setup_internal_link =
3241 ixgbe_setup_internal_phy_t_x550em;
3242
3243
3244 if (hw->mac.type == ixgbe_mac_X550EM_x &&
3245 !(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) &
3246 IXGBE_FUSES0_REV_MASK))
3247 phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
3248
3249 phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
3250 phy->ops.reset = ixgbe_reset_phy_t_X550em;
3251 break;
3252 case ixgbe_phy_sgmii:
3253 phy->ops.setup_link = NULL;
3254 break;
3255 case ixgbe_phy_fw:
3256 phy->ops.setup_link = ixgbe_setup_fw_link;
3257 phy->ops.reset = ixgbe_reset_phy_fw;
3258 break;
3259 case ixgbe_phy_ext_1g_t:
3260 phy->ops.setup_link = NULL;
3261 phy->ops.read_reg = NULL;
3262 phy->ops.write_reg = NULL;
3263 phy->ops.reset = NULL;
3264 break;
3265 default:
3266 break;
3267 }
3268
3269 return ret_val;
3270 }
3271
3272 /**
3273  * ixgbe_get_media_type_X550em - Get media type
3274  * @hw: pointer to hardware structure
3275  *
3276  * Returns the media type (fiber, copper, backplane)
3277  **/
3278 static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
3279 {
3280 enum ixgbe_media_type media_type;
3281
3282
3283 switch (hw->device_id) {
3284 case IXGBE_DEV_ID_X550EM_A_SGMII:
3285 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
3286 hw->phy.type = ixgbe_phy_sgmii;
3287 fallthrough;
3288 case IXGBE_DEV_ID_X550EM_X_KR:
3289 case IXGBE_DEV_ID_X550EM_X_KX4:
3290 case IXGBE_DEV_ID_X550EM_X_XFI:
3291 case IXGBE_DEV_ID_X550EM_A_KR:
3292 case IXGBE_DEV_ID_X550EM_A_KR_L:
3293 media_type = ixgbe_media_type_backplane;
3294 break;
3295 case IXGBE_DEV_ID_X550EM_X_SFP:
3296 case IXGBE_DEV_ID_X550EM_A_SFP:
3297 case IXGBE_DEV_ID_X550EM_A_SFP_N:
3298 media_type = ixgbe_media_type_fiber;
3299 break;
3300 case IXGBE_DEV_ID_X550EM_X_1G_T:
3301 case IXGBE_DEV_ID_X550EM_X_10G_T:
3302 case IXGBE_DEV_ID_X550EM_A_10G_T:
3303 case IXGBE_DEV_ID_X550EM_A_1G_T:
3304 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
3305 media_type = ixgbe_media_type_copper;
3306 break;
3307 default:
3308 media_type = ixgbe_media_type_unknown;
3309 break;
3310 }
3311 return media_type;
3312 }
3313
3314 /** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
3315  *  @hw: pointer to hardware structure
3316  **/
3317 static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
3318 {
3319 s32 status;
3320 u16 reg;
3321
3322 status = hw->phy.ops.read_reg(hw,
3323 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
3324 MDIO_MMD_PMAPMD,
3325 &reg);
3326 if (status)
3327 return status;
3328
3329
3330
3331
3332 if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
3333 status = hw->phy.ops.read_reg(hw,
3334 IXGBE_MDIO_GLOBAL_RES_PR_10,
3335 MDIO_MMD_VEND1,
3336 &reg);
3337 if (status)
3338 return status;
3339
3340 reg &= ~IXGBE_MDIO_POWER_UP_STALL;
3341
3342 status = hw->phy.ops.write_reg(hw,
3343 IXGBE_MDIO_GLOBAL_RES_PR_10,
3344 MDIO_MMD_VEND1,
3345 reg);
3346 if (status)
3347 return status;
3348 }
3349
3350 return status;
3351 }
3352
3353 /**
3354  * ixgbe_set_mdio_speed - Set MDIO clock speed before first PHY access
3355  * @hw: pointer to hardware structure
3356  **/
3357 static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
3358 {
3359 u32 hlreg0;
3360
3361 switch (hw->device_id) {
3362 case IXGBE_DEV_ID_X550EM_X_10G_T:
3363 case IXGBE_DEV_ID_X550EM_A_SGMII:
3364 case IXGBE_DEV_ID_X550EM_A_SGMII_L:
3365 case IXGBE_DEV_ID_X550EM_A_10G_T:
3366 case IXGBE_DEV_ID_X550EM_A_SFP:
3367
3368 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3369 hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
3370 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3371 break;
3372 case IXGBE_DEV_ID_X550EM_A_1G_T:
3373 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
3374
3375 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3376 hlreg0 |= IXGBE_HLREG0_MDCSPD;
3377 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3378 break;
3379 default:
3380 break;
3381 }
3382 }
3383
3384 /**
3385  * ixgbe_reset_hw_X550em - Perform hardware reset
3386  * @hw: pointer to hardware structure
3387  *
3388  * Resets the hardware by resetting the transmit and receive units, masks
3389  * and clears all interrupts, performs a PHY reset and a link (MAC) reset.
3390  **/
3391 static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
3392 {
3393 ixgbe_link_speed link_speed;
3394 s32 status;
3395 u32 ctrl = 0;
3396 u32 i;
3397 bool link_up = false;
3398 u32 swfw_mask = hw->phy.phy_semaphore_mask;
3399
3400
3401 status = hw->mac.ops.stop_adapter(hw);
3402 if (status)
3403 return status;
3404
3405
3406 ixgbe_clear_tx_pending(hw);
3407
3408
3409 ixgbe_set_mdio_speed(hw);
3410
3411
3412 status = hw->phy.ops.init(hw);
3413 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
3414 status == IXGBE_ERR_PHY_ADDR_INVALID)
3415 return status;
3416
3417
3418 if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
3419 status = ixgbe_init_ext_t_x550em(hw);
3420 if (status)
3421 return status;
3422 }
3423
3424
3425 if (hw->phy.sfp_setup_needed) {
3426 status = hw->mac.ops.setup_sfp(hw);
3427 hw->phy.sfp_setup_needed = false;
3428 }
3429
3430 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
3431 return status;
3432
3433
3434 if (!hw->phy.reset_disable && hw->phy.ops.reset)
3435 hw->phy.ops.reset(hw);
3436
3437 mac_reset_top:
3438
3439
3440
3441
3442
3443 ctrl = IXGBE_CTRL_LNK_RST;
3444
3445 if (!hw->force_full_reset) {
3446 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3447 if (link_up)
3448 ctrl = IXGBE_CTRL_RST;
3449 }
3450
3451 status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
3452 if (status) {
3453 hw_dbg(hw, "semaphore failed with %d", status);
3454 return IXGBE_ERR_SWFW_SYNC;
3455 }
3456
3457 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
3458 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
3459 IXGBE_WRITE_FLUSH(hw);
3460 hw->mac.ops.release_swfw_sync(hw, swfw_mask);
3461 usleep_range(1000, 1200);
3462
3463
3464 for (i = 0; i < 10; i++) {
3465 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
3466 if (!(ctrl & IXGBE_CTRL_RST_MASK))
3467 break;
3468 udelay(1);
3469 }
3470
3471 if (ctrl & IXGBE_CTRL_RST_MASK) {
3472 status = IXGBE_ERR_RESET_FAILED;
3473 hw_dbg(hw, "Reset polling failed to complete.\n");
3474 }
3475
3476 msleep(50);
3477
3478
3479
3480
3481
3482 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
3483 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3484 goto mac_reset_top;
3485 }
3486
3487
3488 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
3489
3490
3491
3492
3493
3494 hw->mac.num_rar_entries = 128;
3495 hw->mac.ops.init_rx_addrs(hw);
3496
3497 ixgbe_set_mdio_speed(hw);
3498
3499 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
3500 ixgbe_setup_mux_ctl(hw);
3501
3502 return status;
3503 }
3504
3505 /** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
3506  *	anti-spoofing
3507  *  @hw: pointer to hardware structure
3508  *  @enable: enable or disable switch for Ethertype anti-spoofing
3509  *  @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
3510  **/
3511 static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
3512 bool enable, int vf)
3513 {
3514 int vf_target_reg = vf >> 3;
3515 int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
3516 u32 pfvfspoof;
3517
3518 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3519 if (enable)
3520 pfvfspoof |= BIT(vf_target_shift);
3521 else
3522 pfvfspoof &= ~BIT(vf_target_shift);
3523
3524 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3525 }
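/* Editor's note: worked example for the index math above (illustrative only).
 * Each PFVFSPOOF register holds the Ethertype anti-spoofing bits for eight
 * VFs, starting at bit IXGBE_SPOOF_ETHERTYPEAS_SHIFT. For vf = 11:
 *
 *	vf_target_reg   = 11 >> 3 = 1
 *	vf_target_shift = 11 % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT
 *			= 3 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT
 *
 * so bit (3 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT) of IXGBE_PFVFSPOOF(1) is set or
 * cleared depending on @enable.
 */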
3526
3527 /** ixgbe_set_source_address_pruning_X550 - Enable/Disable src address pruning
3528  *  @hw: pointer to hardware structure
3529  *  @enable: enable or disable source address pruning
3530  *  @pool: Rx pool to set source address pruning for
3531  **/
3532 static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
3533 bool enable,
3534 unsigned int pool)
3535 {
3536 u64 pfflp;
3537
3538
3539 if (pool > 63)
3540 return;
3541
3542 pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
3543 pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
3544
3545 if (enable)
3546 pfflp |= (1ULL << pool);
3547 else
3548 pfflp &= ~(1ULL << pool);
3549
3550 IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
3551 IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
3552 }
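/* Editor's note: worked example for the bitmap handling above (illustrative
 * only). The per-pool pruning bits live in two 32-bit registers, PFFLPL
 * (pools 0-31) and PFFLPH (pools 32-63), combined into a single u64 before
 * the update. Enabling pool 40, for example, sets bit 40 of the combined
 * value, i.e. bit (40 - 32) = 8 of PFFLPH, while PFFLPL is written back
 * unchanged.
 */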
3553
3554 /**
3555  * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
3556  * @hw: pointer to hardware structure
3557  *
3558  * Called at init time to set up flow control.
3559  **/
3560 static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
3561 {
3562 s32 status = 0;
3563 u32 an_cntl = 0;
3564
3565
3566 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
3567 hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
3568 return IXGBE_ERR_INVALID_LINK_SETTINGS;
3569 }
3570
3571 if (hw->fc.requested_mode == ixgbe_fc_default)
3572 hw->fc.requested_mode = ixgbe_fc_full;
3573
3574
3575
3576
3577
3578 status = hw->mac.ops.read_iosf_sb_reg(hw,
3579 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3580 IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
3581
3582 if (status) {
3583 hw_dbg(hw, "Auto-Negotiation did not complete\n");
3584 return status;
3585 }
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596 switch (hw->fc.requested_mode) {
3597 case ixgbe_fc_none:
3598
3599 an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
3600 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
3601 break;
3602 case ixgbe_fc_tx_pause:
3603
3604
3605
3606 an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
3607 an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
3608 break;
3609 case ixgbe_fc_rx_pause:
3610 /* Rx Flow control is enabled and Tx Flow control is
3611  * disabled by software override. Since there really
3612  * isn't a way to advertise that we are capable of RX
3613  * Pause ONLY, we will advertise that we support both
3614  * symmetric and asymmetric Rx PAUSE, as such we fall
3615  * through to the fc_full statement.  Later, we will
3616  * disable the adapter's ability to send PAUSE frames.
3617  */
3618 case ixgbe_fc_full:
3619
3620 an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
3621 IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
3622 break;
3623 default:
3624 hw_err(hw, "Flow control param set incorrectly\n");
3625 return IXGBE_ERR_CONFIG;
3626 }
3627
3628 status = hw->mac.ops.write_iosf_sb_reg(hw,
3629 IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3630 IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
3631
3632
3633 status = ixgbe_restart_an_internal_phy_x550em(hw);
3634
3635 return status;
3636 }
3637
3638 /**
3639  * ixgbe_set_mux - Set mux for port 1 access with CS4227
3640  * @hw: pointer to hardware structure
3641  * @state: set mux if 1, clear if 0
3642  **/
3643 static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
3644 {
3645 u32 esdp;
3646
3647 if (!hw->bus.lan_id)
3648 return;
3649 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3650 if (state)
3651 esdp |= IXGBE_ESDP_SDP1;
3652 else
3653 esdp &= ~IXGBE_ESDP_SDP1;
3654 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
3655 IXGBE_WRITE_FLUSH(hw);
3656 }
3657
3658 /**
3659  * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
3660  * @hw: pointer to hardware structure
3661  * @mask: Mask to specify which semaphore to acquire
3662  *
3663  * Acquires the SWFW semaphore and sets the I2C MUX.
3664  **/
3665 static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
3666 {
3667 s32 status;
3668
3669 status = ixgbe_acquire_swfw_sync_X540(hw, mask);
3670 if (status)
3671 return status;
3672
3673 if (mask & IXGBE_GSSR_I2C_MASK)
3674 ixgbe_set_mux(hw, 1);
3675
3676 return 0;
3677 }
3678
3679 /**
3680  * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
3681  * @hw: pointer to hardware structure
3682  * @mask: Mask to specify which semaphore to release
3683  *
3684  * Releases the SWFW semaphore and clears the I2C MUX.
3685  **/
3686 static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
3687 {
3688 if (mask & IXGBE_GSSR_I2C_MASK)
3689 ixgbe_set_mux(hw, 0);
3690
3691 ixgbe_release_swfw_sync_X540(hw, mask);
3692 }
3693
3694 /**
3695  * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore
3696  * @hw: pointer to hardware structure
3697  * @mask: Mask to specify which semaphore to acquire
3698  *
3699  * Acquires the SWFW semaphore and gets the shared PHY token as needed.
3700  **/
3701 static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
3702 {
3703 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
3704 int retries = FW_PHY_TOKEN_RETRIES;
3705 s32 status;
3706
3707 while (--retries) {
3708 status = 0;
3709 if (hmask)
3710 status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
3711 if (status)
3712 return status;
3713 if (!(mask & IXGBE_GSSR_TOKEN_SM))
3714 return 0;
3715
3716 status = ixgbe_get_phy_token(hw);
3717 if (!status)
3718 return 0;
3719 if (hmask)
3720 ixgbe_release_swfw_sync_X540(hw, hmask);
3721 if (status != IXGBE_ERR_TOKEN_RETRY)
3722 return status;
3723 msleep(FW_PHY_TOKEN_DELAY);
3724 }
3725
3726 return status;
3727 }
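/* Editor's note: illustrative pairing of the x550em_a acquire/release helpers
 * (hypothetical caller; the real pattern appears in ixgbe_read_phy_reg_x550a()
 * below). IXGBE_GSSR_TOKEN_SM is OR'ed into the mask whenever the shared MDIO
 * PHY token is also required:
 *
 *	u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
 *
 *	if (hw->mac.ops.acquire_swfw_sync(hw, mask))
 *		return IXGBE_ERR_SWFW_SYNC;
 *	... access the shared PHY ...
 *	hw->mac.ops.release_swfw_sync(hw, mask);
 */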
3728
3729 /**
3730  * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore
3731  * @hw: pointer to hardware structure
3732  * @mask: Mask to specify which semaphore to release
3733  *
3734  * Releases the SWFW semaphore and puts the shared PHY token as needed.
3735  **/
3736 static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
3737 {
3738 u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
3739
3740 if (mask & IXGBE_GSSR_TOKEN_SM)
3741 ixgbe_put_phy_token(hw);
3742
3743 if (hmask)
3744 ixgbe_release_swfw_sync_X540(hw, hmask);
3745 }
3746
3747 /**
3748  * ixgbe_read_phy_reg_x550a - Reads specified PHY register
3749  * @hw: pointer to hardware structure
3750  * @reg_addr: 32 bit address of PHY register to read
3751  * @device_type: 5 bit device type
3752  * @phy_data: Pointer to read data from PHY register
3753  *
3754  * Reads a value from a specified PHY register using the SWFW lock and PHY
3755  * Token. The PHY Token is needed since the MDIO is shared between two MAC
3756  * instances.
3757  **/
3758 static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
3759 u32 device_type, u16 *phy_data)
3760 {
3761 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
3762 s32 status;
3763
3764 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
3765 return IXGBE_ERR_SWFW_SYNC;
3766
3767 status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
3768
3769 hw->mac.ops.release_swfw_sync(hw, mask);
3770
3771 return status;
3772 }
3773
3774 /**
3775  * ixgbe_write_phy_reg_x550a - Writes specified PHY register
3776  * @hw: pointer to hardware structure
3777  * @reg_addr: 32 bit PHY register to write
3778  * @device_type: 5 bit device type
3779  * @phy_data: Data to write to the PHY register
3780  *
3781  * Writes a value to the specified PHY register using the SWFW lock and PHY
3782  * Token, which is needed since the MDIO is shared between two MAC instances.
3783  **/
3784 static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
3785 u32 device_type, u16 phy_data)
3786 {
3787 u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
3788 s32 status;
3789
3790 if (hw->mac.ops.acquire_swfw_sync(hw, mask))
3791 return IXGBE_ERR_SWFW_SYNC;
3792
3793 status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
3794 hw->mac.ops.release_swfw_sync(hw, mask);
3795
3796 return status;
3797 }
3798
3799 #define X550_COMMON_MAC \
3800 .init_hw = &ixgbe_init_hw_generic, \
3801 .start_hw = &ixgbe_start_hw_X540, \
3802 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \
3803 .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \
3804 .get_mac_addr = &ixgbe_get_mac_addr_generic, \
3805 .get_device_caps = &ixgbe_get_device_caps_generic, \
3806 .stop_adapter = &ixgbe_stop_adapter_generic, \
3807 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \
3808 .read_analog_reg8 = NULL, \
3809 .write_analog_reg8 = NULL, \
3810 .set_rxpba = &ixgbe_set_rxpba_generic, \
3811 .check_link = &ixgbe_check_mac_link_generic, \
3812 .blink_led_start = &ixgbe_blink_led_start_X540, \
3813 .blink_led_stop = &ixgbe_blink_led_stop_X540, \
3814 .set_rar = &ixgbe_set_rar_generic, \
3815 .clear_rar = &ixgbe_clear_rar_generic, \
3816 .set_vmdq = &ixgbe_set_vmdq_generic, \
3817 .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \
3818 .clear_vmdq = &ixgbe_clear_vmdq_generic, \
3819 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \
3820 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \
3821 .enable_mc = &ixgbe_enable_mc_generic, \
3822 .disable_mc = &ixgbe_disable_mc_generic, \
3823 .clear_vfta = &ixgbe_clear_vfta_generic, \
3824 .set_vfta = &ixgbe_set_vfta_generic, \
3825 .fc_enable = &ixgbe_fc_enable_generic, \
3826 .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \
3827 .init_uta_tables = &ixgbe_init_uta_tables_generic, \
3828 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
3829 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
3830 .set_source_address_pruning = \
3831 &ixgbe_set_source_address_pruning_X550, \
3832 .set_ethertype_anti_spoofing = \
3833 &ixgbe_set_ethertype_anti_spoofing_X550, \
3834 .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
3835 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
3836 .get_thermal_sensor_data = NULL, \
3837 .init_thermal_sensor_thresh = NULL, \
3838 .fw_recovery_mode = &ixgbe_fw_recovery_mode_X550, \
3839 .enable_rx = &ixgbe_enable_rx_generic, \
3840 .disable_rx = &ixgbe_disable_rx_x550, \
3841
3842 static const struct ixgbe_mac_operations mac_ops_X550 = {
3843 X550_COMMON_MAC
3844 .led_on = ixgbe_led_on_generic,
3845 .led_off = ixgbe_led_off_generic,
3846 .init_led_link_act = ixgbe_init_led_link_act_generic,
3847 .reset_hw = &ixgbe_reset_hw_X540,
3848 .get_media_type = &ixgbe_get_media_type_X540,
3849 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
3850 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
3851 .setup_link = &ixgbe_setup_mac_link_X540,
3852 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
3853 .get_bus_info = &ixgbe_get_bus_info_generic,
3854 .setup_sfp = NULL,
3855 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
3856 .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
3857 .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
3858 .prot_autoc_read = prot_autoc_read_generic,
3859 .prot_autoc_write = prot_autoc_write_generic,
3860 .setup_fc = ixgbe_setup_fc_generic,
3861 .fc_autoneg = ixgbe_fc_autoneg,
3862 };
3863
3864 static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
3865 X550_COMMON_MAC
3866 .led_on = ixgbe_led_on_t_x550em,
3867 .led_off = ixgbe_led_off_t_x550em,
3868 .init_led_link_act = ixgbe_init_led_link_act_generic,
3869 .reset_hw = &ixgbe_reset_hw_X550em,
3870 .get_media_type = &ixgbe_get_media_type_X550em,
3871 .get_san_mac_addr = NULL,
3872 .get_wwn_prefix = NULL,
3873 .setup_link = &ixgbe_setup_mac_link_X540,
3874 .get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
3875 .get_bus_info = &ixgbe_get_bus_info_X550em,
3876 .setup_sfp = ixgbe_setup_sfp_modules_X550em,
3877 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
3878 .release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
3879 .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
3880 .setup_fc = NULL,
3881 .fc_autoneg = ixgbe_fc_autoneg,
3882 .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
3883 .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
3884 };
3885
3886 static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = {
3887 X550_COMMON_MAC
3888 .led_on = NULL,
3889 .led_off = NULL,
3890 .init_led_link_act = NULL,
3891 .reset_hw = &ixgbe_reset_hw_X550em,
3892 .get_media_type = &ixgbe_get_media_type_X550em,
3893 .get_san_mac_addr = NULL,
3894 .get_wwn_prefix = NULL,
3895 .setup_link = &ixgbe_setup_mac_link_X540,
3896 .get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
3897 .get_bus_info = &ixgbe_get_bus_info_X550em,
3898 .setup_sfp = ixgbe_setup_sfp_modules_X550em,
3899 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
3900 .release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
3901 .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
3902 .setup_fc = NULL,
3903 .fc_autoneg = ixgbe_fc_autoneg,
3904 .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
3905 .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
3906 };
3907
3908 static const struct ixgbe_mac_operations mac_ops_x550em_a = {
3909 X550_COMMON_MAC
3910 .led_on = ixgbe_led_on_t_x550em,
3911 .led_off = ixgbe_led_off_t_x550em,
3912 .init_led_link_act = ixgbe_init_led_link_act_generic,
3913 .reset_hw = ixgbe_reset_hw_X550em,
3914 .get_media_type = ixgbe_get_media_type_X550em,
3915 .get_san_mac_addr = NULL,
3916 .get_wwn_prefix = NULL,
3917 .setup_link = &ixgbe_setup_mac_link_X540,
3918 .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
3919 .get_bus_info = ixgbe_get_bus_info_X550em,
3920 .setup_sfp = ixgbe_setup_sfp_modules_X550em,
3921 .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
3922 .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
3923 .setup_fc = ixgbe_setup_fc_x550em,
3924 .fc_autoneg = ixgbe_fc_autoneg,
3925 .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
3926 .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
3927 };
3928
3929 static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
3930 X550_COMMON_MAC
3931 .led_on = ixgbe_led_on_generic,
3932 .led_off = ixgbe_led_off_generic,
3933 .init_led_link_act = ixgbe_init_led_link_act_generic,
3934 .reset_hw = ixgbe_reset_hw_X550em,
3935 .get_media_type = ixgbe_get_media_type_X550em,
3936 .get_san_mac_addr = NULL,
3937 .get_wwn_prefix = NULL,
3938 .setup_link = NULL,
3939 .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
3940 .get_bus_info = ixgbe_get_bus_info_X550em,
3941 .setup_sfp = ixgbe_setup_sfp_modules_X550em,
3942 .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
3943 .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
3944 .setup_fc = ixgbe_setup_fc_x550em,
3945 .fc_autoneg = ixgbe_fc_autoneg,
3946 .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
3947 .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
3948 };
3949
3950 #define X550_COMMON_EEP \
3951 .read = &ixgbe_read_ee_hostif_X550, \
3952 .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \
3953 .write = &ixgbe_write_ee_hostif_X550, \
3954 .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \
3955 .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \
3956 .update_checksum = &ixgbe_update_eeprom_checksum_X550, \
3957 .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
3958
3959 static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
3960 X550_COMMON_EEP
3961 .init_params = &ixgbe_init_eeprom_params_X550,
3962 };
3963
3964 static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
3965 X550_COMMON_EEP
3966 .init_params = &ixgbe_init_eeprom_params_X540,
3967 };
3968
3969 #define X550_COMMON_PHY \
3970 .identify_sfp = &ixgbe_identify_module_generic, \
3971 .reset = NULL, \
3972 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \
3973 .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \
3974 .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \
3975 .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
3976 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
3977 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
3978 .setup_link = &ixgbe_setup_phy_link_generic, \
3979 .set_phy_power = NULL,
3980
3981 static const struct ixgbe_phy_operations phy_ops_X550 = {
3982 X550_COMMON_PHY
3983 .check_overtemp = &ixgbe_tn_check_overtemp,
3984 .init = NULL,
3985 .identify = &ixgbe_identify_phy_generic,
3986 .read_reg = &ixgbe_read_phy_reg_generic,
3987 .write_reg = &ixgbe_write_phy_reg_generic,
3988 };
3989
3990 static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
3991 X550_COMMON_PHY
3992 .check_overtemp = &ixgbe_tn_check_overtemp,
3993 .init = &ixgbe_init_phy_ops_X550em,
3994 .identify = &ixgbe_identify_phy_x550em,
3995 .read_reg = &ixgbe_read_phy_reg_generic,
3996 .write_reg = &ixgbe_write_phy_reg_generic,
3997 };
3998
3999 static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = {
4000 X550_COMMON_PHY
4001 .check_overtemp = NULL,
4002 .init = ixgbe_init_phy_ops_X550em,
4003 .identify = ixgbe_identify_phy_x550em,
4004 .read_reg = NULL,
4005 .write_reg = NULL,
4006 .read_reg_mdi = NULL,
4007 .write_reg_mdi = NULL,
4008 };
4009
4010 static const struct ixgbe_phy_operations phy_ops_x550em_a = {
4011 X550_COMMON_PHY
4012 .check_overtemp = &ixgbe_tn_check_overtemp,
4013 .init = &ixgbe_init_phy_ops_X550em,
4014 .identify = &ixgbe_identify_phy_x550em,
4015 .read_reg = &ixgbe_read_phy_reg_x550a,
4016 .write_reg = &ixgbe_write_phy_reg_x550a,
4017 .read_reg_mdi = &ixgbe_read_phy_reg_mdi,
4018 .write_reg_mdi = &ixgbe_write_phy_reg_mdi,
4019 };
4020
4021 static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = {
4022 X550_COMMON_PHY
4023 .check_overtemp = ixgbe_check_overtemp_fw,
4024 .init = ixgbe_init_phy_ops_X550em,
4025 .identify = ixgbe_identify_phy_fw,
4026 .read_reg = NULL,
4027 .write_reg = NULL,
4028 .read_reg_mdi = NULL,
4029 .write_reg_mdi = NULL,
4030 };
4031
4032 static const struct ixgbe_link_operations link_ops_x550em_x = {
4033 .read_link = &ixgbe_read_i2c_combined_generic,
4034 .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
4035 .write_link = &ixgbe_write_i2c_combined_generic,
4036 .write_link_unlocked = &ixgbe_write_i2c_combined_generic_unlocked,
4037 };
4038
4039 static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
4040 IXGBE_MVALS_INIT(X550)
4041 };
4042
4043 static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
4044 IXGBE_MVALS_INIT(X550EM_x)
4045 };
4046
4047 static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
4048 IXGBE_MVALS_INIT(X550EM_a)
4049 };
4050
4051 const struct ixgbe_info ixgbe_X550_info = {
4052 .mac = ixgbe_mac_X550,
4053 .get_invariants = &ixgbe_get_invariants_X540,
4054 .mac_ops = &mac_ops_X550,
4055 .eeprom_ops = &eeprom_ops_X550,
4056 .phy_ops = &phy_ops_X550,
4057 .mbx_ops = &mbx_ops_generic,
4058 .mvals = ixgbe_mvals_X550,
4059 };
4060
4061 const struct ixgbe_info ixgbe_X550EM_x_info = {
4062 .mac = ixgbe_mac_X550EM_x,
4063 .get_invariants = &ixgbe_get_invariants_X550_x,
4064 .mac_ops = &mac_ops_X550EM_x,
4065 .eeprom_ops = &eeprom_ops_X550EM_x,
4066 .phy_ops = &phy_ops_X550EM_x,
4067 .mbx_ops = &mbx_ops_generic,
4068 .mvals = ixgbe_mvals_X550EM_x,
4069 .link_ops = &link_ops_x550em_x,
4070 };
4071
4072 const struct ixgbe_info ixgbe_x550em_x_fw_info = {
4073 .mac = ixgbe_mac_X550EM_x,
4074 .get_invariants = ixgbe_get_invariants_X550_x_fw,
4075 .mac_ops = &mac_ops_X550EM_x_fw,
4076 .eeprom_ops = &eeprom_ops_X550EM_x,
4077 .phy_ops = &phy_ops_x550em_x_fw,
4078 .mbx_ops = &mbx_ops_generic,
4079 .mvals = ixgbe_mvals_X550EM_x,
4080 };
4081
4082 const struct ixgbe_info ixgbe_x550em_a_info = {
4083 .mac = ixgbe_mac_x550em_a,
4084 .get_invariants = &ixgbe_get_invariants_X550_a,
4085 .mac_ops = &mac_ops_x550em_a,
4086 .eeprom_ops = &eeprom_ops_X550EM_x,
4087 .phy_ops = &phy_ops_x550em_a,
4088 .mbx_ops = &mbx_ops_generic,
4089 .mvals = ixgbe_mvals_x550em_a,
4090 };
4091
4092 const struct ixgbe_info ixgbe_x550em_a_fw_info = {
4093 .mac = ixgbe_mac_x550em_a,
4094 .get_invariants = ixgbe_get_invariants_X550_a_fw,
4095 .mac_ops = &mac_ops_x550em_a_fw,
4096 .eeprom_ops = &eeprom_ops_X550EM_x,
4097 .phy_ops = &phy_ops_x550em_a_fw,
4098 .mbx_ops = &mbx_ops_generic,
4099 .mvals = ixgbe_mvals_x550em_a,
4100 };