0001
0002
0003
0004 #include <linux/pci.h>
0005 #include <linux/delay.h>
0006 #include <linux/iopoll.h>
0007 #include <linux/sched.h>
0008
0009 #include "ixgbe.h"
0010 #include "ixgbe_phy.h"
0011
0012 static void ixgbe_i2c_start(struct ixgbe_hw *hw);
0013 static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
0014 static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
0015 static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
0016 static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
0017 static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
0018 static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
0019 static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
0020 static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
0021 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
0022 static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl);
0023 static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
0024 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
0025 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
0026 static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
0027
0028
0029
0030
0031
0032
0033
0034
0035 static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte)
0036 {
0037 s32 status;
0038
0039 status = ixgbe_clock_out_i2c_byte(hw, byte);
0040 if (status)
0041 return status;
0042 return ixgbe_get_i2c_ack(hw);
0043 }
0044
0045
0046
0047
0048
0049
0050
0051
0052 static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte)
0053 {
0054 s32 status;
0055
0056 status = ixgbe_clock_in_i2c_byte(hw, byte);
0057 if (status)
0058 return status;
0059
0060 return ixgbe_clock_out_i2c_bit(hw, false);
0061 }
0062
0063
0064
0065
0066
0067
0068
0069
/**
 * ixgbe_ones_comp_byte_add - One's complement addition of two bytes
 * @add1: first addend
 * @add2: second addend
 *
 * Adds the two bytes in a 16-bit intermediate and folds the carry out of
 * the high byte back into the low byte (end-around carry), yielding the
 * 8-bit one's complement sum.
 */
static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2)
{
	u16 total = (u16)add1 + add2;

	/* end-around carry: fold bit 8 back into the low byte */
	total = (total >> 8) + (total & 0xFF);
	return (u8)total;
}
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
/**
 * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to read from
 * @reg: I2C device register to read from
 * @val: pointer to location to receive read value
 * @lock: true if the SWFW semaphore should be taken and released per attempt
 *
 * Bit-bangs a "read combined" transaction: write phase (address, register
 * high/low bytes, checksum), repeated start, then read phase (data high/low
 * and a checksum byte).  Retries up to three times, clearing the bus on
 * each failure.  Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore
 * cannot be taken, IXGBE_ERR_I2C after all retries fail.
 */
s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
					u16 reg, u16 *val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 3;
	int retry = 0;
	u8 csum_byte;
	u8 high_bits;
	u8 low_bits;
	u8 reg_high;
	u8 csum;

	/* High command byte carries reg[14:7]; bit 0 set = read combined. */
	reg_high = ((reg >> 7) & 0xFE) | 1;
	/* One's complement checksum over the two register bytes, inverted. */
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ~csum;
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device address (write direction) */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Register bits 14:8 */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		/* Register bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Checksum of the write phase */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		/* Repeated start switches the transaction to the read phase */
		ixgbe_i2c_start(hw);
		/* Device address with the read bit set */
		if (ixgbe_out_i2c_byte_ack(hw, addr | 1))
			goto fail;
		/* Data high byte */
		if (ixgbe_in_i2c_byte_ack(hw, &high_bits))
			goto fail;
		/* Data low byte */
		if (ixgbe_in_i2c_byte_ack(hw, &low_bits))
			goto fail;
		/* Checksum byte.
		 * NOTE(review): csum_byte is read but never compared against
		 * a locally computed checksum — confirm that is intentional.
		 */
		if (ixgbe_clock_in_i2c_byte(hw, &csum_byte))
			goto fail;
		/* Clock out a final 0 bit after the checksum byte */
		if (ixgbe_clock_out_i2c_bit(hw, false))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		*val = (high_bits << 8) | low_bits;
		return 0;

fail:
		/* Recover the bus and release the semaphore between tries */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte read combined error - Retry.\n");
		else
			hw_dbg(hw, "I2C byte read combined error.\n");
	} while (retry < max_retry);

	return IXGBE_ERR_I2C;
}
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
/**
 * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to write to
 * @reg: I2C device register to write to
 * @val: value to write
 * @lock: true if the SWFW semaphore should be taken and released per attempt
 *
 * Bit-bangs a "write combined" transaction: device address, register
 * high/low bytes, data high/low bytes, and an inverted one's complement
 * checksum.  Retries once on failure (max_retry = 1 means one extra try).
 * Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 * taken, IXGBE_ERR_I2C after retries are exhausted.
 */
s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
					 u16 reg, u16 val, bool lock)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	int max_retry = 1;
	int retry = 0;
	u8 reg_high;
	u8 csum;

	/* High command byte carries reg[14:7]; bit 0 clear = write combined. */
	reg_high = (reg >> 7) & 0xFE;
	/* One's complement checksum over register and data bytes, inverted. */
	csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
	csum = ixgbe_ones_comp_byte_add(csum, val >> 8);
	csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF);
	csum = ~csum;
	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;
		ixgbe_i2c_start(hw);
		/* Device address (write direction) */
		if (ixgbe_out_i2c_byte_ack(hw, addr))
			goto fail;
		/* Register bits 14:8 */
		if (ixgbe_out_i2c_byte_ack(hw, reg_high))
			goto fail;
		/* Register bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF))
			goto fail;
		/* Data bits 15:8 */
		if (ixgbe_out_i2c_byte_ack(hw, val >> 8))
			goto fail;
		/* Data bits 7:0 */
		if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF))
			goto fail;
		/* Checksum */
		if (ixgbe_out_i2c_byte_ack(hw, csum))
			goto fail;
		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* Recover the bus and release the semaphore between tries */
		ixgbe_i2c_bus_clear(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte write combined error - Retry.\n");
		else
			hw_dbg(hw, "I2C byte write combined error.\n");
	} while (retry < max_retry);

	return IXGBE_ERR_I2C;
}
0220
0221
0222
0223
0224
0225
0226
0227
0228 static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
0229 {
0230 u16 ext_ability = 0;
0231
0232 hw->phy.mdio.prtad = phy_addr;
0233 if (mdio45_probe(&hw->phy.mdio, phy_addr) != 0)
0234 return false;
0235
0236 if (ixgbe_get_phy_id(hw))
0237 return false;
0238
0239 hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
0240
0241 if (hw->phy.type == ixgbe_phy_unknown) {
0242 hw->phy.ops.read_reg(hw,
0243 MDIO_PMA_EXTABLE,
0244 MDIO_MMD_PMAPMD,
0245 &ext_ability);
0246 if (ext_ability &
0247 (MDIO_PMA_EXTABLE_10GBT |
0248 MDIO_PMA_EXTABLE_1000BT))
0249 hw->phy.type = ixgbe_phy_cu_unknown;
0250 else
0251 hw->phy.type = ixgbe_phy_generic;
0252 }
0253
0254 return true;
0255 }
0256
0257
0258
0259
0260
0261
0262
0263 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
0264 {
0265 u32 phy_addr;
0266 u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
0267
0268 if (!hw->phy.phy_semaphore_mask) {
0269 if (hw->bus.lan_id)
0270 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
0271 else
0272 hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
0273 }
0274
0275 if (hw->phy.type != ixgbe_phy_unknown)
0276 return 0;
0277
0278 if (hw->phy.nw_mng_if_sel) {
0279 phy_addr = (hw->phy.nw_mng_if_sel &
0280 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
0281 IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
0282 if (ixgbe_probe_phy(hw, phy_addr))
0283 return 0;
0284 else
0285 return IXGBE_ERR_PHY_ADDR_INVALID;
0286 }
0287
0288 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
0289 if (ixgbe_probe_phy(hw, phy_addr)) {
0290 status = 0;
0291 break;
0292 }
0293 }
0294
0295
0296
0297
0298
0299 if (status)
0300 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
0301
0302 return status;
0303 }
0304
0305
0306
0307
0308
0309
0310
0311
0312
0313
0314 bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
0315 {
0316 u32 mmngc;
0317
0318
0319 if (hw->mac.type == ixgbe_mac_82598EB)
0320 return false;
0321
0322 mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
0323 if (mmngc & IXGBE_MMNGC_MNG_VETO) {
0324 hw_dbg(hw, "MNG_VETO bit detected.\n");
0325 return true;
0326 }
0327
0328 return false;
0329 }
0330
0331
0332
0333
0334
0335
0336 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
0337 {
0338 s32 status;
0339 u16 phy_id_high = 0;
0340 u16 phy_id_low = 0;
0341
0342 status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
0343 &phy_id_high);
0344
0345 if (!status) {
0346 hw->phy.id = (u32)(phy_id_high << 16);
0347 status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
0348 &phy_id_low);
0349 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
0350 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
0351 }
0352 return status;
0353 }
0354
0355
0356
0357
0358
0359
0360 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
0361 {
0362 enum ixgbe_phy_type phy_type;
0363
0364 switch (phy_id) {
0365 case TN1010_PHY_ID:
0366 phy_type = ixgbe_phy_tn;
0367 break;
0368 case X550_PHY_ID2:
0369 case X550_PHY_ID3:
0370 case X540_PHY_ID:
0371 phy_type = ixgbe_phy_aq;
0372 break;
0373 case QT2022_PHY_ID:
0374 phy_type = ixgbe_phy_qt;
0375 break;
0376 case ATH_PHY_ID:
0377 phy_type = ixgbe_phy_nl;
0378 break;
0379 case X557_PHY_ID:
0380 case X557_PHY_ID2:
0381 phy_type = ixgbe_phy_x550em_ext_t;
0382 break;
0383 case BCM54616S_E_PHY_ID:
0384 phy_type = ixgbe_phy_ext_1g_t;
0385 break;
0386 default:
0387 phy_type = ixgbe_phy_unknown;
0388 break;
0389 }
0390
0391 return phy_type;
0392 }
0393
0394
0395
0396
0397
/**
 * ixgbe_reset_phy_generic - Performs a PHY reset
 * @hw: pointer to hardware structure
 *
 * Identifies the PHY if needed, writes MDIO_CTRL1_RESET to the PHY XS MMD,
 * then polls up to 3 seconds (30 x 100 ms) for reset completion.  Returns
 * 0 on success or when the reset is skipped (overtemp shutdown or MNG FW
 * veto), a read error code, or IXGBE_ERR_RESET_FAILED on poll timeout.
 */
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
{
	u32 i;
	u16 ctrl = 0;
	s32 status = 0;

	if (hw->phy.type == ixgbe_phy_unknown)
		status = ixgbe_identify_phy_generic(hw);

	if (status != 0 || hw->phy.type == ixgbe_phy_none)
		return status;

	/* Don't reset the PHY if it's shut down due to overtemp,
	 * unless reset_if_overtemp explicitly requests it.
	 */
	if (!hw->phy.reset_if_overtemp &&
	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
		return 0;

	/* Blocked by MNG FW so bail */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Perform a soft PHY reset through the PHY XS MMD */
	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_PHYXS,
			      MDIO_CTRL1_RESET);

	/* Poll for reset completion: up to 30 x 100 ms.
	 * x550em_ext_t signals completion through a vendor alarm bit
	 * rather than the CTRL1 reset bit self-clearing.
	 */
	for (i = 0; i < 30; i++) {
		msleep(100);
		if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
			status = hw->phy.ops.read_reg(hw,
						  IXGBE_MDIO_TX_VENDOR_ALARMS_3,
						  MDIO_MMD_PMAPMD, &ctrl);
			if (status)
				return status;

			if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
				udelay(2);
				break;
			}
		} else {
			status = hw->phy.ops.read_reg(hw, MDIO_CTRL1,
						      MDIO_MMD_PHYXS, &ctrl);
			if (status)
				return status;

			if (!(ctrl & MDIO_CTRL1_RESET)) {
				udelay(2);
				break;
			}
		}
	}

	/* NOTE(review): on the x550em_ext_t path, ctrl holds the vendor
	 * alarms register at this point, so this check applies the
	 * MDIO_CTRL1_RESET bit to a different register layout — confirm
	 * this is intentional.
	 */
	if (ctrl & MDIO_CTRL1_RESET) {
		hw_dbg(hw, "PHY reset polling failed to complete.\n");
		return IXGBE_ERR_RESET_FAILED;
	}

	return 0;
}
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
/**
 * ixgbe_read_phy_reg_mdi - Read a PHY register without the SWFW lock
 * @hw: pointer to hardware structure
 * @reg_addr: address of the PHY register to read
 * @device_type: MMD device type
 * @phy_data: pointer receiving the value read from the PHY register
 *
 * Issues an MDI address cycle followed by a read cycle through the MSCA
 * register, polling each for completion.  Returns 0 on success or
 * IXGBE_ERR_PHY if either cycle times out.
 */
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
			   u16 *phy_data)
{
	u32 i, data, command;

	/* Setup and write the address cycle command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the address cycle completed.
	 * Hardware clears the MDI Command bit when the operation is done.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	/* Timed out without the command bit clearing */
	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY address command did not complete.\n");
		return IXGBE_ERR_PHY;
	}

	/* Address cycle complete: setup and write the read command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Poll the read cycle the same way as the address cycle */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY read command didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	/* Read operation is complete: fetch the data from MSRWD */
	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
	*phy_data = (u16)(data);

	return 0;
}
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
0552 u32 device_type, u16 *phy_data)
0553 {
0554 s32 status;
0555 u32 gssr = hw->phy.phy_semaphore_mask;
0556
0557 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
0558 status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
0559 phy_data);
0560 hw->mac.ops.release_swfw_sync(hw, gssr);
0561 } else {
0562 return IXGBE_ERR_SWFW_SYNC;
0563 }
0564
0565 return status;
0566 }
0567
0568
0569
0570
0571
0572
0573
0574
0575
/**
 * ixgbe_write_phy_reg_mdi - Write a PHY register without the SWFW lock
 * @hw: pointer to hardware structure
 * @reg_addr: address of the PHY register to write
 * @device_type: MMD device type
 * @phy_data: value to write to the PHY register
 *
 * Stages the data in MSRWD, issues an MDI address cycle followed by a
 * write cycle through the MSCA register, polling each for completion.
 * Returns 0 on success or IXGBE_ERR_PHY if either cycle times out.
 */
s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
			    u32 device_type, u16 phy_data)
{
	u32 i, command;

	/* Put the data in the MDI single read and write data register */
	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);

	/* Setup and write the address cycle command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Check every 10 usec to see if the address cycle completed.
	 * Hardware clears the MDI Command bit when the operation is done.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY address cmd didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	/* Address cycle complete: setup and write the write command */
	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
		   (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
		   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));

	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);

	/* Poll the write cycle the same way as the address cycle */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		udelay(10);

		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
			break;
	}

	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
		hw_dbg(hw, "PHY write cmd didn't complete\n");
		return IXGBE_ERR_PHY;
	}

	return 0;
}
0640
0641
0642
0643
0644
0645
0646
0647
0648
0649 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
0650 u32 device_type, u16 phy_data)
0651 {
0652 s32 status;
0653 u32 gssr = hw->phy.phy_semaphore_mask;
0654
0655 if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
0656 status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
0657 phy_data);
0658 hw->mac.ops.release_swfw_sync(hw, gssr);
0659 } else {
0660 return IXGBE_ERR_SWFW_SYNC;
0661 }
0662
0663 return status;
0664 }
0665
0666 #define IXGBE_HW_READ_REG(addr) IXGBE_READ_REG(hw, addr)
0667
0668
0669
0670
0671
0672
/**
 * ixgbe_msca_cmd - Write a command to the MSCA register and poll completion
 * @hw: pointer to hardware structure
 * @cmd: command register value to write
 *
 * Polls MSCA every 10 usec until the hardware clears the
 * IXGBE_MSCA_MDI_COMMAND bit.  Per readx_poll_timeout() semantics,
 * returns 0 on completion or -ETIMEDOUT if the bit is still set after
 * 10 * IXGBE_MDIO_COMMAND_TIMEOUT usec.
 */
static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
{
	IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);

	/* cmd is reused as the poll readback variable here */
	return readx_poll_timeout(IXGBE_HW_READ_REG, IXGBE_MSCA, cmd,
				  !(cmd & IXGBE_MSCA_MDI_COMMAND), 10,
				  10 * IXGBE_MDIO_COMMAND_TIMEOUT);
}
0681
0682
0683
0684
0685
0686
0687
0688
0689 static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
0690 int regnum, u32 gssr)
0691 {
0692 u32 hwaddr, cmd;
0693 s32 data;
0694
0695 if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
0696 return -EBUSY;
0697
0698 hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
0699 if (regnum & MII_ADDR_C45) {
0700 hwaddr |= regnum & GENMASK(21, 0);
0701 cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
0702 } else {
0703 hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
0704 cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
0705 IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
0706 }
0707
0708 data = ixgbe_msca_cmd(hw, cmd);
0709 if (data < 0)
0710 goto mii_bus_read_done;
0711
0712
0713
0714
0715 if (!(regnum & MII_ADDR_C45))
0716 goto do_mii_bus_read;
0717
0718 cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND;
0719 data = ixgbe_msca_cmd(hw, cmd);
0720 if (data < 0)
0721 goto mii_bus_read_done;
0722
0723 do_mii_bus_read:
0724 data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
0725 data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
0726
0727 mii_bus_read_done:
0728 hw->mac.ops.release_swfw_sync(hw, gssr);
0729 return data;
0730 }
0731
0732
0733
0734
0735
0736
0737
0738
0739
0740 static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
0741 int regnum, u16 val, u32 gssr)
0742 {
0743 u32 hwaddr, cmd;
0744 s32 err;
0745
0746 if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
0747 return -EBUSY;
0748
0749 IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
0750
0751 hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
0752 if (regnum & MII_ADDR_C45) {
0753 hwaddr |= regnum & GENMASK(21, 0);
0754 cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
0755 } else {
0756 hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
0757 cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
0758 IXGBE_MSCA_MDI_COMMAND;
0759 }
0760
0761
0762
0763
0764 err = ixgbe_msca_cmd(hw, cmd);
0765 if (err < 0 || !(regnum & MII_ADDR_C45))
0766 goto mii_bus_write_done;
0767
0768 cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND;
0769 err = ixgbe_msca_cmd(hw, cmd);
0770
0771 mii_bus_write_done:
0772 hw->mac.ops.release_swfw_sync(hw, gssr);
0773 return err;
0774 }
0775
0776
0777
0778
0779
0780
0781
0782 static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
0783 {
0784 struct ixgbe_adapter *adapter = bus->priv;
0785 struct ixgbe_hw *hw = &adapter->hw;
0786 u32 gssr = hw->phy.phy_semaphore_mask;
0787
0788 return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
0789 }
0790
0791
0792
0793
0794
0795
0796
0797
0798 static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
0799 u16 val)
0800 {
0801 struct ixgbe_adapter *adapter = bus->priv;
0802 struct ixgbe_hw *hw = &adapter->hw;
0803 u32 gssr = hw->phy.phy_semaphore_mask;
0804
0805 return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
0806 }
0807
0808
0809
0810
0811
0812
0813
0814 static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
0815 int regnum)
0816 {
0817 struct ixgbe_adapter *adapter = bus->priv;
0818 struct ixgbe_hw *hw = &adapter->hw;
0819 u32 gssr = hw->phy.phy_semaphore_mask;
0820
0821 gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
0822 return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
0823 }
0824
0825
0826
0827
0828
0829
0830
0831
0832 static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr,
0833 int regnum, u16 val)
0834 {
0835 struct ixgbe_adapter *adapter = bus->priv;
0836 struct ixgbe_hw *hw = &adapter->hw;
0837 u32 gssr = hw->phy.phy_semaphore_mask;
0838
0839 gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
0840 return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
0841 }
0842
0843
0844
0845
0846
0847
0848
0849
0850 static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
0851 {
0852 struct pci_dev *rp_pdev;
0853 int bus;
0854
0855 rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
0856 if (rp_pdev && rp_pdev->subordinate) {
0857 bus = rp_pdev->subordinate->number;
0858 return pci_get_domain_bus_and_slot(0, bus, 0);
0859 }
0860
0861 return NULL;
0862 }
0863
0864
0865
0866
0867
0868
0869
0870
0871
0872 static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
0873 {
0874 struct ixgbe_adapter *adapter = hw->back;
0875 struct pci_dev *pdev = adapter->pdev;
0876 struct pci_dev *func0_pdev;
0877
0878
0879
0880
0881
0882
0883
0884 func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
0885 if (func0_pdev) {
0886 if (func0_pdev == pdev)
0887 return true;
0888 else
0889 return false;
0890 }
0891 func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
0892 if (func0_pdev == pdev)
0893 return true;
0894
0895 return false;
0896 }
0897
0898
0899
0900
0901
0902
0903
0904
0905
/**
 * ixgbe_mii_bus_init - mii_bus structure setup
 * @hw: pointer to hardware structure
 *
 * Allocates (devm) and registers an MDIO bus for the adapter, selecting
 * the x550em_a-specific read/write callbacks for those device IDs.
 * Returns 0 on success (or when this x550em_a port does not own the MDIO
 * bus), -ENOMEM on allocation failure, or the mdiobus_register() error.
 */
s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
	s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
	s32 (*read)(struct mii_bus *bus, int addr, int regnum);
	struct ixgbe_adapter *adapter = hw->back;
	struct pci_dev *pdev = adapter->pdev;
	/* NOTE(review): bus is devm-allocated against the netdev's device;
	 * confirm the mdiobus is unregistered before the netdev is freed.
	 */
	struct device *dev = &adapter->netdev->dev;
	struct mii_bus *bus;

	switch (hw->device_id) {
	/* x550em_a device IDs use the token/PHY0-semaphore accessors */
	case IXGBE_DEV_ID_X550EM_A_KR:
	case IXGBE_DEV_ID_X550EM_A_KR_L:
	case IXGBE_DEV_ID_X550EM_A_SFP_N:
	case IXGBE_DEV_ID_X550EM_A_SGMII:
	case IXGBE_DEV_ID_X550EM_A_SGMII_L:
	case IXGBE_DEV_ID_X550EM_A_10G_T:
	case IXGBE_DEV_ID_X550EM_A_SFP:
	case IXGBE_DEV_ID_X550EM_A_1G_T:
	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
		/* only the first port on the bus registers the MDIO bus */
		if (!ixgbe_x550em_a_has_mii(hw))
			return 0;
		read = &ixgbe_x550em_a_mii_bus_read;
		write = &ixgbe_x550em_a_mii_bus_write;
		break;
	default:
		read = &ixgbe_mii_bus_read;
		write = &ixgbe_mii_bus_write;
		break;
	}

	bus = devm_mdiobus_alloc(dev);
	if (!bus)
		return -ENOMEM;

	bus->read = read;
	bus->write = write;

	/* Use the position of the device in the PCI hierarchy as the id */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
		 pci_name(pdev));

	bus->name = "ixgbe-mdio";
	bus->priv = adapter;
	bus->parent = dev;
	/* mask all PHY addresses from automatic probing */
	bus->phy_mask = GENMASK(31, 0);

	/* Advertise native clause 22/45 support on the mdio interface.
	 * NOTE(review): this overwrites whatever mode_support was set
	 * earlier during probe — confirm that is the intent.
	 */
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;

	adapter->mii_bus = bus;
	return mdiobus_register(bus);
}
0962
0963
0964
0965
0966
0967
0968
/**
 * ixgbe_setup_phy_link_generic - Set and restart autoneg
 * @hw: pointer to hardware structure
 *
 * Programs the autonegotiation advertisement registers (10G, and on X550
 * also 5G/2.5G, plus 1G and 100M) according to the intersection of
 * hw->phy.autoneg_advertised and the PHY's supported speeds, then
 * restarts autonegotiation unless MNG FW blocks it.  Always returns 0
 * (register read/write results are not checked here).
 */
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
	bool autoneg = false;
	ixgbe_link_speed speed;

	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

	/* Set or unset auto-negotiation 10G advertisement */
	hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
	    (speed & IXGBE_LINK_SPEED_10GB_FULL))
		autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;

	hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, MDIO_MMD_AN, autoneg_reg);

	hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			     MDIO_MMD_AN, &autoneg_reg);

	if (hw->mac.type == ixgbe_mac_X550) {
		/* Set or unset auto-negotiation 5G advertisement */
		autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
		if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
		    (speed & IXGBE_LINK_SPEED_5GB_FULL))
			autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;

		/* Set or unset auto-negotiation 2.5G advertisement */
		autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
		if ((hw->phy.autoneg_advertised &
		     IXGBE_LINK_SPEED_2_5GB_FULL) &&
		    (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
			autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
	}

	/* Set or unset auto-negotiation 1G advertisement */
	autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
	    (speed & IXGBE_LINK_SPEED_1GB_FULL))
		autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;

	hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
			      MDIO_MMD_AN, autoneg_reg);

	/* Set or unset auto-negotiation 100M advertisement */
	hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg &= ~(ADVERTISE_100FULL | ADVERTISE_100HALF);
	if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
	    (speed & IXGBE_LINK_SPEED_100_FULL))
		autoneg_reg |= ADVERTISE_100FULL;

	hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg);

	/* Blocked by MNG FW so don't restart AN */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Restart PHY autonegotiation */
	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
			     MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg |= MDIO_AN_CTRL1_RESTART;

	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_AN, autoneg_reg);

	return status;
}
1040
1041
1042
1043
1044
1045
1046
1047 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
1048 ixgbe_link_speed speed,
1049 bool autoneg_wait_to_complete)
1050 {
1051
1052
1053
1054 hw->phy.autoneg_advertised = 0;
1055
1056 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
1057 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
1058
1059 if (speed & IXGBE_LINK_SPEED_5GB_FULL)
1060 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
1061
1062 if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
1063 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
1064
1065 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
1066 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
1067
1068 if (speed & IXGBE_LINK_SPEED_100_FULL)
1069 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
1070
1071 if (speed & IXGBE_LINK_SPEED_10_FULL)
1072 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
1073
1074
1075 if (hw->phy.ops.setup_link)
1076 hw->phy.ops.setup_link(hw);
1077
1078 return 0;
1079 }
1080
1081
1082
1083
1084
1085
1086
1087
1088 static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
1089 {
1090 u16 speed_ability;
1091 s32 status;
1092
1093 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
1094 &speed_ability);
1095 if (status)
1096 return status;
1097
1098 if (speed_ability & MDIO_SPEED_10G)
1099 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
1100 if (speed_ability & MDIO_PMA_SPEED_1000)
1101 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
1102 if (speed_ability & MDIO_PMA_SPEED_100)
1103 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
1104
1105 switch (hw->mac.type) {
1106 case ixgbe_mac_X550:
1107 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
1108 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
1109 break;
1110 case ixgbe_mac_X550EM_x:
1111 case ixgbe_mac_x550em_a:
1112 hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
1113 break;
1114 default:
1115 break;
1116 }
1117
1118 return 0;
1119 }
1120
1121
1122
1123
1124
1125
1126
1127 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
1128 ixgbe_link_speed *speed,
1129 bool *autoneg)
1130 {
1131 s32 status = 0;
1132
1133 *autoneg = true;
1134 if (!hw->phy.speeds_supported)
1135 status = ixgbe_get_copper_speeds_supported(hw);
1136
1137 *speed = hw->phy.speeds_supported;
1138 return status;
1139 }
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
/**
 * ixgbe_check_phy_link_tnx - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer receiving the detected link speed
 * @link_up: pointer receiving link status
 *
 * Polls the TNX vendor-specific status register (up to 10 tries, 10 usec
 * apart) for link.  Defaults to 10G; reports 1G if the vendor speed bit
 * indicates so.  Returns the status of the last register read.
 */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
			     bool *link_up)
{
	s32 status;
	u32 time_out;
	u32 max_time_out = 10;
	u16 phy_link = 0;
	u16 phy_speed = 0;
	u16 phy_data = 0;

	/* Initialize speed and link to default case */
	*link_up = false;
	*speed = IXGBE_LINK_SPEED_10GB_FULL;

	/* Check current speed and link status of the PHY register.
	 * This is a vendor specific register and may have to
	 * be changed for other copper PHYs.
	 */
	for (time_out = 0; time_out < max_time_out; time_out++) {
		udelay(10);
		status = hw->phy.ops.read_reg(hw,
					      MDIO_STAT1,
					      MDIO_MMD_VEND1,
					      &phy_data);
		phy_link = phy_data &
			   IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
		phy_speed = phy_data &
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
		if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
			*link_up = true;
			if (phy_speed ==
			    IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
				*speed = IXGBE_LINK_SPEED_1GB_FULL;
			break;
		}
	}

	/* NOTE(review): only the final iteration's read status is
	 * propagated; intermediate read errors are ignored.
	 */
	return status;
}
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
/**
 * ixgbe_setup_phy_link_tnx - Set and restart autoneg
 * @hw: pointer to hardware structure
 *
 * Programs 10G/1G/100M autonegotiation advertisement for TNX PHYs based
 * on hw->phy.autoneg_advertised and the PHY's supported speeds, then
 * restarts autonegotiation unless MNG FW blocks it.  Always returns 0
 * (register read/write results are not checked here).
 */
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
	bool autoneg = false;
	ixgbe_link_speed speed;

	ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);

	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		/* Set or unset auto-negotiation 10G advertisement */
		hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;

		hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		/* Set or unset auto-negotiation 1G advertisement
		 * (TNX uses the XNP transmit register for this)
		 */
		hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
			autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;

		hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	if (speed & IXGBE_LINK_SPEED_100_FULL) {
		/* Set or unset auto-negotiation 100M advertisement */
		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
				     MDIO_MMD_AN,
				     &autoneg_reg);

		autoneg_reg &= ~(ADVERTISE_100FULL |
				 ADVERTISE_100HALF);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			autoneg_reg |= ADVERTISE_100FULL;

		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
				      MDIO_MMD_AN,
				      autoneg_reg);
	}

	/* Blocked by MNG FW so don't restart AN */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	/* Restart PHY autonegotiation */
	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
			     MDIO_MMD_AN, &autoneg_reg);

	autoneg_reg |= MDIO_AN_CTRL1_RESTART;

	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
			      MDIO_MMD_AN, autoneg_reg);
	return 0;
}
1268
1269
1270
1271
1272
1273 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
1274 {
1275 u16 phy_offset, control, eword, edata, block_crc;
1276 bool end_data = false;
1277 u16 list_offset, data_offset;
1278 u16 phy_data = 0;
1279 s32 ret_val;
1280 u32 i;
1281
1282
1283 if (ixgbe_check_reset_blocked(hw))
1284 return 0;
1285
1286 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
1287
1288
1289 hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
1290 (phy_data | MDIO_CTRL1_RESET));
1291
1292 for (i = 0; i < 100; i++) {
1293 hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
1294 &phy_data);
1295 if ((phy_data & MDIO_CTRL1_RESET) == 0)
1296 break;
1297 usleep_range(10000, 20000);
1298 }
1299
1300 if ((phy_data & MDIO_CTRL1_RESET) != 0) {
1301 hw_dbg(hw, "PHY reset did not complete.\n");
1302 return IXGBE_ERR_PHY;
1303 }
1304
1305
1306 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
1307 &data_offset);
1308 if (ret_val)
1309 return ret_val;
1310
1311 ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
1312 data_offset++;
1313 while (!end_data) {
1314
1315
1316
1317 ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
1318 if (ret_val)
1319 goto err_eeprom;
1320 control = (eword & IXGBE_CONTROL_MASK_NL) >>
1321 IXGBE_CONTROL_SHIFT_NL;
1322 edata = eword & IXGBE_DATA_MASK_NL;
1323 switch (control) {
1324 case IXGBE_DELAY_NL:
1325 data_offset++;
1326 hw_dbg(hw, "DELAY: %d MS\n", edata);
1327 usleep_range(edata * 1000, edata * 2000);
1328 break;
1329 case IXGBE_DATA_NL:
1330 hw_dbg(hw, "DATA:\n");
1331 data_offset++;
1332 ret_val = hw->eeprom.ops.read(hw, data_offset++,
1333 &phy_offset);
1334 if (ret_val)
1335 goto err_eeprom;
1336 for (i = 0; i < edata; i++) {
1337 ret_val = hw->eeprom.ops.read(hw, data_offset,
1338 &eword);
1339 if (ret_val)
1340 goto err_eeprom;
1341 hw->phy.ops.write_reg(hw, phy_offset,
1342 MDIO_MMD_PMAPMD, eword);
1343 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
1344 phy_offset);
1345 data_offset++;
1346 phy_offset++;
1347 }
1348 break;
1349 case IXGBE_CONTROL_NL:
1350 data_offset++;
1351 hw_dbg(hw, "CONTROL:\n");
1352 if (edata == IXGBE_CONTROL_EOL_NL) {
1353 hw_dbg(hw, "EOL\n");
1354 end_data = true;
1355 } else if (edata == IXGBE_CONTROL_SOL_NL) {
1356 hw_dbg(hw, "SOL\n");
1357 } else {
1358 hw_dbg(hw, "Bad control value\n");
1359 return IXGBE_ERR_PHY;
1360 }
1361 break;
1362 default:
1363 hw_dbg(hw, "Bad control type\n");
1364 return IXGBE_ERR_PHY;
1365 }
1366 }
1367
1368 return ret_val;
1369
1370 err_eeprom:
1371 hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
1372 return IXGBE_ERR_PHY;
1373 }
1374
1375
1376
1377
1378
1379
1380
1381 s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
1382 {
1383 switch (hw->mac.ops.get_media_type(hw)) {
1384 case ixgbe_media_type_fiber:
1385 return ixgbe_identify_sfp_module_generic(hw);
1386 case ixgbe_media_type_fiber_qsfp:
1387 return ixgbe_identify_qsfp_module_generic(hw);
1388 default:
1389 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1390 return IXGBE_ERR_SFP_NOT_PRESENT;
1391 }
1392
1393 return IXGBE_ERR_SFP_NOT_PRESENT;
1394 }
1395
1396
1397
1398
1399
1400
1401
/**
 * ixgbe_identify_sfp_module_generic - Identifies SFP modules
 * @hw: pointer to hardware structure
 *
 * Reads the module's serial ID EEPROM over I2C, classifies the SFP
 * (cable technology, compliance codes, vendor OUI) and assigns
 * hw->phy.sfp_type and hw->phy.type accordingly.
 *
 * Returns 0 on success, IXGBE_ERR_SFP_NOT_PRESENT if no module is
 * detected, or IXGBE_ERR_SFP_NOT_SUPPORTED for unsupported modules.
 */
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
{
	/* needed for the e_warn() macro below */
	struct ixgbe_adapter *adapter = hw->back;
	s32 status;
	u32 vendor_oui = 0;
	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
	u8 identifier = 0;
	u8 comp_codes_1g = 0;
	u8 comp_codes_10g = 0;
	u8 oui_bytes[3] = {0, 0, 0};
	u8 cable_tech = 0;
	u8 cable_spec = 0;
	u16 enforce_sfp = 0;

	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
		return IXGBE_ERR_SFP_NOT_PRESENT;
	}

	/* LAN ID is needed for core0/core1 sfp_type determination below */
	hw->mac.ops.set_lan_id(hw);

	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_IDENTIFIER,
					     &identifier);

	if (status)
		goto err_read_i2c_eeprom;

	/* anything other than an SFP identifier byte is unsupported */
	if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_1GBE_COMP_CODES,
					     &comp_codes_1g);

	if (status)
		goto err_read_i2c_eeprom;

	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_10GBE_COMP_CODES,
					     &comp_codes_10g);

	if (status)
		goto err_read_i2c_eeprom;
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_CABLE_TECHNOLOGY,
					     &cable_tech);

	if (status)
		goto err_read_i2c_eeprom;

	/* Classify the module into an ixgbe_sfp_type:
	 * - 82598 uses the single-port types (da_cu / sr / lr)
	 * - newer MACs use per-LAN-core types (core0 / core1), chosen
	 *   by hw->bus.lan_id, and distinguish passive DA, active DA
	 *   (limiting vs. unknown), SR/LR optics and 1G copper/SX/LX.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
		else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_sr;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			hw->phy.sfp_type = ixgbe_sfp_type_lr;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	} else {
		if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_cu_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_cu_core1;
		} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
			/* active DA: check the cable spec byte to see if the
			 * cable is active limiting; read errors fall through
			 * to cable_spec == 0 -> sfp_type_unknown
			 */
			hw->phy.ops.read_i2c_eeprom(
				hw, IXGBE_SFF_CABLE_SPEC_COMP,
				&cable_spec);
			if (cable_spec &
			    IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
				if (hw->bus.lan_id == 0)
					hw->phy.sfp_type =
						ixgbe_sfp_type_da_act_lmt_core0;
				else
					hw->phy.sfp_type =
						ixgbe_sfp_type_da_act_lmt_core1;
			} else {
				hw->phy.sfp_type =
					ixgbe_sfp_type_unknown;
			}
		} else if (comp_codes_10g &
			   (IXGBE_SFF_10GBASESR_CAPABLE |
			    IXGBE_SFF_10GBASELR_CAPABLE)) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_srlr_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_srlr_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_cu_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_cu_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_sx_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_sx_core1;
		} else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_lx_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_1g_lx_core1;
		} else {
			hw->phy.sfp_type = ixgbe_sfp_type_unknown;
		}
	}

	/* a changed module type means link setup must be redone */
	if (hw->phy.sfp_type != stored_sfp_type)
		hw->phy.sfp_setup_needed = true;

	/* Determine if the SFP+ module supports both 1G and 10G rates */
	hw->phy.multispeed_fiber = false;
	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
	    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
		hw->phy.multispeed_fiber = true;

	/* Determine PHY vendor (skipped for NL PHYs, which keep their id) */
	if (hw->phy.type != ixgbe_phy_nl) {
		hw->phy.id = identifier;
		status = hw->phy.ops.read_i2c_eeprom(hw,
						     IXGBE_SFF_VENDOR_OUI_BYTE0,
						     &oui_bytes[0]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
						     IXGBE_SFF_VENDOR_OUI_BYTE1,
						     &oui_bytes[1]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
						     IXGBE_SFF_VENDOR_OUI_BYTE2,
						     &oui_bytes[2]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		/* pack the three OUI bytes into one 32-bit value */
		vendor_oui =
			((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
			 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
			 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));

		switch (vendor_oui) {
		case IXGBE_SFF_VENDOR_OUI_TYCO:
			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
				hw->phy.type =
					ixgbe_phy_sfp_passive_tyco;
			break;
		case IXGBE_SFF_VENDOR_OUI_FTL:
			if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
				hw->phy.type = ixgbe_phy_sfp_ftl_active;
			else
				hw->phy.type = ixgbe_phy_sfp_ftl;
			break;
		case IXGBE_SFF_VENDOR_OUI_AVAGO:
			hw->phy.type = ixgbe_phy_sfp_avago;
			break;
		case IXGBE_SFF_VENDOR_OUI_INTEL:
			hw->phy.type = ixgbe_phy_sfp_intel;
			break;
		default:
			if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
				hw->phy.type =
					ixgbe_phy_sfp_passive_unknown;
			else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
				hw->phy.type =
					ixgbe_phy_sfp_active_unknown;
			else
				hw->phy.type = ixgbe_phy_sfp_unknown;
			break;
		}
	}

	/* all passive and active DA cables are allowed as-is */
	if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
			  IXGBE_SFF_DA_ACTIVE_CABLE))
		return 0;

	/* a module with no 10G compliance codes must be one of the
	 * recognized 1G types to be supported
	 */
	if (comp_codes_10g == 0 &&
	    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	/* 82598 performs no further vendor enforcement */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return 0;

	/* Enforce Intel-qualified modules unless the device caps (or the
	 * allow_unsupported_sfp override) say otherwise; 1G types are
	 * exempt from enforcement here.
	 */
	hw->mac.ops.get_device_caps(hw, &enforce_sfp);
	if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
	    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
	      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
		/* Make sure we're a supported PHY type */
		if (hw->phy.type == ixgbe_phy_sfp_intel)
			return 0;
		if (hw->allow_unsupported_sfp) {
			e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
			return 0;
		}
		hw_dbg(hw, "SFP+ module not supported\n");
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}
	return 0;

err_read_i2c_eeprom:
	/* EEPROM read failed: treat the module as absent */
	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
	if (hw->phy.type != ixgbe_phy_nl) {
		hw->phy.id = 0;
		hw->phy.type = ixgbe_phy_unknown;
	}
	return IXGBE_ERR_SFP_NOT_PRESENT;
}
1660
1661
1662
1663
1664
1665
1666
/**
 * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
 * @hw: pointer to hardware structure
 *
 * Reads the QSFP module's EEPROM over I2C, classifies it (passive DA,
 * SR/LR optics, or active cable) and assigns hw->phy.sfp_type and
 * hw->phy.type accordingly.
 *
 * Returns 0 on success, IXGBE_ERR_SFP_NOT_PRESENT if no module is
 * detected, or IXGBE_ERR_SFP_NOT_SUPPORTED for unsupported modules.
 */
static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
{
	/* needed for the e_warn() macro below */
	struct ixgbe_adapter *adapter = hw->back;
	s32 status;
	u32 vendor_oui = 0;
	enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
	u8 identifier = 0;
	u8 comp_codes_1g = 0;
	u8 comp_codes_10g = 0;
	u8 oui_bytes[3] = {0, 0, 0};
	u16 enforce_sfp = 0;
	u8 connector = 0;
	u8 cable_length = 0;
	u8 device_tech = 0;
	bool active_cable = false;

	if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
		return IXGBE_ERR_SFP_NOT_PRESENT;
	}

	/* LAN ID is needed for core0/core1 sfp_type determination below */
	hw->mac.ops.set_lan_id(hw);

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
					     &identifier);

	if (status != 0)
		goto err_read_i2c_eeprom;

	/* anything other than a QSFP+ identifier byte is unsupported */
	if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
		hw->phy.type = ixgbe_phy_sfp_unsupported;
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	hw->phy.id = identifier;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
					     &comp_codes_10g);

	if (status != 0)
		goto err_read_i2c_eeprom;

	status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
					     &comp_codes_1g);

	if (status != 0)
		goto err_read_i2c_eeprom;

	if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
		hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
		if (hw->bus.lan_id == 0)
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
	} else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
				     IXGBE_SFF_10GBASELR_CAPABLE)) {
		if (hw->bus.lan_id == 0)
			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
		else
			hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
	} else {
		if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
			active_cable = true;

		if (!active_cable) {
			/* Not explicitly flagged as active: treat the cable
			 * as active anyway when it is a non-separable
			 * connector of non-zero length using 850nm VCSEL
			 * transmitter technology.  EEPROM read failures here
			 * leave the defaults (zero) and fall through to the
			 * unsupported path below.
			 */
			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_CONNECTOR,
					&connector);

			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_CABLE_LENGTH,
					&cable_length);

			hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_DEVICE_TECH,
					&device_tech);

			if ((connector ==
			     IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
			    (cable_length > 0) &&
			    ((device_tech >> 4) ==
			     IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
				active_cable = true;
		}

		if (active_cable) {
			hw->phy.type = ixgbe_phy_qsfp_active_unknown;
			if (hw->bus.lan_id == 0)
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core0;
			else
				hw->phy.sfp_type =
					ixgbe_sfp_type_da_act_lmt_core1;
		} else {
			/* unsupported module type */
			hw->phy.type = ixgbe_phy_sfp_unsupported;
			return IXGBE_ERR_SFP_NOT_SUPPORTED;
		}
	}

	/* a changed module type means link setup must be redone */
	if (hw->phy.sfp_type != stored_sfp_type)
		hw->phy.sfp_setup_needed = true;

	/* Determine if the QSFP+ module supports both 1G and 10G rates */
	hw->phy.multispeed_fiber = false;
	if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
	    ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
	     (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
		hw->phy.multispeed_fiber = true;

	/* For optical (SR/LR) modules, determine the vendor and enforce
	 * Intel-qualified modules unless overridden.
	 */
	if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
			      IXGBE_SFF_10GBASELR_CAPABLE)) {
		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
					&oui_bytes[0]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
					&oui_bytes[1]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		status = hw->phy.ops.read_i2c_eeprom(hw,
					IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
					&oui_bytes[2]);

		if (status != 0)
			goto err_read_i2c_eeprom;

		/* pack the three OUI bytes into one 32-bit value */
		vendor_oui =
			((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
			 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
			 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));

		if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
			hw->phy.type = ixgbe_phy_qsfp_intel;
		else
			hw->phy.type = ixgbe_phy_qsfp_unknown;

		hw->mac.ops.get_device_caps(hw, &enforce_sfp);
		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
			/* Make sure we're a supported PHY type */
			if (hw->phy.type == ixgbe_phy_qsfp_intel)
				return 0;
			if (hw->allow_unsupported_sfp) {
				e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
				return 0;
			}
			hw_dbg(hw, "QSFP module not supported\n");
			hw->phy.type = ixgbe_phy_sfp_unsupported;
			return IXGBE_ERR_SFP_NOT_SUPPORTED;
		}
		return 0;
	}
	return 0;

err_read_i2c_eeprom:
	/* EEPROM read failed: treat the module as absent */
	hw->phy.sfp_type = ixgbe_sfp_type_not_present;
	hw->phy.id = 0;
	hw->phy.type = ixgbe_phy_unknown;

	return IXGBE_ERR_SFP_NOT_PRESENT;
}
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
/**
 * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
 * @hw: pointer to hardware structure
 * @list_offset: offset to the SFP ID list
 * @data_offset: offset to the SFP data block
 *
 * Walks the PHY init-sequence list in the EEPROM (starting at
 * IXGBE_PHY_INIT_OFFSET_NL) looking for an entry matching the current
 * SFP type, and returns the offsets of the list entry and its data
 * block through @list_offset and @data_offset.
 *
 * Returns 0 on success; IXGBE_ERR_SFP_NOT_SUPPORTED / _NOT_PRESENT for
 * bad module states, IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT when the EEPROM
 * carries no init sequence, or IXGBE_ERR_PHY on EEPROM read failure.
 */
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
					u16 *list_offset,
					u16 *data_offset)
{
	u16 sfp_id;
	u16 sfp_type = hw->phy.sfp_type;

	if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		return IXGBE_ERR_SFP_NOT_PRESENT;

	/* this specific device/module combination is not supported */
	if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
		return IXGBE_ERR_SFP_NOT_SUPPORTED;

	/* Limiting active cables and 1G modules use the SR/LR init
	 * sequence, so alias their type (per LAN core) before searching
	 * the list.
	 */
	if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
	    sfp_type == ixgbe_sfp_type_1g_sx_core0)
		sfp_type = ixgbe_sfp_type_srlr_core0;
	else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
		 sfp_type == ixgbe_sfp_type_1g_sx_core1)
		sfp_type = ixgbe_sfp_type_srlr_core1;

	/* Read offset to the PHY init contents */
	if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
		hw_err(hw, "eeprom read at %d failed\n",
		       IXGBE_PHY_INIT_OFFSET_NL);
		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
	}

	/* 0 and 0xFFFF both mean "no init sequence present" */
	if ((!*list_offset) || (*list_offset == 0xFFFF))
		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;

	/* Shift offset to the first ID word */
	(*list_offset)++;

	/* Walk the [sfp_id, data_offset] entry pairs until we either find
	 * a matching SFP ID or hit the end-of-list marker.
	 */
	if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
		goto err_phy;

	while (sfp_id != IXGBE_PHY_INIT_END_NL) {
		if (sfp_id == sfp_type) {
			(*list_offset)++;
			if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
				goto err_phy;
			if ((!*data_offset) || (*data_offset == 0xFFFF)) {
				hw_dbg(hw, "SFP+ module not supported\n");
				return IXGBE_ERR_SFP_NOT_SUPPORTED;
			} else {
				break;
			}
		} else {
			/* skip this entry's data-offset word too */
			(*list_offset) += 2;
			if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
				goto err_phy;
		}
	}

	if (sfp_id == IXGBE_PHY_INIT_END_NL) {
		hw_dbg(hw, "No matching SFP+ module found\n");
		return IXGBE_ERR_SFP_NOT_SUPPORTED;
	}

	return 0;

err_phy:
	hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
	return IXGBE_ERR_PHY;
}
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940 s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1941 u8 *eeprom_data)
1942 {
1943 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1944 IXGBE_I2C_EEPROM_DEV_ADDR,
1945 eeprom_data);
1946 }
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956 s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
1957 u8 *sff8472_data)
1958 {
1959 return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1960 IXGBE_I2C_EEPROM_DEV_ADDR2,
1961 sff8472_data);
1962 }
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1973 u8 eeprom_data)
1974 {
1975 return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1976 IXGBE_I2C_EEPROM_DEV_ADDR,
1977 eeprom_data);
1978 }
1979
1980
1981
1982
1983
1984
1985
1986 static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr)
1987 {
1988 if (addr == IXGBE_I2C_EEPROM_DEV_ADDR &&
1989 offset == IXGBE_SFF_IDENTIFIER &&
1990 hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1991 return true;
1992 return false;
1993 }
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
/**
 * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: I2C device address
 * @data: value read
 * @lock: true if the SW/FW semaphore should be taken for each attempt
 *
 * Performs a bit-banged I2C byte read: write the device address and
 * byte offset, repeated-start, then read one byte (NACKed to end the
 * transfer).  Failed attempts clear the bus and retry up to max_retry
 * times.
 *
 * Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 * taken, or the last I2C error status after all retries fail.
 */
static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
					   u8 dev_addr, u8 *data, bool lock)
{
	s32 status;
	u32 max_retry = 10;
	u32 retry = 0;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	/* NACK the last byte read to terminate the transfer */
	bool nack = true;

	/* newer MACs use fewer retries; SFP probes get their own count */
	if (hw->mac.type >= ixgbe_mac_X550)
		max_retry = 3;
	if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
		max_retry = IXGBE_SFP_DETECT_RETRIES;

	*data = 0;

	do {
		if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
			return IXGBE_ERR_SWFW_SYNC;

		ixgbe_i2c_start(hw);

		/* Device Address and write indication */
		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		/* repeated start to switch to read mode */
		ixgbe_i2c_start(hw);

		/* Device Address and read indication */
		status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_in_i2c_byte(hw, data);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_bit(hw, nack);
		if (status != 0)
			goto fail;

		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* reset the bus, drop the semaphore, and retry */
		ixgbe_i2c_bus_clear(hw);
		if (lock) {
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
			msleep(100);
		}
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte read error - Retrying.\n");
		else
			hw_dbg(hw, "I2C byte read error.\n");

	} while (retry < max_retry);

	return status;
}
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
2097 u8 dev_addr, u8 *data)
2098 {
2099 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2100 data, true);
2101 }
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113 s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
2114 u8 dev_addr, u8 *data)
2115 {
2116 return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2117 data, false);
2118 }
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
/**
 * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: I2C device address
 * @data: value to write
 * @lock: true if the SW/FW semaphore should be taken
 *
 * Performs a bit-banged I2C byte write: device address, byte offset,
 * then the data byte, each followed by an ACK check.  A failed attempt
 * clears the bus and retries (max_retry is 1, i.e. no retries).
 *
 * Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore cannot be
 * taken, or the last I2C error status.
 */
static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
					    u8 dev_addr, u8 data, bool lock)
{
	s32 status;
	u32 max_retry = 1;
	u32 retry = 0;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;

	/* unlike the read path, the semaphore spans all attempts */
	if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return IXGBE_ERR_SWFW_SYNC;

	do {
		ixgbe_i2c_start(hw);

		/* Device Address and write indication */
		status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		status = ixgbe_clock_out_i2c_byte(hw, data);
		if (status != 0)
			goto fail;

		status = ixgbe_get_i2c_ack(hw);
		if (status != 0)
			goto fail;

		ixgbe_i2c_stop(hw);
		if (lock)
			hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		return 0;

fail:
		/* reset the bus and retry (if any retries remain) */
		ixgbe_i2c_bus_clear(hw);
		retry++;
		if (retry < max_retry)
			hw_dbg(hw, "I2C byte write error - Retrying.\n");
		else
			hw_dbg(hw, "I2C byte write error.\n");
	} while (retry < max_retry);

	if (lock)
		hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	return status;
}
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
2200 u8 dev_addr, u8 data)
2201 {
2202 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2203 data, true);
2204 }
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216 s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset,
2217 u8 dev_addr, u8 data)
2218 {
2219 return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr,
2220 data, false);
2221 }
2222
2223
2224
2225
2226
2227
2228
2229
/**
 * ixgbe_i2c_start - Sets I2C start condition
 * @hw: pointer to hardware structure
 *
 * Sets the I2C start condition (High -> Low transition on SDA while
 * SCL is high).  Also sets the bit-bang enable bit where the hardware
 * defines one (IXGBE_I2C_BB_EN).
 */
static void ixgbe_i2c_start(struct ixgbe_hw *hw)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));

	i2cctl |= IXGBE_I2C_BB_EN(hw);

	/* Start condition must begin with data and clock high */
	ixgbe_set_i2c_data(hw, &i2cctl, 1);
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for start condition */
	udelay(IXGBE_I2C_T_SU_STA);

	ixgbe_set_i2c_data(hw, &i2cctl, 0);

	/* Hold time for start condition */
	udelay(IXGBE_I2C_T_HD_STA);

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock */
	udelay(IXGBE_I2C_T_LOW);

}
2254
2255
2256
2257
2258
2259
2260
2261
2262
/**
 * ixgbe_i2c_stop - Sets I2C stop condition
 * @hw: pointer to hardware structure
 *
 * Sets the I2C stop condition (Low -> High transition on SDA while SCL
 * is high), then disables bit-bang mode and tri-states the data/clock
 * outputs where the hardware defines those control bits.
 */
static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
	u32 bb_en_bit = IXGBE_I2C_BB_EN(hw);

	/* Stop condition must begin with data low and clock high */
	ixgbe_set_i2c_data(hw, &i2cctl, 0);
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Setup time for stop condition */
	udelay(IXGBE_I2C_T_SU_STO);

	ixgbe_set_i2c_data(hw, &i2cctl, 1);

	/* bus free time between stop and start */
	udelay(IXGBE_I2C_T_BUF);

	/* if this hardware has bit-bang/OE control bits, release the bus */
	if (bb_en_bit || data_oe_bit || clk_oe_bit) {
		i2cctl &= ~bb_en_bit;
		i2cctl |= data_oe_bit | clk_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
}
2289
2290
2291
2292
2293
2294
2295
2296
2297 static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
2298 {
2299 s32 i;
2300 bool bit = false;
2301
2302 *data = 0;
2303 for (i = 7; i >= 0; i--) {
2304 ixgbe_clock_in_i2c_bit(hw, &bit);
2305 *data |= bit << i;
2306 }
2307
2308 return 0;
2309 }
2310
2311
2312
2313
2314
2315
2316
2317
/**
 * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
 * @hw: pointer to hardware structure
 * @data: data byte clocked out
 *
 * Clocks out one byte, most significant bit first, then releases SDA
 * (sets data out high and, where defined, the output-enable bit) so
 * the slave can drive the ACK bit.
 *
 * Returns 0 on success or the error from the first failed bit.
 */
static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
{
	s32 status;
	s32 i;
	u32 i2cctl;
	bool bit = false;

	/* MSB first */
	for (i = 7; i >= 0; i--) {
		bit = (data >> i) & 0x1;
		status = ixgbe_clock_out_i2c_bit(hw, bit);

		if (status != 0)
			break;
	}

	/* Release SDA line (set high) so the slave can respond */
	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	i2cctl |= IXGBE_I2C_DATA_OUT(hw);
	i2cctl |= IXGBE_I2C_DATA_OE_N_EN(hw);
	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
	IXGBE_WRITE_FLUSH(hw);

	return status;
}
2342
2343
2344
2345
2346
2347
2348
/**
 * ixgbe_get_i2c_ack - Polls for I2C ACK
 * @hw: pointer to hardware structure
 *
 * Clocks SCL high and polls SDA for the slave's ACK (SDA pulled low).
 * On hardware with a data output-enable bit, SDA is first released so
 * the slave can drive it.
 *
 * Returns 0 if an ACK was seen, IXGBE_ERR_I2C otherwise.
 */
static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
{
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
	s32 status = 0;
	u32 i = 0;
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	u32 timeout = 10;
	bool ack = true;

	/* release SDA so the slave can drive the ACK */
	if (data_oe_bit) {
		i2cctl |= IXGBE_I2C_DATA_OUT(hw);
		i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock */
	udelay(IXGBE_I2C_T_HIGH);

	/* Poll for ACK.  Note that ACK in I2C spec is transition from
	 * 1 to 0: SDA low means the slave acknowledged.
	 */
	for (i = 0; i < timeout; i++) {
		i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
		ack = ixgbe_get_i2c_data(hw, &i2cctl);

		udelay(1);
		if (ack == 0)
			break;
	}

	if (ack == 1) {
		hw_dbg(hw, "I2C ack was not received.\n");
		status = IXGBE_ERR_I2C;
	}

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock */
	udelay(IXGBE_I2C_T_LOW);

	return status;
}
2392
2393
2394
2395
2396
2397
2398
2399
/**
 * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
 * @hw: pointer to hardware structure
 * @data: read data value
 *
 * Raises SCL, samples SDA while the clock is high, then lowers SCL.
 * On hardware with a data output-enable bit, SDA is first released so
 * the slave can drive it.  Always returns 0.
 */
static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
{
	u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);

	/* release SDA so the slave can drive it */
	if (data_oe_bit) {
		i2cctl |= IXGBE_I2C_DATA_OUT(hw);
		i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}
	ixgbe_raise_i2c_clk(hw, &i2cctl);

	/* Minimum high period of clock */
	udelay(IXGBE_I2C_T_HIGH);

	/* sample the data line while the clock is high */
	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	*data = ixgbe_get_i2c_data(hw, &i2cctl);

	ixgbe_lower_i2c_clk(hw, &i2cctl);

	/* Minimum low period of clock */
	udelay(IXGBE_I2C_T_LOW);

	return 0;
}
2426
2427
2428
2429
2430
2431
2432
2433
2434 static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
2435 {
2436 s32 status;
2437 u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
2438
2439 status = ixgbe_set_i2c_data(hw, &i2cctl, data);
2440 if (status == 0) {
2441 ixgbe_raise_i2c_clk(hw, &i2cctl);
2442
2443
2444 udelay(IXGBE_I2C_T_HIGH);
2445
2446 ixgbe_lower_i2c_clk(hw, &i2cctl);
2447
2448
2449
2450
2451 udelay(IXGBE_I2C_T_LOW);
2452 } else {
2453 hw_dbg(hw, "I2C data was not set to %X\n", data);
2454 return IXGBE_ERR_I2C;
2455 }
2456
2457 return 0;
2458 }
2459
2460
2461
2462
2463
2464
2465
2466
/**
 * ixgbe_raise_i2c_clk - Raises the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Drives SCL high and re-reads the clock input until it actually reads
 * back high, up to IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT attempts — this
 * allows for slave clock stretching.  On hardware with a clock
 * output-enable bit, that bit is set first.
 */
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
	u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN(hw);
	u32 i = 0;
	u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
	u32 i2cctl_r = 0;

	if (clk_oe_bit) {
		*i2cctl |= clk_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
	}

	for (i = 0; i < timeout; i++) {
		*i2cctl |= IXGBE_I2C_CLK_OUT(hw);
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
		IXGBE_WRITE_FLUSH(hw);

		/* SCL rise time */
		udelay(IXGBE_I2C_T_RISE);

		/* done once the clock line reads back high */
		i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
		if (i2cctl_r & IXGBE_I2C_CLK_IN(hw))
			break;
	}
}
2491
2492
2493
2494
2495
2496
2497
2498
2499
/**
 * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Drives SCL low (clearing the clock output bit and, where defined,
 * the clock output-enable bit) and waits out the fall time.
 */
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{

	*i2cctl &= ~IXGBE_I2C_CLK_OUT(hw);
	*i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN(hw);

	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
	IXGBE_WRITE_FLUSH(hw);

	/* SCL fall time */
	udelay(IXGBE_I2C_T_FALL);
}
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
/**
 * ixgbe_set_i2c_data - Sets the I2C data bit
 * @hw: pointer to hardware structure
 * @i2cctl: Current value of I2CCTL register
 * @data: I2C data value (0 or 1) to set
 *
 * Drives SDA to @data.  When setting high, the line is then released
 * (output-enable bit set, where defined) and read back to verify it
 * actually went high — a low readback means another device is holding
 * the bus.
 *
 * Returns 0 on success, IXGBE_ERR_I2C if the readback does not match.
 */
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
{
	u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);

	if (data)
		*i2cctl |= IXGBE_I2C_DATA_OUT(hw);
	else
		*i2cctl &= ~IXGBE_I2C_DATA_OUT(hw);
	*i2cctl &= ~data_oe_bit;

	IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
	IXGBE_WRITE_FLUSH(hw);

	/* Data rise/fall and set-up time */
	udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);

	/* only a high value is verified below */
	if (!data)
		return 0;
	if (data_oe_bit) {
		*i2cctl |= data_oe_bit;
		IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
		IXGBE_WRITE_FLUSH(hw);
	}

	/* Verify the data line reads back as set */
	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));
	if (data != ixgbe_get_i2c_data(hw, i2cctl)) {
		hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
		return IXGBE_ERR_I2C;
	}

	return 0;
}
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564 static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl)
2565 {
2566 u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw);
2567
2568 if (data_oe_bit) {
2569 *i2cctl |= data_oe_bit;
2570 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl);
2571 IXGBE_WRITE_FLUSH(hw);
2572 udelay(IXGBE_I2C_T_FALL);
2573 }
2574
2575 if (*i2cctl & IXGBE_I2C_DATA_IN(hw))
2576 return true;
2577 return false;
2578 }
2579
2580
2581
2582
2583
2584
2585
2586
/**
 * ixgbe_i2c_bus_clear - Clears the I2C bus
 * @hw: pointer to hardware structure
 *
 * Recovers a hung I2C bus: with SDA released high, pulses SCL nine
 * times so any slave mid-transfer can finish clocking out its data,
 * then issues start and stop conditions to leave the bus idle.
 */
static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
{
	u32 i2cctl;
	u32 i;

	ixgbe_i2c_start(hw);
	i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw));

	ixgbe_set_i2c_data(hw, &i2cctl, 1);

	for (i = 0; i < 9; i++) {
		ixgbe_raise_i2c_clk(hw, &i2cctl);

		/* Min high period of clock */
		udelay(IXGBE_I2C_T_HIGH);

		ixgbe_lower_i2c_clk(hw, &i2cctl);

		/* Min low period of clock */
		udelay(IXGBE_I2C_T_LOW);
	}

	ixgbe_i2c_start(hw);

	/* Put the i2c bus back to default state */
	ixgbe_i2c_stop(hw);
}
2614
2615
2616
2617
2618
2619
2620
2621 s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
2622 {
2623 u16 phy_data = 0;
2624
2625 if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
2626 return 0;
2627
2628
2629 hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
2630 MDIO_MMD_PMAPMD, &phy_data);
2631
2632 if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
2633 return 0;
2634
2635 return IXGBE_ERR_OVERTEMP;
2636 }
2637
2638
2639
2640
2641
2642 s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on)
2643 {
2644 u32 status;
2645 u16 reg;
2646
2647
2648 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2649 return 0;
2650
2651 if (!on && ixgbe_mng_present(hw))
2652 return 0;
2653
2654 status = hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, ®);
2655 if (status)
2656 return status;
2657
2658 if (on) {
2659 reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2660 } else {
2661 if (ixgbe_check_reset_blocked(hw))
2662 return 0;
2663 reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
2664 }
2665
2666 status = hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_VEND1, reg);
2667 return status;
2668 }