// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */
#include <linux/if_ether.h>
#include <linux/delay.h>

#include "e1000_mac.h"
#include "e1000_nvm.h"
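
/**
 *  igb_raise_eec_clk - Raise EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the EECD register value
 *
 *  Enable/Raise the EEPROM clock bit.
 **/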
static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd | E1000_EECD_SK;
	wr32(E1000_EECD, *eecd);
	wrfl();
	udelay(hw->nvm.delay_usec);
}
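
/**
 *  igb_lower_eec_clk - Lower EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the EECD register value
 *
 *  Clear/Lower the EEPROM clock bit.
 **/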
static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd & ~E1000_EECD_SK;
	wr32(E1000_EECD, *eecd);
	wrfl();
	udelay(hw->nvm.delay_usec);
}
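
/**
 *  igb_shift_out_eec_bits - Shift data bits out to the EEPROM
 *  @hw: pointer to the HW structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out
 *
 *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
 *  "data" parameter will be shifted out to the EEPROM one bit at a time.
 *  In order to do this, "data" must be broken down into bits.
 **/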
static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);
	u32 mask;

	mask = 1u << (count - 1);
	if (nvm->type == e1000_nvm_eeprom_spi)
		eecd |= E1000_EECD_DO;

	do {
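		/* A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */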
		eecd &= ~E1000_EECD_DI;

		if (data & mask)
			eecd |= E1000_EECD_DI;

		wr32(E1000_EECD, eecd);
		wrfl();

		udelay(nvm->delay_usec);

		igb_raise_eec_clk(hw, &eecd);
		igb_lower_eec_clk(hw, &eecd);

		mask >>= 1;
	} while (mask);

	eecd &= ~E1000_EECD_DI;
	wr32(E1000_EECD, eecd);
}
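
/**
 *  igb_shift_in_eec_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to the HW structure
 *  @count: number of bits to shift in
 *
 *  In order to read a register from the EEPROM, we need to shift 'count' bits
 *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
 *  the EEPROM (setting the SK bit), and then reading the value of the data out
 *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
 *  always be clear.
 **/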
static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
{
	u32 eecd;
	u32 i;
	u16 data;

	eecd = rd32(E1000_EECD);

	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
	data = 0;

	for (i = 0; i < count; i++) {
		data <<= 1;
		igb_raise_eec_clk(hw, &eecd);

		eecd = rd32(E1000_EECD);

		eecd &= ~E1000_EECD_DI;
		if (eecd & E1000_EECD_DO)
			data |= 1;

		igb_lower_eec_clk(hw, &eecd);
	}

	return data;
}
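
/**
 *  igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion
 *  @hw: pointer to the HW structure
 *  @ee_reg: EEPROM flag for polling
 *
 *  Polls the EEPROM status bit for either read or write completion based
 *  upon the value of 'ee_reg'.
 **/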
static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
{
	u32 attempts = 100000;
	u32 i, reg = 0;
	s32 ret_val = -E1000_ERR_NVM;

	for (i = 0; i < attempts; i++) {
		if (ee_reg == E1000_NVM_POLL_READ)
			reg = rd32(E1000_EERD);
		else
			reg = rd32(E1000_EEWR);

		if (reg & E1000_NVM_RW_REG_DONE) {
			ret_val = 0;
			break;
		}

		udelay(5);
	}

	return ret_val;
}
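
/**
 *  igb_acquire_nvm - Generic request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/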
s32 igb_acquire_nvm(struct e1000_hw *hw)
{
	u32 eecd = rd32(E1000_EECD);
	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
	s32 ret_val = 0;

	wr32(E1000_EECD, eecd | E1000_EECD_REQ);
	eecd = rd32(E1000_EECD);

	while (timeout) {
		if (eecd & E1000_EECD_GNT)
			break;
		udelay(5);
		eecd = rd32(E1000_EECD);
		timeout--;
	}

	if (!timeout) {
		eecd &= ~E1000_EECD_REQ;
		wr32(E1000_EECD, eecd);
		hw_dbg("Could not acquire NVM grant\n");
		ret_val = -E1000_ERR_NVM;
	}

	return ret_val;
}
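
/**
 *  igb_standby_nvm - Return EEPROM to standby state
 *  @hw: pointer to the HW structure
 *
 *  Return the EEPROM to a standby state.
 **/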
static void igb_standby_nvm(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);

	if (nvm->type == e1000_nvm_eeprom_spi) {
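		/* Toggle CS to flush commands */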
		eecd |= E1000_EECD_CS;
		wr32(E1000_EECD, eecd);
		wrfl();
		udelay(nvm->delay_usec);
		eecd &= ~E1000_EECD_CS;
		wr32(E1000_EECD, eecd);
		wrfl();
		udelay(nvm->delay_usec);
	}
}
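
/**
 *  e1000_stop_nvm - Terminate EEPROM command
 *  @hw: pointer to the HW structure
 *
 *  Terminates the current command by inverting the EEPROM's chip select pin.
 **/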
static void e1000_stop_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	eecd = rd32(E1000_EECD);
	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
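		/* Pull CS high */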
		eecd |= E1000_EECD_CS;
		igb_lower_eec_clk(hw, &eecd);
	}
}
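
/**
 *  igb_release_nvm - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
 **/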
void igb_release_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	e1000_stop_nvm(hw);

	eecd = rd32(E1000_EECD);
	eecd &= ~E1000_EECD_REQ;
	wr32(E1000_EECD, eecd);
}
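
/**
 *  igb_ready_nvm_eeprom - Prepares EEPROM for read/write
 *  @hw: pointer to the HW structure
 *
 *  Sets up the EEPROM for reading and writing.
 **/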
static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(E1000_EECD);
	s32 ret_val = 0;
	u16 timeout = 0;
	u8 spi_stat_reg;

	if (nvm->type == e1000_nvm_eeprom_spi) {
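		/* Clear SK and CS */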
		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
		wr32(E1000_EECD, eecd);
		wrfl();
		udelay(1);
		timeout = NVM_MAX_RETRY_SPI;
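
		/* Read "Status Register" repeatedly until the LSB is cleared.
		 * The EEPROM will signal that the command has been completed
		 * by clearing bit 0 of the internal status register.  If it's
		 * not cleared within 'timeout', then error out.
		 */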
		while (timeout) {
			igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
					       hw->nvm.opcode_bits);
			spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
				break;

			udelay(5);
			igb_standby_nvm(hw);
			timeout--;
		}

		if (!timeout) {
			hw_dbg("SPI NVM Status error\n");
			ret_val = -E1000_ERR_NVM;
			goto out;
		}
	}

out:
	return ret_val;
}
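
/**
 *  igb_read_nvm_spi - Read EEPROM using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of words to read
 *  @data: word(s) read from the EEPROM
 *
 *  Reads 16 bit word(s) from the EEPROM.
 **/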
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i = 0;
	s32 ret_val;
	u16 word_in;
	u8 read_opcode = NVM_READ_OPCODE_SPI;
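
	/* A check for invalid values:  offset too large, too many words,
	 * and not enough words.
	 */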
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	ret_val = nvm->ops.acquire(hw);
	if (ret_val)
		goto out;

	ret_val = igb_ready_nvm_eeprom(hw);
	if (ret_val)
		goto release;

	igb_standby_nvm(hw);

	if ((nvm->address_bits == 8) && (offset >= 128))
		read_opcode |= NVM_A8_OPCODE_SPI;
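
	/* Send the READ command (opcode + addr) */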
	igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
	igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
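
	/* Read the data.  SPI NVMs increment the address with each byte
	 * read and will roll over if reading beyond the end.  This allows
	 * us to read the whole NVM from any offset.
	 */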
	for (i = 0; i < words; i++) {
		word_in = igb_shift_in_eec_bits(hw, 16);
		data[i] = (word_in >> 8) | (word_in << 8);
	}

release:
	nvm->ops.release(hw);

out:
	return ret_val;
}
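
/**
 *  igb_read_nvm_eerd - Reads EEPROM using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of words to read
 *  @data: word(s) read from the EEPROM
 *
 *  Reads 16 bit word(s) from the EEPROM using the EERD register.
 **/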
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, eerd = 0;
	s32 ret_val = 0;
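
	/* A check for invalid values:  offset too large, too many words,
	 * and not enough words.
	 */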
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
		       E1000_NVM_RW_REG_START;

		wr32(E1000_EERD, eerd);
		ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
		if (ret_val)
			break;

		data[i] = (rd32(E1000_EERD) >>
			   E1000_NVM_RW_REG_DATA);
	}

out:
	return ret_val;
}
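
/**
 *  igb_write_nvm_spi - Write to EEPROM using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset within the EEPROM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  Writes data to EEPROM at offset using SPI interface.
 *
 *  If igb_update_nvm_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 **/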
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val = -E1000_ERR_NVM;
	u16 widx = 0;
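
	/* A check for invalid values:  offset too large, too many words,
	 * and not enough words.
	 */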
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		return ret_val;
	}

	while (widx < words) {
		u8 write_opcode = NVM_WRITE_OPCODE_SPI;

		ret_val = nvm->ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val = igb_ready_nvm_eeprom(hw);
		if (ret_val) {
			nvm->ops.release(hw);
			return ret_val;
		}

		igb_standby_nvm(hw);
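
		/* Send the WRITE ENABLE command (8 bit opcode) */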
		igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
				       nvm->opcode_bits);

		igb_standby_nvm(hw);
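
		/* Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */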
		if ((nvm->address_bits == 8) && (offset >= 128))
			write_opcode |= NVM_A8_OPCODE_SPI;
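
		/* Send the Write command (8-bit opcode + addr) */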
		igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
		igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
				       nvm->address_bits);
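
		/* Loop to allow for up to whole page write of eeprom */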
		while (widx < words) {
			u16 word_out = data[widx];

			word_out = (word_out >> 8) | (word_out << 8);
			igb_shift_out_eec_bits(hw, word_out, 16);
			widx++;

			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
				igb_standby_nvm(hw);
				break;
			}
		}
		usleep_range(1000, 2000);
		nvm->ops.release(hw);
	}

	return ret_val;
}
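
/**
 *  igb_read_part_string - Read device part number string
 *  @hw: pointer to the HW structure
 *  @part_num: pointer to device part number
 *  @part_num_size: size of part number buffer
 *
 *  Reads the product board assembly (PBA) number from the EEPROM and stores
 *  the value in part_num.
 **/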
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
{
	s32 ret_val;
	u16 nvm_data;
	u16 pointer;
	u16 offset;
	u16 length;

	if (part_num == NULL) {
		hw_dbg("PBA string buffer was null\n");
		ret_val = E1000_ERR_INVALID_ARGUMENT;
		goto out;
	}

	ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}
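
	/* if nvm_data is not ptr guard the PBA must be in legacy format which
	 * means pointer is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */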
	if (nvm_data != NVM_PBA_PTR_GUARD) {
		hw_dbg("NVM PBA number is not stored as string\n");
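
		/* we will need 11 characters to store the PBA */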
		if (part_num_size < 11) {
			hw_dbg("PBA string buffer too small\n");
			return E1000_ERR_NO_SPACE;
		}
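
		/* extract hex string from data and pointer */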
		part_num[0] = (nvm_data >> 12) & 0xF;
		part_num[1] = (nvm_data >> 8) & 0xF;
		part_num[2] = (nvm_data >> 4) & 0xF;
		part_num[3] = nvm_data & 0xF;
		part_num[4] = (pointer >> 12) & 0xF;
		part_num[5] = (pointer >> 8) & 0xF;
		part_num[6] = '-';
		part_num[7] = 0;
		part_num[8] = (pointer >> 4) & 0xF;
		part_num[9] = pointer & 0xF;
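
		/* put a null character on the end of our string */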
		part_num[10] = '\0';
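
		/* switch the control term to zero and convert remaining
		 * digits to hex
		 */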
		for (offset = 0; offset < 10; offset++) {
			if (part_num[offset] < 0xA)
				part_num[offset] += '0';
			else if (part_num[offset] < 0x10)
				part_num[offset] += 'A' - 0xA;
		}

		goto out;
	}

	ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (length == 0xFFFF || length == 0) {
		hw_dbg("NVM PBA number section invalid length\n");
		ret_val = E1000_ERR_NVM_PBA_SECTION;
		goto out;
	}

	if (part_num_size < (((u32)length * 2) - 1)) {
		hw_dbg("PBA string buffer too small\n");
		ret_val = E1000_ERR_NO_SPACE;
		goto out;
	}
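
	/* trim pba length from start of string */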
	pointer++;
	length--;

	for (offset = 0; offset < length; offset++) {
		ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}
		part_num[offset * 2] = (u8)(nvm_data >> 8);
		part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
	}
	part_num[offset * 2] = '\0';

out:
	return ret_val;
}
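
/**
 *  igb_read_mac_addr - Read device MAC address
 *  @hw: pointer to the HW structure
 *
 *  Reads the device MAC address from the first receive address register
 *  (RAL/RAH pair) and stores it in both the permanent and current address
 *  fields.
 **/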
s32 igb_read_mac_addr(struct e1000_hw *hw)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	rar_high = rd32(E1000_RAH(0));
	rar_low = rd32(E1000_RAL(0));

	for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
		hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));

	for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
		hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));

	for (i = 0; i < ETH_ALEN; i++)
		hw->mac.addr[i] = hw->mac.perm_addr[i];

	return 0;
}
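
/**
 *  igb_validate_nvm_checksum - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/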
s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 checksum = 0;
	u16 i, nvm_data;

	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}
		checksum += nvm_data;
	}

	if (checksum != (u16) NVM_SUM) {
		hw_dbg("NVM Checksum Invalid\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}
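
/**
 *  igb_update_nvm_checksum - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM.
 **/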
s32 igb_update_nvm_checksum(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error while updating checksum.\n");
			goto out;
		}
		checksum += nvm_data;
	}
	checksum = (u16) NVM_SUM - checksum;
	ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
	if (ret_val)
		hw_dbg("NVM Write Error while updating checksum.\n");

out:
	return ret_val;
}
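
/**
 *  igb_get_fw_version - Get firmware version information
 *  @hw: pointer to the HW structure
 *  @fw_vers: pointer to output version structure
 *
 *  Unsupported MAC types return an all-zero version structure.
 **/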
void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
{
	u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
	u8 q, hval, rem, result;
	u16 comb_verh, comb_verl, comb_offset;

	memset(fw_vers, 0, sizeof(struct e1000_fw_version));
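
	/* basic eeprom version numbers and bits used vary by part and by tool
	 * used to create the nvm images. Check which data format we have.
	 */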
	hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
	switch (hw->mac.type) {
	case e1000_i211:
		igb_read_invm_version(hw, fw_vers);
		return;
	case e1000_82575:
	case e1000_82576:
	case e1000_82580:
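		/* Use this format, unless EETRACK ID exists,
		 * then use alternate format
		 */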
		if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
			hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
			fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
				>> NVM_MAJOR_SHIFT;
			fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
				>> NVM_MINOR_SHIFT;
			fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
			goto etrack_id;
		}
		break;
	case e1000_i210:
		if (!(igb_get_flash_presence_i210(hw))) {
			igb_read_invm_version(hw, fw_vers);
			return;
		}
		fallthrough;
	case e1000_i350:
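		/* find combo image version */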
		hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
		if ((comb_offset != 0x0) &&
		    (comb_offset != NVM_VER_INVALID)) {

			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
					 + 1), 1, &comb_verh);
			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
					 1, &comb_verl);
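
			/* get Option Rom version if it exists and is valid */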
			if ((comb_verh && comb_verl) &&
			    ((comb_verh != NVM_VER_INVALID) &&
			     (comb_verl != NVM_VER_INVALID))) {

				fw_vers->or_valid = true;
				fw_vers->or_major =
					comb_verl >> NVM_COMB_VER_SHFT;
				fw_vers->or_build =
					(comb_verl << NVM_COMB_VER_SHFT)
					| (comb_verh >> NVM_COMB_VER_SHFT);
				fw_vers->or_patch =
					comb_verh & NVM_COMB_VER_MASK;
			}
		}
		break;
	default:
		return;
	}
	hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
	fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
			    >> NVM_MAJOR_SHIFT;
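
	/* check for old style version format in newer images */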
	if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
		eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
	} else {
		eeprom_verl = (fw_version & NVM_MINOR_MASK)
				>> NVM_MINOR_SHIFT;
	}
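	/* Convert minor value to hex before assigning to output struct
	 * Val to be converted will not be higher than 99, per tool output
	 */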
	q = eeprom_verl / NVM_HEX_CONV;
	hval = q * NVM_HEX_TENS;
	rem = eeprom_verl % NVM_HEX_CONV;
	result = hval + rem;
	fw_vers->eep_minor = result;

etrack_id:
	if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
		hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
		hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
		fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
			| eeprom_verl;
	}
}