0001
0002
0003
0004 #include "e1000.h"
0005
0006
0007
0008
0009
0010
0011
0012
0013 static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
0014 {
0015 *eecd = *eecd | E1000_EECD_SK;
0016 ew32(EECD, *eecd);
0017 e1e_flush();
0018 udelay(hw->nvm.delay_usec);
0019 }
0020
0021
0022
0023
0024
0025
0026
0027
0028 static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
0029 {
0030 *eecd = *eecd & ~E1000_EECD_SK;
0031 ew32(EECD, *eecd);
0032 e1e_flush();
0033 udelay(hw->nvm.delay_usec);
0034 }
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
/**
 *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
 *  @hw: pointer to the HW structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out
 *
 *  Bit-bangs the most significant @count bits of @data onto the EEPROM
 *  data-in (DI) line, MSB first, pulsing the serial clock once per bit via
 *  e1000_raise_eec_clk()/e1000_lower_eec_clk().
 **/
static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u32 mask;

	/* Start at the most significant bit of the value to shift out */
	mask = BIT(count - 1);
	/* NOTE(review): presumably DO is held high while clocking a SPI
	 * part -- confirm against the controller datasheet.
	 */
	if (nvm->type == e1000_nvm_eeprom_spi)
		eecd |= E1000_EECD_DO;

	do {
		/* Set DI to reflect the current bit of 'data' */
		eecd &= ~E1000_EECD_DI;

		if (data & mask)
			eecd |= E1000_EECD_DI;

		ew32(EECD, eecd);
		e1e_flush();

		udelay(nvm->delay_usec);

		/* Pulse the clock so the device samples DI */
		e1000_raise_eec_clk(hw, &eecd);
		e1000_lower_eec_clk(hw, &eecd);

		mask >>= 1;
	} while (mask);

	/* Leave the data-in line low when finished */
	eecd &= ~E1000_EECD_DI;
	ew32(EECD, eecd);
}
0076
0077
0078
0079
0080
0081
0082
0083
0084
0085
0086
0087
/**
 *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to the HW structure
 *  @count: number of bits to shift in
 *
 *  Reads @count bits from the EEPROM, MSB first.  Each bit is "shifted in"
 *  by raising the clock, sampling the data-out (DO) pin, and then lowering
 *  the clock.  The result is returned in the low @count bits.
 **/
static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
{
	u32 eecd;
	u32 i;
	u16 data;

	eecd = er32(EECD);
	/* Clear both data pins before clocking */
	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
	data = 0;

	for (i = 0; i < count; i++) {
		data <<= 1;
		e1000_raise_eec_clk(hw, &eecd);

		/* Re-read EECD to sample the DO pin driven by the device */
		eecd = er32(EECD);

		eecd &= ~E1000_EECD_DI;
		if (eecd & E1000_EECD_DO)
			data |= 1;

		e1000_lower_eec_clk(hw, &eecd);
	}

	return data;
}
0113
0114
0115
0116
0117
0118
0119
0120
0121
0122 s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
0123 {
0124 u32 attempts = 100000;
0125 u32 i, reg = 0;
0126
0127 for (i = 0; i < attempts; i++) {
0128 if (ee_reg == E1000_NVM_POLL_READ)
0129 reg = er32(EERD);
0130 else
0131 reg = er32(EEWR);
0132
0133 if (reg & E1000_NVM_RW_REG_DONE)
0134 return 0;
0135
0136 udelay(5);
0137 }
0138
0139 return -E1000_ERR_NVM;
0140 }
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150 s32 e1000e_acquire_nvm(struct e1000_hw *hw)
0151 {
0152 u32 eecd = er32(EECD);
0153 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
0154
0155 ew32(EECD, eecd | E1000_EECD_REQ);
0156 eecd = er32(EECD);
0157
0158 while (timeout) {
0159 if (eecd & E1000_EECD_GNT)
0160 break;
0161 udelay(5);
0162 eecd = er32(EECD);
0163 timeout--;
0164 }
0165
0166 if (!timeout) {
0167 eecd &= ~E1000_EECD_REQ;
0168 ew32(EECD, eecd);
0169 e_dbg("Could not acquire NVM grant\n");
0170 return -E1000_ERR_NVM;
0171 }
0172
0173 return 0;
0174 }
0175
0176
0177
0178
0179
0180
0181
/**
 *  e1000_standby_nvm - Return EEPROM to standby state
 *  @hw: pointer to the HW structure
 *
 *  Returns the EEPROM to a standby state.  For SPI parts this is done by
 *  pulsing chip select: drive CS high (deselect), wait the per-part delay,
 *  then drive CS low again so the device is ready for the next command.
 *  No-op for non-SPI NVM types.
 **/
static void e1000_standby_nvm(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Toggle CS to flush commands */
		eecd |= E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
		eecd &= ~E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
	}
}
0199
0200
0201
0202
0203
0204
0205
/**
 *  e1000_stop_nvm - Terminate EEPROM command
 *  @hw: pointer to the HW structure
 *
 *  Terminates the current command by deasserting the EEPROM's chip select
 *  pin (SPI parts only): CS is pulled high and the clock lowered, which
 *  ends the transaction.
 **/
static void e1000_stop_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	eecd = er32(EECD);
	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
		/* Pull CS high and lower the clock to end the command */
		eecd |= E1000_EECD_CS;
		e1000_lower_eec_clk(hw, &eecd);
	}
}
0217
0218
0219
0220
0221
0222
0223
0224 void e1000e_release_nvm(struct e1000_hw *hw)
0225 {
0226 u32 eecd;
0227
0228 e1000_stop_nvm(hw);
0229
0230 eecd = er32(EECD);
0231 eecd &= ~E1000_EECD_REQ;
0232 ew32(EECD, eecd);
0233 }
0234
0235
0236
0237
0238
0239
0240
/**
 *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
 *  @hw: pointer to the HW structure
 *
 *  Sets up the EEPROM for reading and writing.  For SPI parts, the clock
 *  and chip-select lines are cleared, then the device's status register is
 *  polled (via the RDSR opcode) until the ready bit clears, retrying up to
 *  NVM_MAX_RETRY_SPI times.  Returns 0 on success or -E1000_ERR_NVM if the
 *  device never reports ready.
 **/
static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u8 spi_stat_reg;

	if (nvm->type == e1000_nvm_eeprom_spi) {
		u16 timeout = NVM_MAX_RETRY_SPI;

		/* Clear SK and CS */
		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
		ew32(EECD, eecd);
		e1e_flush();
		udelay(1);

		/* Read the Status Register repeatedly until the busy/ready
		 * bit (NVM_STATUS_RDY_SPI) is cleared.  If it does not clear
		 * within 'timeout' attempts, error out.
		 */
		while (timeout) {
			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
						 hw->nvm.opcode_bits);
			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
				break;

			udelay(5);
			e1000_standby_nvm(hw);
			timeout--;
		}

		if (!timeout) {
			e_dbg("SPI NVM Status error\n");
			return -E1000_ERR_NVM;
		}
	}

	return 0;
}
0281
0282
0283
0284
0285
0286
0287
0288
0289
0290
0291 s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
0292 {
0293 struct e1000_nvm_info *nvm = &hw->nvm;
0294 u32 i, eerd = 0;
0295 s32 ret_val = 0;
0296
0297
0298
0299
0300 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
0301 (words == 0)) {
0302 e_dbg("nvm parameter(s) out of bounds\n");
0303 return -E1000_ERR_NVM;
0304 }
0305
0306 for (i = 0; i < words; i++) {
0307 eerd = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) +
0308 E1000_NVM_RW_REG_START;
0309
0310 ew32(EERD, eerd);
0311 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
0312 if (ret_val) {
0313 e_dbg("NVM read error: %d\n", ret_val);
0314 break;
0315 }
0316
0317 data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
0318 }
0319
0320 return ret_val;
0321 }
0322
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
/**
 *  e1000e_write_nvm_spi - Write to EEPROM using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset within the EEPROM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  Writes data to the EEPROM at offset using the SPI interface.  Words are
 *  written in page-sized bursts: for each burst the NVM is acquired, made
 *  ready, write-enabled (WREN), and then words are streamed until a page
 *  boundary is reached.  Returns 0 on success or a negative error code.
 *
 *  NOTE(review): if e1000e_update_nvm_checksum is not called after this
 *  function, the EEPROM will most likely contain an invalid checksum.
 **/
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val = -E1000_ERR_NVM;
	u16 widx = 0;

	/* Reject out-of-range offsets, zero-length writes, and writes that
	 * would run past the end of the NVM.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		e_dbg("nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	while (widx < words) {
		u8 write_opcode = NVM_WRITE_OPCODE_SPI;

		ret_val = nvm->ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000_ready_nvm_eeprom(hw);
		if (ret_val) {
			nvm->ops.release(hw);
			return ret_val;
		}

		e1000_standby_nvm(hw);

		/* Send the WRITE ENABLE command (8 bit opcode) */
		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
					 nvm->opcode_bits);

		e1000_standby_nvm(hw);

		/* Some SPI eeproms use the 8th address bit embedded in the
		 * opcode
		 */
		if ((nvm->address_bits == 8) && (offset >= 128))
			write_opcode |= NVM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
					 nvm->address_bits);

		/* Loop to allow for up to whole page write of eeprom */
		while (widx < words) {
			u16 word_out = data[widx];

			/* Byte-swap: the EEPROM stores data LSB first */
			word_out = (word_out >> 8) | (word_out << 8);
			e1000_shift_out_eec_bits(hw, word_out, 16);
			widx++;

			/* Stop at a page boundary; a new burst (with its own
			 * WREN and address phase) is needed past it
			 */
			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
				e1000_standby_nvm(hw);
				break;
			}
		}
		/* Allow the internal page-write cycle to complete */
		usleep_range(10000, 11000);
		nvm->ops.release(hw);
	}

	return ret_val;
}
0401
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411 s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
0412 u32 pba_num_size)
0413 {
0414 s32 ret_val;
0415 u16 nvm_data;
0416 u16 pba_ptr;
0417 u16 offset;
0418 u16 length;
0419
0420 if (pba_num == NULL) {
0421 e_dbg("PBA string buffer was null\n");
0422 return -E1000_ERR_INVALID_ARGUMENT;
0423 }
0424
0425 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
0426 if (ret_val) {
0427 e_dbg("NVM Read Error\n");
0428 return ret_val;
0429 }
0430
0431 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
0432 if (ret_val) {
0433 e_dbg("NVM Read Error\n");
0434 return ret_val;
0435 }
0436
0437
0438
0439
0440
0441 if (nvm_data != NVM_PBA_PTR_GUARD) {
0442 e_dbg("NVM PBA number is not stored as string\n");
0443
0444
0445 if (pba_num_size < E1000_PBANUM_LENGTH) {
0446 e_dbg("PBA string buffer too small\n");
0447 return E1000_ERR_NO_SPACE;
0448 }
0449
0450
0451 pba_num[0] = (nvm_data >> 12) & 0xF;
0452 pba_num[1] = (nvm_data >> 8) & 0xF;
0453 pba_num[2] = (nvm_data >> 4) & 0xF;
0454 pba_num[3] = nvm_data & 0xF;
0455 pba_num[4] = (pba_ptr >> 12) & 0xF;
0456 pba_num[5] = (pba_ptr >> 8) & 0xF;
0457 pba_num[6] = '-';
0458 pba_num[7] = 0;
0459 pba_num[8] = (pba_ptr >> 4) & 0xF;
0460 pba_num[9] = pba_ptr & 0xF;
0461
0462
0463 pba_num[10] = '\0';
0464
0465
0466 for (offset = 0; offset < 10; offset++) {
0467 if (pba_num[offset] < 0xA)
0468 pba_num[offset] += '0';
0469 else if (pba_num[offset] < 0x10)
0470 pba_num[offset] += 'A' - 0xA;
0471 }
0472
0473 return 0;
0474 }
0475
0476 ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
0477 if (ret_val) {
0478 e_dbg("NVM Read Error\n");
0479 return ret_val;
0480 }
0481
0482 if (length == 0xFFFF || length == 0) {
0483 e_dbg("NVM PBA number section invalid length\n");
0484 return -E1000_ERR_NVM_PBA_SECTION;
0485 }
0486
0487 if (pba_num_size < (((u32)length * 2) - 1)) {
0488 e_dbg("PBA string buffer too small\n");
0489 return -E1000_ERR_NO_SPACE;
0490 }
0491
0492
0493 pba_ptr++;
0494 length--;
0495
0496 for (offset = 0; offset < length; offset++) {
0497 ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
0498 if (ret_val) {
0499 e_dbg("NVM Read Error\n");
0500 return ret_val;
0501 }
0502 pba_num[offset * 2] = (u8)(nvm_data >> 8);
0503 pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
0504 }
0505 pba_num[offset * 2] = '\0';
0506
0507 return 0;
0508 }
0509
0510
0511
0512
0513
0514
0515
0516
0517
0518 s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
0519 {
0520 u32 rar_high;
0521 u32 rar_low;
0522 u16 i;
0523
0524 rar_high = er32(RAH(0));
0525 rar_low = er32(RAL(0));
0526
0527 for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
0528 hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8));
0529
0530 for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
0531 hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8));
0532
0533 for (i = 0; i < ETH_ALEN; i++)
0534 hw->mac.addr[i] = hw->mac.perm_addr[i];
0535
0536 return 0;
0537 }
0538
0539
0540
0541
0542
0543
0544
0545
0546 s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
0547 {
0548 s32 ret_val;
0549 u16 checksum = 0;
0550 u16 i, nvm_data;
0551
0552 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
0553 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
0554 if (ret_val) {
0555 e_dbg("NVM Read Error\n");
0556 return ret_val;
0557 }
0558 checksum += nvm_data;
0559 }
0560
0561 if (checksum != (u16)NVM_SUM) {
0562 e_dbg("NVM Checksum Invalid\n");
0563 return -E1000_ERR_NVM;
0564 }
0565
0566 return 0;
0567 }
0568
0569
0570
0571
0572
0573
0574
0575
0576
0577 s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
0578 {
0579 s32 ret_val;
0580 u16 checksum = 0;
0581 u16 i, nvm_data;
0582
0583 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
0584 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
0585 if (ret_val) {
0586 e_dbg("NVM Read Error while updating checksum.\n");
0587 return ret_val;
0588 }
0589 checksum += nvm_data;
0590 }
0591 checksum = (u16)NVM_SUM - checksum;
0592 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
0593 if (ret_val)
0594 e_dbg("NVM Write Error while updating checksum.\n");
0595
0596 return ret_val;
0597 }
0598
0599
0600
0601
0602
0603
0604
0605
0606 void e1000e_reload_nvm_generic(struct e1000_hw *hw)
0607 {
0608 u32 ctrl_ext;
0609
0610 usleep_range(10, 20);
0611 ctrl_ext = er32(CTRL_EXT);
0612 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
0613 ew32(CTRL_EXT, ctrl_ext);
0614 e1e_flush();
0615 }