Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright(c) 2007 - 2018 Intel Corporation. */
0003 
0004 /* e1000_i210
0005  * e1000_i211
0006  */
0007 
0008 #include <linux/types.h>
0009 #include <linux/if_ether.h>
0010 
0011 #include "e1000_hw.h"
0012 #include "e1000_i210.h"
0013 
0014 static s32 igb_update_flash_i210(struct e1000_hw *hw);
0015 
/**
 * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM.
 *  Takes the SW semaphore (SMBI) first, then latches the SW/FW
 *  semaphore (SWESMBI).  Returns 0 on success, -E1000_ERR_NVM if
 *  either bit could not be obtained within the timeout.
 */
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
    u32 swsm;
    /* Timeout scales with NVM size; each poll iteration waits 50 us. */
    s32 timeout = hw->nvm.word_size + 1;
    s32 i = 0;

    /* Get the SW semaphore: poll until the SMBI bit reads clear. */
    while (i < timeout) {
        swsm = rd32(E1000_SWSM);
        if (!(swsm & E1000_SWSM_SMBI))
            break;

        udelay(50);
        i++;
    }

    if (i == timeout) {
        /* In rare circumstances, the SW semaphore may already be held
         * unintentionally. Clear the semaphore once before giving up.
         */
        if (hw->dev_spec._82575.clear_semaphore_once) {
            /* One-shot recovery: the flag ensures we force-release
             * at most once per adapter lifetime.
             */
            hw->dev_spec._82575.clear_semaphore_once = false;
            igb_put_hw_semaphore(hw);
            for (i = 0; i < timeout; i++) {
                swsm = rd32(E1000_SWSM);
                if (!(swsm & E1000_SWSM_SMBI))
                    break;

                udelay(50);
            }
        }

        /* If we do not have the semaphore here, we have to give up. */
        if (i == timeout) {
            hw_dbg("Driver can't access device - SMBI bit is set.\n");
            return -E1000_ERR_NVM;
        }
    }

    /* Get the FW semaphore: write SWESMBI, then read back to check
     * that the bit actually latched (FW may refuse it).
     */
    for (i = 0; i < timeout; i++) {
        swsm = rd32(E1000_SWSM);
        wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

        /* Semaphore acquired if bit latched */
        if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
            break;

        udelay(50);
    }

    if (i == timeout) {
        /* Release semaphores */
        igb_put_hw_semaphore(hw);
        hw_dbg("Driver can't access the NVM\n");
        return -E1000_ERR_NVM;
    }

    return 0;
}
0082 
/**
 *  igb_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
    /* Thin wrapper: the EEPROM is guarded by the shared SW/FW sync
     * register; take only the EEPROM bit of it.
     */
    return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
0096 
/**
 *  igb_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void igb_release_nvm_i210(struct e1000_hw *hw)
{
    /* Counterpart of igb_acquire_nvm_i210: drop the EEPROM bit only. */
    igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
0108 
0109 /**
0110  *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
0111  *  @hw: pointer to the HW structure
0112  *  @mask: specifies which semaphore to acquire
0113  *
0114  *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
0115  *  will also specify which port we're acquiring the lock for.
0116  **/
0117 s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
0118 {
0119     u32 swfw_sync;
0120     u32 swmask = mask;
0121     u32 fwmask = mask << 16;
0122     s32 ret_val = 0;
0123     s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
0124 
0125     while (i < timeout) {
0126         if (igb_get_hw_semaphore_i210(hw)) {
0127             ret_val = -E1000_ERR_SWFW_SYNC;
0128             goto out;
0129         }
0130 
0131         swfw_sync = rd32(E1000_SW_FW_SYNC);
0132         if (!(swfw_sync & (fwmask | swmask)))
0133             break;
0134 
0135         /* Firmware currently using resource (fwmask) */
0136         igb_put_hw_semaphore(hw);
0137         mdelay(5);
0138         i++;
0139     }
0140 
0141     if (i == timeout) {
0142         hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
0143         ret_val = -E1000_ERR_SWFW_SYNC;
0144         goto out;
0145     }
0146 
0147     swfw_sync |= swmask;
0148     wr32(E1000_SW_FW_SYNC, swfw_sync);
0149 
0150     igb_put_hw_semaphore(hw);
0151 out:
0152     return ret_val;
0153 }
0154 
0155 /**
0156  *  igb_release_swfw_sync_i210 - Release SW/FW semaphore
0157  *  @hw: pointer to the HW structure
0158  *  @mask: specifies which semaphore to acquire
0159  *
0160  *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
0161  *  will also specify which port we're releasing the lock for.
0162  **/
0163 void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
0164 {
0165     u32 swfw_sync;
0166 
0167     while (igb_get_hw_semaphore_i210(hw))
0168         ; /* Empty */
0169 
0170     swfw_sync = rd32(E1000_SW_FW_SYNC);
0171     swfw_sync &= ~mask;
0172     wr32(E1000_SW_FW_SYNC, swfw_sync);
0173 
0174     igb_put_hw_semaphore(hw);
0175 }
0176 
0177 /**
0178  *  igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
0179  *  @hw: pointer to the HW structure
0180  *  @offset: offset of word in the Shadow Ram to read
0181  *  @words: number of words to read
0182  *  @data: word read from the Shadow Ram
0183  *
0184  *  Reads a 16 bit word from the Shadow Ram using the EERD register.
0185  *  Uses necessary synchronization semaphores.
0186  **/
0187 static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
0188                   u16 *data)
0189 {
0190     s32 status = 0;
0191     u16 i, count;
0192 
0193     /* We cannot hold synchronization semaphores for too long,
0194      * because of forceful takeover procedure. However it is more efficient
0195      * to read in bursts than synchronizing access for each word.
0196      */
0197     for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
0198         count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
0199             E1000_EERD_EEWR_MAX_COUNT : (words - i);
0200         if (!(hw->nvm.ops.acquire(hw))) {
0201             status = igb_read_nvm_eerd(hw, offset, count,
0202                              data + i);
0203             hw->nvm.ops.release(hw);
0204         } else {
0205             status = E1000_ERR_SWFW_SYNC;
0206         }
0207 
0208         if (status)
0209             break;
0210     }
0211 
0212     return status;
0213 }
0214 
0215 /**
0216  *  igb_write_nvm_srwr - Write to Shadow Ram using EEWR
0217  *  @hw: pointer to the HW structure
0218  *  @offset: offset within the Shadow Ram to be written to
0219  *  @words: number of words to write
0220  *  @data: 16 bit word(s) to be written to the Shadow Ram
0221  *
0222  *  Writes data to Shadow Ram at offset using EEWR register.
0223  *
0224  *  If igb_update_nvm_checksum is not called after this function , the
0225  *  Shadow Ram will most likely contain an invalid checksum.
0226  **/
0227 static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
0228                 u16 *data)
0229 {
0230     struct e1000_nvm_info *nvm = &hw->nvm;
0231     u32 i, k, eewr = 0;
0232     u32 attempts = 100000;
0233     s32 ret_val = 0;
0234 
0235     /* A check for invalid values:  offset too large, too many words,
0236      * too many words for the offset, and not enough words.
0237      */
0238     if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
0239         (words == 0)) {
0240         hw_dbg("nvm parameter(s) out of bounds\n");
0241         ret_val = -E1000_ERR_NVM;
0242         goto out;
0243     }
0244 
0245     for (i = 0; i < words; i++) {
0246         eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
0247             (data[i] << E1000_NVM_RW_REG_DATA) |
0248             E1000_NVM_RW_REG_START;
0249 
0250         wr32(E1000_SRWR, eewr);
0251 
0252         for (k = 0; k < attempts; k++) {
0253             if (E1000_NVM_RW_REG_DONE &
0254                 rd32(E1000_SRWR)) {
0255                 ret_val = 0;
0256                 break;
0257             }
0258             udelay(5);
0259     }
0260 
0261         if (ret_val) {
0262             hw_dbg("Shadow RAM write EEWR timed out\n");
0263             break;
0264         }
0265     }
0266 
0267 out:
0268     return ret_val;
0269 }
0270 
0271 /**
0272  *  igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
0273  *  @hw: pointer to the HW structure
0274  *  @offset: offset within the Shadow RAM to be written to
0275  *  @words: number of words to write
0276  *  @data: 16 bit word(s) to be written to the Shadow RAM
0277  *
0278  *  Writes data to Shadow RAM at offset using EEWR register.
0279  *
0280  *  If e1000_update_nvm_checksum is not called after this function , the
0281  *  data will not be committed to FLASH and also Shadow RAM will most likely
0282  *  contain an invalid checksum.
0283  *
0284  *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
0285  *  partially written.
0286  **/
0287 static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
0288                    u16 *data)
0289 {
0290     s32 status = 0;
0291     u16 i, count;
0292 
0293     /* We cannot hold synchronization semaphores for too long,
0294      * because of forceful takeover procedure. However it is more efficient
0295      * to write in bursts than synchronizing access for each word.
0296      */
0297     for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
0298         count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
0299             E1000_EERD_EEWR_MAX_COUNT : (words - i);
0300         if (!(hw->nvm.ops.acquire(hw))) {
0301             status = igb_write_nvm_srwr(hw, offset, count,
0302                               data + i);
0303             hw->nvm.ops.release(hw);
0304         } else {
0305             status = E1000_ERR_SWFW_SYNC;
0306         }
0307 
0308         if (status)
0309             break;
0310     }
0311 
0312     return status;
0313 }
0314 
0315 /**
0316  *  igb_read_invm_word_i210 - Reads OTP
0317  *  @hw: pointer to the HW structure
0318  *  @address: the word address (aka eeprom offset) to read
0319  *  @data: pointer to the data read
0320  *
0321  *  Reads 16-bit words from the OTP. Return error when the word is not
0322  *  stored in OTP.
0323  **/
0324 static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
0325 {
0326     s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
0327     u32 invm_dword;
0328     u16 i;
0329     u8 record_type, word_address;
0330 
0331     for (i = 0; i < E1000_INVM_SIZE; i++) {
0332         invm_dword = rd32(E1000_INVM_DATA_REG(i));
0333         /* Get record type */
0334         record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
0335         if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
0336             break;
0337         if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
0338             i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
0339         if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
0340             i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
0341         if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
0342             word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
0343             if (word_address == address) {
0344                 *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
0345                 hw_dbg("Read INVM Word 0x%02x = %x\n",
0346                       address, *data);
0347                 status = 0;
0348                 break;
0349             }
0350         }
0351     }
0352     if (status)
0353         hw_dbg("Requested word 0x%02x not found in OTP\n", address);
0354     return status;
0355 }
0356 
/**
 * igb_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: offset to read from
 *  @words: number of words to read (unused)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 *  Words not present in iNVM fall back to per-field defaults or
 *  values cached in @hw; unmapped words return NVM_RESERVED_WORD.
 **/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
                u16 __always_unused words, u16 *data)
{
    s32 ret_val = 0;

    /* Only the MAC addr is required to be present in the iNVM */
    switch (offset) {
    case NVM_MAC_ADDR:
        /* MAC address spans three consecutive 16-bit words; OR the
         * statuses so any missing word reports the failure.
         */
        ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
        ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
                             &data[1]);
        ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
                             &data[2]);
        if (ret_val)
            hw_dbg("MAC Addr not found in iNVM\n");
        break;
    case NVM_INIT_CTRL_2:
        /* Optional words: substitute the I211 default when absent. */
        ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
        if (ret_val) {
            *data = NVM_INIT_CTRL_2_DEFAULT_I211;
            ret_val = 0;
        }
        break;
    case NVM_INIT_CTRL_4:
        ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
        if (ret_val) {
            *data = NVM_INIT_CTRL_4_DEFAULT_I211;
            ret_val = 0;
        }
        break;
    case NVM_LED_1_CFG:
        ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
        if (ret_val) {
            *data = NVM_LED_1_CFG_DEFAULT_I211;
            ret_val = 0;
        }
        break;
    case NVM_LED_0_2_CFG:
        ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
        if (ret_val) {
            *data = NVM_LED_0_2_CFG_DEFAULT_I211;
            ret_val = 0;
        }
        break;
    case NVM_ID_LED_SETTINGS:
        ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
        if (ret_val) {
            *data = ID_LED_RESERVED_FFFF;
            ret_val = 0;
        }
        break;
    case NVM_SUB_DEV_ID:
        /* Identification words come from PCI config space cached
         * in @hw, not from the iNVM at all.
         */
        *data = hw->subsystem_device_id;
        break;
    case NVM_SUB_VEN_ID:
        *data = hw->subsystem_vendor_id;
        break;
    case NVM_DEV_ID:
        *data = hw->device_id;
        break;
    case NVM_VEN_ID:
        *data = hw->vendor_id;
        break;
    default:
        hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
        *data = NVM_RESERVED_WORD;
        break;
    }
    return ret_val;
}
0436 
/**
 *  igb_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 *  Scans the version/image-type blocks from the end of the record
 *  area backwards; returns 0 when a valid field is decoded,
 *  -E1000_ERR_INVM_VALUE_NOT_FOUND otherwise.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
              struct e1000_fw_version *invm_ver) {
    u32 *record = NULL;
    u32 *next_record = NULL;
    u32 i = 0;
    u32 invm_dword = 0;
    /* Last E1000_INVM_ULT_BYTES_SIZE bytes are reserved; only the
     * first invm_blocks dwords hold records.
     */
    u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
                         E1000_INVM_RECORD_SIZE_IN_BYTES);
    u32 buffer[E1000_INVM_SIZE];
    s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
    u16 version = 0;

    /* Read iNVM memory */
    for (i = 0; i < E1000_INVM_SIZE; i++) {
        invm_dword = rd32(E1000_INVM_DATA_REG(i));
        buffer[i] = invm_dword;
    }

    /* Read version number; walk backwards from the end of the
     * record area (record = buffer[invm_blocks - i]).
     */
    for (i = 1; i < invm_blocks; i++) {
        record = &buffer[invm_blocks - i];
        next_record = &buffer[invm_blocks - i + 1];

        /* Check if we have first version location used */
        if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
            version = 0;
            status = 0;
            break;
        }
        /* Check if we have second version location used */
        else if ((i == 1) &&
             ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
            version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
            status = 0;
            break;
        }
        /* Check if we have odd version location
         * used and it is the last one used
         */
        else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
             ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
             (i != 1))) {
            version = (*next_record & E1000_INVM_VER_FIELD_TWO)
                  >> 13;
            status = 0;
            break;
        }
        /* Check if we have even version location
         * used and it is the last one used
         */
        else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
             ((*record & 0x3) == 0)) {
            version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
            status = 0;
            break;
        }
    }

    if (!status) {
        invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
                    >> E1000_INVM_MAJOR_SHIFT;
        invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
    }
    /* Read Image Type; same backwards scan as the version search. */
    for (i = 1; i < invm_blocks; i++) {
        record = &buffer[invm_blocks - i];
        next_record = &buffer[invm_blocks - i + 1];

        /* Check if we have image type in first location used */
        if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
            invm_ver->invm_img_type = 0;
            status = 0;
            break;
        }
        /* Check if we have image type in first location used */
        else if ((((*record & 0x3) == 0) &&
             ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
             ((((*record & 0x3) != 0) && (i != 1)))) {
            invm_ver->invm_img_type =
                (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
            status = 0;
            break;
        }
    }
    return status;
}
0530 
0531 /**
0532  *  igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
0533  *  @hw: pointer to the HW structure
0534  *
0535  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
0536  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
0537  **/
0538 static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
0539 {
0540     s32 status = 0;
0541     s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
0542 
0543     if (!(hw->nvm.ops.acquire(hw))) {
0544 
0545         /* Replace the read function with semaphore grabbing with
0546          * the one that skips this for a while.
0547          * We have semaphore taken already here.
0548          */
0549         read_op_ptr = hw->nvm.ops.read;
0550         hw->nvm.ops.read = igb_read_nvm_eerd;
0551 
0552         status = igb_validate_nvm_checksum(hw);
0553 
0554         /* Revert original read operation. */
0555         hw->nvm.ops.read = read_op_ptr;
0556 
0557         hw->nvm.ops.release(hw);
0558     } else {
0559         status = E1000_ERR_SWFW_SYNC;
0560     }
0561 
0562     return status;
0563 }
0564 
0565 /**
0566  *  igb_update_nvm_checksum_i210 - Update EEPROM checksum
0567  *  @hw: pointer to the HW structure
0568  *
0569  *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
0570  *  up to the checksum.  Then calculates the EEPROM checksum and writes the
0571  *  value to the EEPROM. Next commit EEPROM data onto the Flash.
0572  **/
0573 static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
0574 {
0575     s32 ret_val = 0;
0576     u16 checksum = 0;
0577     u16 i, nvm_data;
0578 
0579     /* Read the first word from the EEPROM. If this times out or fails, do
0580      * not continue or we could be in for a very long wait while every
0581      * EEPROM read fails
0582      */
0583     ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
0584     if (ret_val) {
0585         hw_dbg("EEPROM read failed\n");
0586         goto out;
0587     }
0588 
0589     if (!(hw->nvm.ops.acquire(hw))) {
0590         /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
0591          * because we do not want to take the synchronization
0592          * semaphores twice here.
0593          */
0594 
0595         for (i = 0; i < NVM_CHECKSUM_REG; i++) {
0596             ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
0597             if (ret_val) {
0598                 hw->nvm.ops.release(hw);
0599                 hw_dbg("NVM Read Error while updating checksum.\n");
0600                 goto out;
0601             }
0602             checksum += nvm_data;
0603         }
0604         checksum = (u16) NVM_SUM - checksum;
0605         ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
0606                         &checksum);
0607         if (ret_val) {
0608             hw->nvm.ops.release(hw);
0609             hw_dbg("NVM Write Error while updating checksum.\n");
0610             goto out;
0611         }
0612 
0613         hw->nvm.ops.release(hw);
0614 
0615         ret_val = igb_update_flash_i210(hw);
0616     } else {
0617         ret_val = -E1000_ERR_SWFW_SYNC;
0618     }
0619 out:
0620     return ret_val;
0621 }
0622 
0623 /**
0624  *  igb_pool_flash_update_done_i210 - Pool FLUDONE status.
0625  *  @hw: pointer to the HW structure
0626  *
0627  **/
0628 static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
0629 {
0630     s32 ret_val = -E1000_ERR_NVM;
0631     u32 i, reg;
0632 
0633     for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
0634         reg = rd32(E1000_EECD);
0635         if (reg & E1000_EECD_FLUDONE_I210) {
0636             ret_val = 0;
0637             break;
0638         }
0639         udelay(5);
0640     }
0641 
0642     return ret_val;
0643 }
0644 
0645 /**
0646  *  igb_get_flash_presence_i210 - Check if flash device is detected.
0647  *  @hw: pointer to the HW structure
0648  *
0649  **/
0650 bool igb_get_flash_presence_i210(struct e1000_hw *hw)
0651 {
0652     u32 eec = 0;
0653     bool ret_val = false;
0654 
0655     eec = rd32(E1000_EECD);
0656     if (eec & E1000_EECD_FLASH_DETECTED_I210)
0657         ret_val = true;
0658 
0659     return ret_val;
0660 }
0661 
0662 /**
0663  *  igb_update_flash_i210 - Commit EEPROM to the flash
0664  *  @hw: pointer to the HW structure
0665  *
0666  **/
0667 static s32 igb_update_flash_i210(struct e1000_hw *hw)
0668 {
0669     s32 ret_val = 0;
0670     u32 flup;
0671 
0672     ret_val = igb_pool_flash_update_done_i210(hw);
0673     if (ret_val == -E1000_ERR_NVM) {
0674         hw_dbg("Flash update time out\n");
0675         goto out;
0676     }
0677 
0678     flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
0679     wr32(E1000_EECD, flup);
0680 
0681     ret_val = igb_pool_flash_update_done_i210(hw);
0682     if (ret_val)
0683         hw_dbg("Flash update time out\n");
0684     else
0685         hw_dbg("Flash update complete\n");
0686 
0687 out:
0688     return ret_val;
0689 }
0690 
0691 /**
0692  *  igb_valid_led_default_i210 - Verify a valid default LED config
0693  *  @hw: pointer to the HW structure
0694  *  @data: pointer to the NVM (EEPROM)
0695  *
0696  *  Read the EEPROM for the current default LED configuration.  If the
0697  *  LED configuration is not valid, set to a valid LED configuration.
0698  **/
0699 s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
0700 {
0701     s32 ret_val;
0702 
0703     ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
0704     if (ret_val) {
0705         hw_dbg("NVM Read Error\n");
0706         goto out;
0707     }
0708 
0709     if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
0710         switch (hw->phy.media_type) {
0711         case e1000_media_type_internal_serdes:
0712             *data = ID_LED_DEFAULT_I210_SERDES;
0713             break;
0714         case e1000_media_type_copper:
0715         default:
0716             *data = ID_LED_DEFAULT_I210;
0717             break;
0718         }
0719     }
0720 out:
0721     return ret_val;
0722 }
0723 
0724 /**
0725  *  __igb_access_xmdio_reg - Read/write XMDIO register
0726  *  @hw: pointer to the HW structure
0727  *  @address: XMDIO address to program
0728  *  @dev_addr: device address to program
0729  *  @data: pointer to value to read/write from/to the XMDIO address
0730  *  @read: boolean flag to indicate read or write
0731  **/
0732 static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
0733                   u8 dev_addr, u16 *data, bool read)
0734 {
0735     s32 ret_val = 0;
0736 
0737     ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
0738     if (ret_val)
0739         return ret_val;
0740 
0741     ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
0742     if (ret_val)
0743         return ret_val;
0744 
0745     ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
0746                              dev_addr);
0747     if (ret_val)
0748         return ret_val;
0749 
0750     if (read)
0751         ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
0752     else
0753         ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
0754     if (ret_val)
0755         return ret_val;
0756 
0757     /* Recalibrate the device back to 0 */
0758     ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
0759     if (ret_val)
0760         return ret_val;
0761 
0762     return ret_val;
0763 }
0764 
/**
 *  igb_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be read from the EMI address
 *
 *  Returns 0 on success or the first failing PHY-ops status.
 **/
s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
    return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
}
0776 
/**
 *  igb_write_xmdio_reg - Write XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be written to the XMDIO address
 *
 *  Returns 0 on success or the first failing PHY-ops status.
 **/
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
    /* Pass &data so the shared helper has a uniform in/out pointer. */
    return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
0788 
0789 /**
0790  *  igb_init_nvm_params_i210 - Init NVM func ptrs.
0791  *  @hw: pointer to the HW structure
0792  **/
0793 s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
0794 {
0795     struct e1000_nvm_info *nvm = &hw->nvm;
0796 
0797     nvm->ops.acquire = igb_acquire_nvm_i210;
0798     nvm->ops.release = igb_release_nvm_i210;
0799     nvm->ops.valid_led_default = igb_valid_led_default_i210;
0800 
0801     /* NVM Function Pointers */
0802     if (igb_get_flash_presence_i210(hw)) {
0803         hw->nvm.type = e1000_nvm_flash_hw;
0804         nvm->ops.read    = igb_read_nvm_srrd_i210;
0805         nvm->ops.write   = igb_write_nvm_srwr_i210;
0806         nvm->ops.validate = igb_validate_nvm_checksum_i210;
0807         nvm->ops.update   = igb_update_nvm_checksum_i210;
0808     } else {
0809         hw->nvm.type = e1000_nvm_invm;
0810         nvm->ops.read     = igb_read_invm_i210;
0811         nvm->ops.write    = NULL;
0812         nvm->ops.validate = NULL;
0813         nvm->ops.update   = NULL;
0814     }
0815     return 0;
0816 }
0817 
/**
 * igb_pll_workaround_i210
 * @hw: pointer to the HW structure
 *
 * Works around an errata in the PLL circuit where it occasionally
 * provides the wrong clock frequency after power up.
 * Retries a PHY reset / D3-D0 power cycle sequence up to
 * E1000_MAX_PLL_TRIES times until the PHY PLL reports configured.
 * Register write ordering below is part of the workaround — do not
 * reorder.
 **/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
    s32 ret_val;
    u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
    u16 nvm_word, phy_word, pci_word, tmp_nvm;
    int i;

    /* Get and set needed register values; force internal MDIO while
     * the workaround runs (EXT_MDIO cleared), restored at the end.
     */
    wuc = rd32(E1000_WUC);
    mdicnfg = rd32(E1000_MDICNFG);
    reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
    wr32(E1000_MDICNFG, reg_val);

    /* Get data from NVM, or set default */
    ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
                      &nvm_word);
    if (ret_val)
        nvm_word = E1000_INVM_DEFAULT_AL;
    tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
    igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
    phy_word = E1000_PHY_PLL_UNCONF;
    for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
        /* check current state directly from internal PHY */
        igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
        if ((phy_word & E1000_PHY_PLL_UNCONF)
            != E1000_PHY_PLL_UNCONF) {
            /* PLL configured — workaround succeeded. */
            ret_val = 0;
            break;
        } else {
            ret_val = -E1000_ERR_PHY;
        }
        /* directly reset the internal PHY */
        ctrl = rd32(E1000_CTRL);
        wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);

        ctrl_ext = rd32(E1000_CTRL_EXT);
        ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
        wr32(E1000_CTRL_EXT, ctrl_ext);

        /* Disable wake, then load the workaround autoload value. */
        wr32(E1000_WUC, 0);
        reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
        wr32(E1000_EEARBC_I210, reg_val);

        /* Bounce the device through D3hot and back to D0. */
        igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
        pci_word |= E1000_PCI_PMCSR_D3;
        igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
        usleep_range(1000, 2000);
        pci_word &= ~E1000_PCI_PMCSR_D3;
        igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
        reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
        wr32(E1000_EEARBC_I210, reg_val);

        /* restore WUC register */
        wr32(E1000_WUC, wuc);
    }
    igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
    /* restore MDICNFG setting */
    wr32(E1000_MDICNFG, mdicnfg);
    return ret_val;
}
0885 
0886 /**
0887  *  igb_get_cfg_done_i210 - Read config done bit
0888  *  @hw: pointer to the HW structure
0889  *
0890  *  Read the management control register for the config done bit for
0891  *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
0892  *  to read the config done bit, so an error is *ONLY* logged and returns
0893  *  0.  If we were to return with error, EEPROM-less silicon
0894  *  would not be able to be reset or change link.
0895  **/
0896 s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
0897 {
0898     s32 timeout = PHY_CFG_TIMEOUT;
0899     u32 mask = E1000_NVM_CFG_DONE_PORT_0;
0900 
0901     while (timeout) {
0902         if (rd32(E1000_EEMNGCTL_I210) & mask)
0903             break;
0904         usleep_range(1000, 2000);
0905         timeout--;
0906     }
0907     if (!timeout)
0908         hw_dbg("MNG configuration cycle has not completed.\n");
0909 
0910     return 0;
0911 }