Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright (c)  2018 Intel Corporation */
0003 
0004 #include <linux/delay.h>
0005 
0006 #include "igc_hw.h"
0007 
0008 /**
0009  * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
0010  * @hw: pointer to the HW structure
0011  *
0012  * Acquire the necessary semaphores for exclusive access to the EEPROM.
0013  * Set the EEPROM access request bit and wait for EEPROM access grant bit.
0014  * Return successful if access grant bit set, else clear the request for
0015  * EEPROM access and return -IGC_ERR_NVM (-1).
0016  */
0017 static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
0018 {
0019     return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
0020 }
0021 
0022 /**
0023  * igc_release_nvm_i225 - Release exclusive access to EEPROM
0024  * @hw: pointer to the HW structure
0025  *
0026  * Stop any current commands to the EEPROM and clear the EEPROM request bit,
0027  * then release the semaphores acquired.
0028  */
0029 static void igc_release_nvm_i225(struct igc_hw *hw)
0030 {
0031     igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
0032 }
0033 
0034 /**
0035  * igc_get_hw_semaphore_i225 - Acquire hardware semaphore
0036  * @hw: pointer to the HW structure
0037  *
0038  * Acquire the HW semaphore to access the PHY or NVM
0039  */
0040 static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
0041 {
0042     s32 timeout = hw->nvm.word_size + 1;
0043     s32 i = 0;
0044     u32 swsm;
0045 
0046     /* Get the SW semaphore */
0047     while (i < timeout) {
0048         swsm = rd32(IGC_SWSM);
0049         if (!(swsm & IGC_SWSM_SMBI))
0050             break;
0051 
0052         usleep_range(500, 600);
0053         i++;
0054     }
0055 
0056     if (i == timeout) {
0057         /* In rare circumstances, the SW semaphore may already be held
0058          * unintentionally. Clear the semaphore once before giving up.
0059          */
0060         if (hw->dev_spec._base.clear_semaphore_once) {
0061             hw->dev_spec._base.clear_semaphore_once = false;
0062             igc_put_hw_semaphore(hw);
0063             for (i = 0; i < timeout; i++) {
0064                 swsm = rd32(IGC_SWSM);
0065                 if (!(swsm & IGC_SWSM_SMBI))
0066                     break;
0067 
0068                 usleep_range(500, 600);
0069             }
0070         }
0071 
0072         /* If we do not have the semaphore here, we have to give up. */
0073         if (i == timeout) {
0074             hw_dbg("Driver can't access device - SMBI bit is set.\n");
0075             return -IGC_ERR_NVM;
0076         }
0077     }
0078 
0079     /* Get the FW semaphore. */
0080     for (i = 0; i < timeout; i++) {
0081         swsm = rd32(IGC_SWSM);
0082         wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI);
0083 
0084         /* Semaphore acquired if bit latched */
0085         if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI)
0086             break;
0087 
0088         usleep_range(500, 600);
0089     }
0090 
0091     if (i == timeout) {
0092         /* Release semaphores */
0093         igc_put_hw_semaphore(hw);
0094         hw_dbg("Driver can't access the NVM\n");
0095         return -IGC_ERR_NVM;
0096     }
0097 
0098     return 0;
0099 }
0100 
0101 /**
0102  * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
0103  * @hw: pointer to the HW structure
0104  * @mask: specifies which semaphore to acquire
0105  *
0106  * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
0107  * will also specify which port we're acquiring the lock for.
0108  */
0109 s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
0110 {
0111     s32 i = 0, timeout = 200;
0112     u32 fwmask = mask << 16;
0113     u32 swmask = mask;
0114     s32 ret_val = 0;
0115     u32 swfw_sync;
0116 
0117     while (i < timeout) {
0118         if (igc_get_hw_semaphore_i225(hw)) {
0119             ret_val = -IGC_ERR_SWFW_SYNC;
0120             goto out;
0121         }
0122 
0123         swfw_sync = rd32(IGC_SW_FW_SYNC);
0124         if (!(swfw_sync & (fwmask | swmask)))
0125             break;
0126 
0127         /* Firmware currently using resource (fwmask) */
0128         igc_put_hw_semaphore(hw);
0129         mdelay(5);
0130         i++;
0131     }
0132 
0133     if (i == timeout) {
0134         hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
0135         ret_val = -IGC_ERR_SWFW_SYNC;
0136         goto out;
0137     }
0138 
0139     swfw_sync |= swmask;
0140     wr32(IGC_SW_FW_SYNC, swfw_sync);
0141 
0142     igc_put_hw_semaphore(hw);
0143 out:
0144     return ret_val;
0145 }
0146 
0147 /**
0148  * igc_release_swfw_sync_i225 - Release SW/FW semaphore
0149  * @hw: pointer to the HW structure
0150  * @mask: specifies which semaphore to acquire
0151  *
0152  * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
0153  * will also specify which port we're releasing the lock for.
0154  */
0155 void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
0156 {
0157     u32 swfw_sync;
0158 
0159     /* Releasing the resource requires first getting the HW semaphore.
0160      * If we fail to get the semaphore, there is nothing we can do,
0161      * except log an error and quit. We are not allowed to hang here
0162      * indefinitely, as it may cause denial of service or system crash.
0163      */
0164     if (igc_get_hw_semaphore_i225(hw)) {
0165         hw_dbg("Failed to release SW_FW_SYNC.\n");
0166         return;
0167     }
0168 
0169     swfw_sync = rd32(IGC_SW_FW_SYNC);
0170     swfw_sync &= ~mask;
0171     wr32(IGC_SW_FW_SYNC, swfw_sync);
0172 
0173     igc_put_hw_semaphore(hw);
0174 }
0175 
0176 /**
0177  * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register
0178  * @hw: pointer to the HW structure
0179  * @offset: offset of word in the Shadow Ram to read
0180  * @words: number of words to read
0181  * @data: word read from the Shadow Ram
0182  *
0183  * Reads a 16 bit word from the Shadow Ram using the EERD register.
0184  * Uses necessary synchronization semaphores.
0185  */
0186 static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
0187                   u16 *data)
0188 {
0189     s32 status = 0;
0190     u16 i, count;
0191 
0192     /* We cannot hold synchronization semaphores for too long,
0193      * because of forceful takeover procedure. However it is more efficient
0194      * to read in bursts than synchronizing access for each word.
0195      */
0196     for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
0197         count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
0198             IGC_EERD_EEWR_MAX_COUNT : (words - i);
0199 
0200         status = hw->nvm.ops.acquire(hw);
0201         if (status)
0202             break;
0203 
0204         status = igc_read_nvm_eerd(hw, offset, count, data + i);
0205         hw->nvm.ops.release(hw);
0206         if (status)
0207             break;
0208     }
0209 
0210     return status;
0211 }
0212 
0213 /**
0214  * igc_write_nvm_srwr - Write to Shadow Ram using EEWR
0215  * @hw: pointer to the HW structure
0216  * @offset: offset within the Shadow Ram to be written to
0217  * @words: number of words to write
0218  * @data: 16 bit word(s) to be written to the Shadow Ram
0219  *
0220  * Writes data to Shadow Ram at offset using EEWR register.
0221  *
0222  * If igc_update_nvm_checksum is not called after this function , the
0223  * Shadow Ram will most likely contain an invalid checksum.
0224  */
0225 static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
0226                   u16 *data)
0227 {
0228     struct igc_nvm_info *nvm = &hw->nvm;
0229     s32 ret_val = -IGC_ERR_NVM;
0230     u32 attempts = 100000;
0231     u32 i, k, eewr = 0;
0232 
0233     /* A check for invalid values:  offset too large, too many words,
0234      * too many words for the offset, and not enough words.
0235      */
0236     if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
0237         words == 0) {
0238         hw_dbg("nvm parameter(s) out of bounds\n");
0239         return ret_val;
0240     }
0241 
0242     for (i = 0; i < words; i++) {
0243         ret_val = -IGC_ERR_NVM;
0244         eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
0245             (data[i] << IGC_NVM_RW_REG_DATA) |
0246             IGC_NVM_RW_REG_START;
0247 
0248         wr32(IGC_SRWR, eewr);
0249 
0250         for (k = 0; k < attempts; k++) {
0251             if (IGC_NVM_RW_REG_DONE &
0252                 rd32(IGC_SRWR)) {
0253                 ret_val = 0;
0254                 break;
0255             }
0256             udelay(5);
0257         }
0258 
0259         if (ret_val) {
0260             hw_dbg("Shadow RAM write EEWR timed out\n");
0261             break;
0262         }
0263     }
0264 
0265     return ret_val;
0266 }
0267 
0268 /**
0269  * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
0270  * @hw: pointer to the HW structure
0271  * @offset: offset within the Shadow RAM to be written to
0272  * @words: number of words to write
0273  * @data: 16 bit word(s) to be written to the Shadow RAM
0274  *
0275  * Writes data to Shadow RAM at offset using EEWR register.
0276  *
0277  * If igc_update_nvm_checksum is not called after this function , the
0278  * data will not be committed to FLASH and also Shadow RAM will most likely
0279  * contain an invalid checksum.
0280  *
0281  * If error code is returned, data and Shadow RAM may be inconsistent - buffer
0282  * partially written.
0283  */
0284 static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
0285                    u16 *data)
0286 {
0287     s32 status = 0;
0288     u16 i, count;
0289 
0290     /* We cannot hold synchronization semaphores for too long,
0291      * because of forceful takeover procedure. However it is more efficient
0292      * to write in bursts than synchronizing access for each word.
0293      */
0294     for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
0295         count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
0296             IGC_EERD_EEWR_MAX_COUNT : (words - i);
0297 
0298         status = hw->nvm.ops.acquire(hw);
0299         if (status)
0300             break;
0301 
0302         status = igc_write_nvm_srwr(hw, offset, count, data + i);
0303         hw->nvm.ops.release(hw);
0304         if (status)
0305             break;
0306     }
0307 
0308     return status;
0309 }
0310 
0311 /**
0312  * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
0313  * @hw: pointer to the HW structure
0314  *
0315  * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
0316  * and then verifies that the sum of the EEPROM is equal to 0xBABA.
0317  */
0318 static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
0319 {
0320     s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count,
0321                u16 *data);
0322     s32 status = 0;
0323 
0324     status = hw->nvm.ops.acquire(hw);
0325     if (status)
0326         goto out;
0327 
0328     /* Replace the read function with semaphore grabbing with
0329      * the one that skips this for a while.
0330      * We have semaphore taken already here.
0331      */
0332     read_op_ptr = hw->nvm.ops.read;
0333     hw->nvm.ops.read = igc_read_nvm_eerd;
0334 
0335     status = igc_validate_nvm_checksum(hw);
0336 
0337     /* Revert original read operation. */
0338     hw->nvm.ops.read = read_op_ptr;
0339 
0340     hw->nvm.ops.release(hw);
0341 
0342 out:
0343     return status;
0344 }
0345 
0346 /**
0347  * igc_pool_flash_update_done_i225 - Pool FLUDONE status
0348  * @hw: pointer to the HW structure
0349  */
0350 static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
0351 {
0352     s32 ret_val = -IGC_ERR_NVM;
0353     u32 i, reg;
0354 
0355     for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
0356         reg = rd32(IGC_EECD);
0357         if (reg & IGC_EECD_FLUDONE_I225) {
0358             ret_val = 0;
0359             break;
0360         }
0361         udelay(5);
0362     }
0363 
0364     return ret_val;
0365 }
0366 
0367 /**
0368  * igc_update_flash_i225 - Commit EEPROM to the flash
0369  * @hw: pointer to the HW structure
0370  */
0371 static s32 igc_update_flash_i225(struct igc_hw *hw)
0372 {
0373     s32 ret_val = 0;
0374     u32 flup;
0375 
0376     ret_val = igc_pool_flash_update_done_i225(hw);
0377     if (ret_val == -IGC_ERR_NVM) {
0378         hw_dbg("Flash update time out\n");
0379         goto out;
0380     }
0381 
0382     flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225;
0383     wr32(IGC_EECD, flup);
0384 
0385     ret_val = igc_pool_flash_update_done_i225(hw);
0386     if (ret_val)
0387         hw_dbg("Flash update time out\n");
0388     else
0389         hw_dbg("Flash update complete\n");
0390 
0391 out:
0392     return ret_val;
0393 }
0394 
0395 /**
0396  * igc_update_nvm_checksum_i225 - Update EEPROM checksum
0397  * @hw: pointer to the HW structure
0398  *
0399  * Updates the EEPROM checksum by reading/adding each word of the EEPROM
0400  * up to the checksum.  Then calculates the EEPROM checksum and writes the
0401  * value to the EEPROM. Next commit EEPROM data onto the Flash.
0402  */
0403 static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
0404 {
0405     u16 checksum = 0;
0406     s32 ret_val = 0;
0407     u16 i, nvm_data;
0408 
0409     /* Read the first word from the EEPROM. If this times out or fails, do
0410      * not continue or we could be in for a very long wait while every
0411      * EEPROM read fails
0412      */
0413     ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
0414     if (ret_val) {
0415         hw_dbg("EEPROM read failed\n");
0416         goto out;
0417     }
0418 
0419     ret_val = hw->nvm.ops.acquire(hw);
0420     if (ret_val)
0421         goto out;
0422 
0423     /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
0424      * because we do not want to take the synchronization
0425      * semaphores twice here.
0426      */
0427 
0428     for (i = 0; i < NVM_CHECKSUM_REG; i++) {
0429         ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
0430         if (ret_val) {
0431             hw->nvm.ops.release(hw);
0432             hw_dbg("NVM Read Error while updating checksum.\n");
0433             goto out;
0434         }
0435         checksum += nvm_data;
0436     }
0437     checksum = (u16)NVM_SUM - checksum;
0438     ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
0439                      &checksum);
0440     if (ret_val) {
0441         hw->nvm.ops.release(hw);
0442         hw_dbg("NVM Write Error while updating checksum.\n");
0443         goto out;
0444     }
0445 
0446     hw->nvm.ops.release(hw);
0447 
0448     ret_val = igc_update_flash_i225(hw);
0449 
0450 out:
0451     return ret_val;
0452 }
0453 
0454 /**
0455  * igc_get_flash_presence_i225 - Check if flash device is detected
0456  * @hw: pointer to the HW structure
0457  */
0458 bool igc_get_flash_presence_i225(struct igc_hw *hw)
0459 {
0460     bool ret_val = false;
0461     u32 eec = 0;
0462 
0463     eec = rd32(IGC_EECD);
0464     if (eec & IGC_EECD_FLASH_DETECTED_I225)
0465         ret_val = true;
0466 
0467     return ret_val;
0468 }
0469 
0470 /**
0471  * igc_init_nvm_params_i225 - Init NVM func ptrs.
0472  * @hw: pointer to the HW structure
0473  */
0474 s32 igc_init_nvm_params_i225(struct igc_hw *hw)
0475 {
0476     struct igc_nvm_info *nvm = &hw->nvm;
0477 
0478     nvm->ops.acquire = igc_acquire_nvm_i225;
0479     nvm->ops.release = igc_release_nvm_i225;
0480 
0481     /* NVM Function Pointers */
0482     if (igc_get_flash_presence_i225(hw)) {
0483         nvm->ops.read = igc_read_nvm_srrd_i225;
0484         nvm->ops.write = igc_write_nvm_srwr_i225;
0485         nvm->ops.validate = igc_validate_nvm_checksum_i225;
0486         nvm->ops.update = igc_update_nvm_checksum_i225;
0487     } else {
0488         nvm->ops.read = igc_read_nvm_eerd;
0489         nvm->ops.write = NULL;
0490         nvm->ops.validate = NULL;
0491         nvm->ops.update = NULL;
0492     }
0493     return 0;
0494 }
0495 
0496 /**
0497  *  igc_set_eee_i225 - Enable/disable EEE support
0498  *  @hw: pointer to the HW structure
0499  *  @adv2p5G: boolean flag enabling 2.5G EEE advertisement
0500  *  @adv1G: boolean flag enabling 1G EEE advertisement
0501  *  @adv100M: boolean flag enabling 100M EEE advertisement
0502  *
0503  *  Enable/disable EEE based on setting in dev_spec structure.
0504  **/
0505 s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
0506              bool adv100M)
0507 {
0508     u32 ipcnfg, eeer;
0509 
0510     ipcnfg = rd32(IGC_IPCNFG);
0511     eeer = rd32(IGC_EEER);
0512 
0513     /* enable or disable per user setting */
0514     if (hw->dev_spec._base.eee_enable) {
0515         u32 eee_su = rd32(IGC_EEE_SU);
0516 
0517         if (adv100M)
0518             ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
0519         else
0520             ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;
0521 
0522         if (adv1G)
0523             ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
0524         else
0525             ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;
0526 
0527         if (adv2p5G)
0528             ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
0529         else
0530             ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;
0531 
0532         eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
0533              IGC_EEER_LPI_FC);
0534 
0535         /* This bit should not be set in normal operation. */
0536         if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
0537             hw_dbg("LPI Clock Stop Bit should not be set!\n");
0538     } else {
0539         ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
0540                 IGC_IPCNFG_EEE_100M_AN);
0541         eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
0542               IGC_EEER_LPI_FC);
0543     }
0544     wr32(IGC_IPCNFG, ipcnfg);
0545     wr32(IGC_EEER, eeer);
0546     rd32(IGC_IPCNFG);
0547     rd32(IGC_EEER);
0548 
0549     return IGC_SUCCESS;
0550 }
0551 
/**
 * igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds
 * @hw: pointer to the HW structure
 * @link: bool indicating link status
 *
 * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
 * settings, otherwise specify that there is no LTR requirement.
 */
s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
{
    u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
    u16 speed, duplex;
    s32 size;

    /* If we do not have link, LTR thresholds are zero. */
    if (link) {
        hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);

        /* Check if using copper interface with EEE enabled or if the
         * link speed is 10 Mbps.
         */
        if (hw->dev_spec._base.eee_enable &&
            speed != SPEED_10) {
            /* EEE enabled, so send LTRMAX threshold. */
            ltrc = rd32(IGC_LTRC) |
                   IGC_LTRC_EEEMS_EN;
            wr32(IGC_LTRC, ltrc);

            /* Calculate tw_system (nsec): the wake time field in
             * EEE_SU is in 500 nsec units; 100M and 1000M use
             * different fields of the register.
             */
            if (speed == SPEED_100) {
                tw_system = ((rd32(IGC_EEE_SU) &
                         IGC_TW_SYSTEM_100_MASK) >>
                         IGC_TW_SYSTEM_100_SHIFT) * 500;
            } else {
                tw_system = (rd32(IGC_EEE_SU) &
                         IGC_TW_SYSTEM_1000_MASK) * 500;
            }
        } else {
            /* No EEE contribution to the latency budget. */
            tw_system = 0;
        }

        /* Get the Rx packet buffer size. */
        size = rd32(IGC_RXPBS) &
               IGC_RXPBS_SIZE_I225_MASK;

        /* Calculations vary based on DMAC settings. */
        if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) {
            /* With DMA coalescing the usable buffer is what
             * remains above the DMAC threshold.
             */
            size -= (rd32(IGC_DMACR) &
                 IGC_DMACR_DMACTHR_MASK) >>
                 IGC_DMACR_DMACTHR_SHIFT;
            /* Convert size to bits. */
            size *= 1024 * 8;
        } else {
            /* Convert size to bytes, subtract the MTU, and then
             * convert the size to bits.
             * NOTE(review): no MTU is actually subtracted here
             * despite the comment above - confirm against the
             * datasheet whether that is intentional.
             */
            size *= 1024;
            size *= 8;
        }

        if (size < 0) {
            hw_dbg("Invalid effective Rx buffer size %d\n",
                   size);
            return -IGC_ERR_CONFIG;
        }

        /* Calculate the thresholds. Since speed is in Mbps, simplify
         * the calculation by multiplying size/speed by 1000 for result
         * to be in nsec before dividing by the scale in nsec. Set the
         * scale such that the LTR threshold fits in the register.
         */
        ltr_min = (1000 * size) / speed;
        ltr_max = ltr_min + tw_system;
        scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
                IGC_LTRMINV_SCALE_32768;
        scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
                IGC_LTRMAXV_SCALE_32768;
        /* NOTE(review): if a scaled threshold is 0, the -= 1 below
         * wraps the u32 to 0xffffffff - presumably size/speed keeps
         * the values nonzero; confirm.
         */
        ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
        ltr_min -= 1;
        ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;
        ltr_max -= 1;

        /* Only write the LTR thresholds if they differ from before. */
        ltrv = rd32(IGC_LTRMINV);
        if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
            ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
                   (scale_min << IGC_LTRMINV_SCALE_SHIFT);
            wr32(IGC_LTRMINV, ltrv);
        }

        ltrv = rd32(IGC_LTRMAXV);
        if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
            ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
                   (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
            wr32(IGC_LTRMAXV, ltrv);
        }
    }

    return IGC_SUCCESS;
}