0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright(c) 2013 - 2018 Intel Corporation. */
0003 
0004 #include "i40e_prototype.h"
0005 
0006 /**
0007  * i40e_init_nvm - Initialize NVM function pointers
0008  * @hw: pointer to the HW structure
0009  *
0010  * Sets up the function pointers and the NVM info structure. Should be called
0011  * once per NVM initialization, e.g. inside i40e_init_shared_code().
0012  * Note that the term NVM is used here (and in all functions in this file)
0013  * as an equivalent of the FLASH part mapped into the SR; the FLASH is
0014  * always accessed through the Shadow RAM.
0015  **/
0016 i40e_status i40e_init_nvm(struct i40e_hw *hw)
0017 {
0018     struct i40e_nvm_info *nvm = &hw->nvm;
0019     i40e_status ret_code = 0;
0020     u32 fla, gens;
0021     u8 sr_size;
0022 
0023     /* The SR size is stored regardless of the nvm programming mode
0024      * as the blank mode may be used in the factory line.
0025      */
0026     gens = rd32(hw, I40E_GLNVM_GENS);
0027     sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
0028                I40E_GLNVM_GENS_SR_SIZE_SHIFT);
0029     /* Convert to words (the field encodes the size as a power of two, in KB) */
0030     nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
0031 
0032     /* Check if we are in the normal or blank NVM programming mode */
0033     fla = rd32(hw, I40E_GLNVM_FLA);
0034     if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
0035         /* Max NVM timeout */
0036         nvm->timeout = I40E_MAX_NVM_TIMEOUT;
0037         nvm->blank_nvm_mode = false;
0038     } else { /* Blank programming mode */
0039         nvm->blank_nvm_mode = true;
0040         ret_code = I40E_ERR_NVM_BLANK_MODE;
0041         i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
0042     }
0043 
0044     return ret_code;
0045 }
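
/*
 * Illustrative sketch (not part of the driver): how the SR_SIZE field read
 * from GLNVM_GENS above becomes a Shadow RAM size in 16-bit words.  The
 * field position/width below are assumptions for the example; the real
 * definitions live in i40e_register.h.
 */
#include <stdint.h>

#define EX_GENS_SR_SIZE_SHIFT 5                 /* assumed field position */
#define EX_GENS_SR_SIZE_MASK  (0x7u << EX_GENS_SR_SIZE_SHIFT)
#define EX_SR_WORDS_IN_1KB    512u              /* 1KB == 512 16-bit words */

static uint32_t ex_sr_size_words(uint32_t gens)
{
	uint8_t sr_size = (gens & EX_GENS_SR_SIZE_MASK) >> EX_GENS_SR_SIZE_SHIFT;

	/* the field encodes the size as 2^sr_size KB; convert to words */
	return (1u << sr_size) * EX_SR_WORDS_IN_1KB;
}
/* e.g. a field value of 6 gives 64KB, i.e. 32768 words */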
0046 
0047 /**
0048  * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
0049  * @hw: pointer to the HW structure
0050  * @access: NVM access type (read or write)
0051  *
0052  * This function will request NVM ownership for reading or writing
0053  * via the proper Admin Command.
0054  **/
0055 i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
0056                        enum i40e_aq_resource_access_type access)
0057 {
0058     i40e_status ret_code = 0;
0059     u64 gtime, timeout;
0060     u64 time_left = 0;
0061 
0062     if (hw->nvm.blank_nvm_mode)
0063         goto i40e_acquire_nvm_exit;
0064 
0065     ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
0066                         0, &time_left, NULL);
0067     /* Reading the Global Device Timer */
0068     gtime = rd32(hw, I40E_GLVFGEN_TIMER);
0069 
0070     /* Store the timeout */
0071     hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
0072 
0073     if (ret_code)
0074         i40e_debug(hw, I40E_DEBUG_NVM,
0075                "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
0076                access, time_left, ret_code, hw->aq.asq_last_status);
0077 
0078     if (ret_code && time_left) {
0079         /* Poll until the current NVM owner times out */
0080         timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
0081         while ((gtime < timeout) && time_left) {
0082             usleep_range(10000, 20000);
0083             gtime = rd32(hw, I40E_GLVFGEN_TIMER);
0084             ret_code = i40e_aq_request_resource(hw,
0085                             I40E_NVM_RESOURCE_ID,
0086                             access, 0, &time_left,
0087                             NULL);
0088             if (!ret_code) {
0089                 hw->nvm.hw_semaphore_timeout =
0090                         I40E_MS_TO_GTIME(time_left) + gtime;
0091                 break;
0092             }
0093         }
0094         if (ret_code) {
0095             hw->nvm.hw_semaphore_timeout = 0;
0096             i40e_debug(hw, I40E_DEBUG_NVM,
0097                    "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
0098                    time_left, ret_code, hw->aq.asq_last_status);
0099         }
0100     }
0101 
0102 i40e_acquire_nvm_exit:
0103     return ret_code;
0104 }
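
/*
 * Illustrative sketch (not part of the driver): the semaphore-timeout
 * bookkeeping used above.  The expiry is the current GLVFGEN_TIMER value
 * plus the firmware-reported time_left; the conversion factor is taken as
 * 1 purely for the example, while the driver uses I40E_MS_TO_GTIME() for
 * the real unit conversion.
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t ex_ms_to_gtime(uint64_t ms)
{
	return ms;                       /* assumed 1:1 for illustration */
}

static uint64_t ex_semaphore_deadline(uint64_t gtime_now, uint64_t time_left_ms)
{
	return gtime_now + ex_ms_to_gtime(time_left_ms);
}

static bool ex_semaphore_expired(uint64_t gtime_now, uint64_t deadline)
{
	return gtime_now >= deadline;    /* the current owner's grant ran out */
}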
0105 
0106 /**
0107  * i40e_release_nvm - Generic request for releasing the NVM ownership
0108  * @hw: pointer to the HW structure
0109  *
0110  * This function will release NVM resource via the proper Admin Command.
0111  **/
0112 void i40e_release_nvm(struct i40e_hw *hw)
0113 {
0114     i40e_status ret_code = I40E_SUCCESS;
0115     u32 total_delay = 0;
0116 
0117     if (hw->nvm.blank_nvm_mode)
0118         return;
0119 
0120     ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
0121 
0122     /* In some rare cases releasing the resource results in an admin Q
0123      * timeout, so retry until it succeeds or the command timeout elapses
0124      */
0125     while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
0126            (total_delay < hw->aq.asq_cmd_timeout)) {
0127         usleep_range(1000, 2000);
0128         ret_code = i40e_aq_release_resource(hw,
0129                             I40E_NVM_RESOURCE_ID,
0130                             0, NULL);
0131         total_delay++;
0132     }
0133 }
0134 
0135 /**
0136  * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
0137  * @hw: pointer to the HW structure
0138  *
0139  * Polls the SRCTL Shadow RAM register done bit.
0140  **/
0141 static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
0142 {
0143     i40e_status ret_code = I40E_ERR_TIMEOUT;
0144     u32 srctl, wait_cnt;
0145 
0146     /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
0147     for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
0148         srctl = rd32(hw, I40E_GLNVM_SRCTL);
0149         if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
0150             ret_code = 0;
0151             break;
0152         }
0153         udelay(5);
0154     }
0155     if (ret_code == I40E_ERR_TIMEOUT)
0156         i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set\n");
0157     return ret_code;
0158 }
0159 
0160 /**
0161  * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
0162  * @hw: pointer to the HW structure
0163  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
0164  * @data: word read from the Shadow RAM
0165  *
0166  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
0167  **/
0168 static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
0169                         u16 *data)
0170 {
0171     i40e_status ret_code = I40E_ERR_TIMEOUT;
0172     u32 sr_reg;
0173 
0174     if (offset >= hw->nvm.sr_size) {
0175         i40e_debug(hw, I40E_DEBUG_NVM,
0176                "NVM read error: offset %d beyond Shadow RAM limit %d\n",
0177                offset, hw->nvm.sr_size);
0178         ret_code = I40E_ERR_PARAM;
0179         goto read_nvm_exit;
0180     }
0181 
0182     /* Poll the done bit first */
0183     ret_code = i40e_poll_sr_srctl_done_bit(hw);
0184     if (!ret_code) {
0185         /* Write the address and start reading */
0186         sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
0187              BIT(I40E_GLNVM_SRCTL_START_SHIFT);
0188         wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
0189 
0190         /* Poll I40E_GLNVM_SRCTL until the done bit is set */
0191         ret_code = i40e_poll_sr_srctl_done_bit(hw);
0192         if (!ret_code) {
0193             sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
0194             *data = (u16)((sr_reg &
0195                        I40E_GLNVM_SRDATA_RDDATA_MASK)
0196                     >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
0197         }
0198     }
0199     if (ret_code)
0200         i40e_debug(hw, I40E_DEBUG_NVM,
0201                "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
0202                offset);
0203 
0204 read_nvm_exit:
0205     return ret_code;
0206 }
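
/*
 * Illustrative sketch (not part of the driver): the SRCTL handshake used by
 * i40e_read_nvm_word_srctl() above, replayed against a mocked register file
 * so the sequence can be followed without hardware.  The register indices,
 * field positions and the fake Shadow RAM are assumptions for the example.
 */
#include <stdint.h>

#define EX_SRCTL          0              /* mock register indices */
#define EX_SRDATA         1
#define EX_ADDR_SHIFT     14             /* assumed field layout */
#define EX_START_BIT      (1u << 30)
#define EX_DONE_BIT       (1u << 31)
#define EX_RDDATA_SHIFT   16
#define EX_MOCK_SR_WORDS  8192u          /* fake 8K-word Shadow RAM */

static uint32_t ex_regs[2];
static uint16_t ex_shadow_ram[EX_MOCK_SR_WORDS];

static void ex_wr32(unsigned int reg, uint32_t val)
{
	ex_regs[reg] = val;
	if (reg == EX_SRCTL && (val & EX_START_BIT)) {
		uint32_t addr = (val >> EX_ADDR_SHIFT) % EX_MOCK_SR_WORDS;

		/* the mocked "hardware" completes the read instantly */
		ex_regs[EX_SRDATA] = (uint32_t)ex_shadow_ram[addr] << EX_RDDATA_SHIFT;
		ex_regs[EX_SRCTL] |= EX_DONE_BIT;
	}
}

static uint16_t ex_read_sr_word(uint16_t offset)
{
	/* write the address and the start bit, as the driver does */
	ex_wr32(EX_SRCTL, ((uint32_t)offset << EX_ADDR_SHIFT) | EX_START_BIT);
	/* the real code bounds this poll (see i40e_poll_sr_srctl_done_bit) */
	while (!(ex_regs[EX_SRCTL] & EX_DONE_BIT))
		;
	return (uint16_t)(ex_regs[EX_SRDATA] >> EX_RDDATA_SHIFT);
}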
0207 
0208 /**
0209  * i40e_read_nvm_aq - Read Shadow RAM.
0210  * @hw: pointer to the HW structure.
0211  * @module_pointer: module pointer location in words from the NVM beginning
0212  * @offset: offset in words from module start
0213  * @words: number of words to read
0214  * @data: buffer to store the words read from the Shadow RAM
0215  * @last_command: tells the AdminQ that this is the last command
0216  *
0217  * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
0218  **/
0219 static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
0220                     u8 module_pointer, u32 offset,
0221                     u16 words, void *data,
0222                     bool last_command)
0223 {
0224     i40e_status ret_code = I40E_ERR_NVM;
0225     struct i40e_asq_cmd_details cmd_details;
0226 
0227     memset(&cmd_details, 0, sizeof(cmd_details));
0228     cmd_details.wb_desc = &hw->nvm_wb_desc;
0229 
0230     /* Here we are checking the SR limit only for the flat memory model.
0231      * We cannot do it for the module-based model, as we did not acquire
0232      * the NVM resource yet (we cannot get the module pointer value).
0233      * Firmware will check the module-based model.
0234      */
0235     if ((offset + words) > hw->nvm.sr_size)
0236         i40e_debug(hw, I40E_DEBUG_NVM,
0237                "NVM read error: offset %d beyond Shadow RAM limit %d\n",
0238                (offset + words), hw->nvm.sr_size);
0239     else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
0240         /* We can read only up to 4KB (one sector) in one AQ read */
0241         i40e_debug(hw, I40E_DEBUG_NVM,
0242                "NVM read fail error: tried to read %d words, limit is %d.\n",
0243                words, I40E_SR_SECTOR_SIZE_IN_WORDS);
0244     else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
0245          != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
0246         /* A single read cannot spread over two sectors */
0247         i40e_debug(hw, I40E_DEBUG_NVM,
0248                "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
0249                offset, words);
0250     else
0251         ret_code = i40e_aq_read_nvm(hw, module_pointer,
0252                         2 * offset,  /*bytes*/
0253                         2 * words,   /*bytes*/
0254                         data, last_command, &cmd_details);
0255 
0256     return ret_code;
0257 }
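
/*
 * Illustrative sketch (not part of the driver): the sector-boundary check
 * used above, isolated so the arithmetic is easy to follow.  A sector is
 * 4KB, i.e. 2048 16-bit words, and a single AQ transfer must stay inside
 * one sector.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_SECTOR_WORDS 2048u

static bool ex_fits_in_one_sector(uint32_t offset, uint16_t words)
{
	return (offset + (words - 1)) / EX_SECTOR_WORDS ==
	       offset / EX_SECTOR_WORDS;
}
/*
 * ex_fits_in_one_sector(2000, 48) -> true  (words 2000..2047, sector 0)
 * ex_fits_in_one_sector(2000, 49) -> false (word 2048 lies in sector 1)
 */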
0258 
0259 /**
0260  * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
0261  * @hw: pointer to the HW structure
0262  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
0263  * @data: word read from the Shadow RAM
0264  *
0265  * Reads one 16 bit word from the Shadow RAM using the AdminQ
0266  **/
0267 static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
0268                      u16 *data)
0269 {
0270     i40e_status ret_code = I40E_ERR_TIMEOUT;
0271 
0272     ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
0273     *data = le16_to_cpu(*(__le16 *)data);
0274 
0275     return ret_code;
0276 }
0277 
0278 /**
0279  * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
0280  * @hw: pointer to the HW structure
0281  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
0282  * @data: word read from the Shadow RAM
0283  *
0284  * Reads one 16 bit word from the Shadow RAM.
0285  *
0286  * Do not use this function except in cases where the nvm lock is already
0287  * taken via i40e_acquire_nvm().
0288  **/
0289 static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
0290                     u16 offset, u16 *data)
0291 {
0292     if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
0293         return i40e_read_nvm_word_aq(hw, offset, data);
0294 
0295     return i40e_read_nvm_word_srctl(hw, offset, data);
0296 }
0297 
0298 /**
0299  * i40e_read_nvm_word - Reads an NVM word, acquiring the lock if necessary
0300  * @hw: pointer to the HW structure
0301  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
0302  * @data: word read from the Shadow RAM
0303  *
0304  * Reads one 16 bit word from the Shadow RAM.
0305  **/
0306 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
0307                    u16 *data)
0308 {
0309     i40e_status ret_code = 0;
0310 
0311     if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
0312         ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
0313     if (ret_code)
0314         return ret_code;
0315 
0316     ret_code = __i40e_read_nvm_word(hw, offset, data);
0317 
0318     if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
0319         i40e_release_nvm(hw);
0320 
0321     return ret_code;
0322 }
0323 
0324 /**
0325  * i40e_read_nvm_module_data - Reads an NVM buffer into a specified memory location
0326  * @hw: Pointer to the HW structure
0327  * @module_ptr: Pointer to module in words with respect to the NVM beginning
0328  * @module_offset: Offset in words from the module start
0329  * @data_offset: Offset in words from the start of the data area being read
0330  * @words_data_size: Number of words to read from the NVM
0331  * @data_ptr: Pointer to the memory location where the resulting buffer will be stored
0332  **/
0333 enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
0334                         u8 module_ptr,
0335                         u16 module_offset,
0336                         u16 data_offset,
0337                         u16 words_data_size,
0338                         u16 *data_ptr)
0339 {
0340     i40e_status status;
0341     u16 specific_ptr = 0;
0342     u16 ptr_value = 0;
0343     u32 offset = 0;
0344 
0345     if (module_ptr != 0) {
0346         status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
0347         if (status) {
0348             i40e_debug(hw, I40E_DEBUG_ALL,
0349                "Reading nvm word failed. Error code: %d.\n",
0350                    status);
0351             return I40E_ERR_NVM;
0352         }
0353     }
0354 #define I40E_NVM_INVALID_PTR_VAL 0x7FFF
0355 #define I40E_NVM_INVALID_VAL 0xFFFF
0356 
0357     /* Pointer not initialized */
0358     if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
0359         ptr_value == I40E_NVM_INVALID_VAL) {
0360         i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
0361         return I40E_ERR_BAD_PTR;
0362     }
0363 
0364     /* Check whether the module is in SR mapped area or outside */
0365     if (ptr_value & I40E_PTR_TYPE) {
0366         /* Pointer points outside of the Shadow RAM mapped area */
0367         i40e_debug(hw, I40E_DEBUG_ALL,
0368                "Reading nvm data failed. Pointer points outside of the Shadow RAM mapped area.\n");
0369 
0370         return I40E_ERR_PARAM;
0371     } else {
0372         /* Read from the Shadow RAM */
0373 
0374         status = i40e_read_nvm_word(hw, ptr_value + module_offset,
0375                         &specific_ptr);
0376         if (status) {
0377             i40e_debug(hw, I40E_DEBUG_ALL,
0378                "Reading nvm word failed. Error code: %d.\n",
0379                    status);
0380             return I40E_ERR_NVM;
0381         }
0382 
0383         offset = ptr_value + module_offset + specific_ptr +
0384             data_offset;
0385 
0386         status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
0387                           data_ptr);
0388         if (status) {
0389             i40e_debug(hw, I40E_DEBUG_ALL,
0390                "Reading nvm buffer failed. Error code: %d.\n",
0391                    status);
0392         }
0393     }
0394 
0395     return status;
0396 }
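
/*
 * Illustrative sketch (not part of the driver): the pointer-word checks used
 * above.  The values 0x7FFF and 0xFFFF mark an uninitialized pointer, and a
 * set type bit means the module lives outside the Shadow-RAM-mapped area;
 * the type bit is assumed to be bit 15 here, mirroring I40E_PTR_TYPE.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_INVALID_PTR_VAL 0x7FFF
#define EX_INVALID_VAL     0xFFFF
#define EX_PTR_TYPE_BIT    0x8000        /* assumption for the example */

static bool ex_module_ptr_usable(uint16_t ptr_value)
{
	if (ptr_value == EX_INVALID_PTR_VAL || ptr_value == EX_INVALID_VAL)
		return false;            /* pointer not initialized */
	if (ptr_value & EX_PTR_TYPE_BIT)
		return false;            /* module is outside the SR map */
	return true;
}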
0397 
0398 /**
0399  * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
0400  * @hw: pointer to the HW structure
0401  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
0402  * @words: (in) number of words to read; (out) number of words actually read
0403  * @data: words read from the Shadow RAM
0404  *
0405  * Reads 16 bit words (data buffer) from the SR one word at a time using
0406  * i40e_read_nvm_word_srctl(). The caller is responsible for any NVM
0407  * ownership handling that may be required.
0408  **/
0409 static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
0410                           u16 *words, u16 *data)
0411 {
0412     i40e_status ret_code = 0;
0413     u16 index, word;
0414 
0415     /* Loop through the selected region */
0416     for (word = 0; word < *words; word++) {
0417         index = offset + word;
0418         ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
0419         if (ret_code)
0420             break;
0421     }
0422 
0423     /* Update the number of words read from the Shadow RAM */
0424     *words = word;
0425 
0426     return ret_code;
0427 }
0428 
0429 /**
0430  * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
0431  * @hw: pointer to the HW structure
0432  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
0433  * @words: (in) number of words to read; (out) number of words actually read
0434  * @data: words read from the Shadow RAM
0435  *
0436  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
0437  * method. The caller is expected to have already taken NVM ownership
0438  * before calling this function.
0439  **/
0440 static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
0441                        u16 *words, u16 *data)
0442 {
0443     i40e_status ret_code;
0444     u16 read_size;
0445     bool last_cmd = false;
0446     u16 words_read = 0;
0447     u16 i = 0;
0448 
0449     do {
0450         /* Calculate the number of words we should read in this step.
0451          * The FVL AQ does not allow reading more than one page at a
0452          * time or crossing page boundaries.
0453          */
0454         if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
0455             read_size = min(*words,
0456                     (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
0457                       (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
0458         else
0459             read_size = min((*words - words_read),
0460                     I40E_SR_SECTOR_SIZE_IN_WORDS);
0461 
0462         /* Check if this is the last command and, if so, set the proper flag */
0463         if ((words_read + read_size) >= *words)
0464             last_cmd = true;
0465 
0466         ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
0467                         data + words_read, last_cmd);
0468         if (ret_code)
0469             goto read_nvm_buffer_aq_exit;
0470 
0471         /* Increment counter for words already read and move offset to
0472          * new read location
0473          */
0474         words_read += read_size;
0475         offset += read_size;
0476     } while (words_read < *words);
0477 
0478     for (i = 0; i < *words; i++)
0479         data[i] = le16_to_cpu(((__le16 *)data)[i]);
0480 
0481 read_nvm_buffer_aq_exit:
0482     *words = words_read;
0483     return ret_code;
0484 }
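
/*
 * Illustrative sketch (not part of the driver): how the loop above sizes
 * each AQ read so that no request exceeds one sector (2048 words) or
 * crosses a sector boundary.
 */
#include <stdint.h>

#define EX_SECTOR_WORDS 2048u

static uint16_t ex_next_read_size(uint16_t offset, uint16_t words_left)
{
	uint16_t to_boundary = EX_SECTOR_WORDS - (offset % EX_SECTOR_WORDS);

	return words_left < to_boundary ? words_left : to_boundary;
}
/*
 * Reading 5000 words starting at offset 100 yields chunks of 1948, 2048
 * and 1004 words; each chunk stays within a single sector.
 */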
0485 
0486 /**
0487  * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
0488  * @hw: pointer to the HW structure
0489  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
0490  * @words: (in) number of words to read; (out) number of words actually read
0491  * @data: words read from the Shadow RAM
0492  *
0493  * Reads 16 bit words (data buffer) from the SR using either the SRCTL
0494  * register or the AdminQ method, depending on the hw flags.
0495  **/
0496 static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
0497                       u16 offset, u16 *words,
0498                       u16 *data)
0499 {
0500     if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
0501         return i40e_read_nvm_buffer_aq(hw, offset, words, data);
0502 
0503     return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
0504 }
0505 
0506 /**
0507  * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
0508  * @hw: pointer to the HW structure
0509  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
0510  * @words: (in) number of words to read; (out) number of words actually read
0511  * @data: words read from the Shadow RAM
0512  *
0513  * Reads 16 bit words (data buffer) from the SR. When the AdminQ method is
0514  * used, the buffer read is preceded by the NVM ownership take
0515  * and followed by the release.
0516  **/
0517 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
0518                  u16 *words, u16 *data)
0519 {
0520     i40e_status ret_code = 0;
0521 
0522     if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
0523         ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
0524         if (!ret_code) {
0525             ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
0526                                data);
0527             i40e_release_nvm(hw);
0528         }
0529     } else {
0530         ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
0531     }
0532 
0533     return ret_code;
0534 }
0535 
0536 /**
0537  * i40e_write_nvm_aq - Writes Shadow RAM.
0538  * @hw: pointer to the HW structure.
0539  * @module_pointer: module pointer location in words from the NVM beginning
0540  * @offset: offset in words from module start
0541  * @words: number of words to write
0542  * @data: buffer with words to write to the Shadow RAM
0543  * @last_command: tells the AdminQ that this is the last command
0544  *
0545  * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
0546  **/
0547 static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
0548                      u32 offset, u16 words, void *data,
0549                      bool last_command)
0550 {
0551     i40e_status ret_code = I40E_ERR_NVM;
0552     struct i40e_asq_cmd_details cmd_details;
0553 
0554     memset(&cmd_details, 0, sizeof(cmd_details));
0555     cmd_details.wb_desc = &hw->nvm_wb_desc;
0556 
0557     /* Here we are checking the SR limit only for the flat memory model.
0558      * We cannot do it for the module-based model, as we did not acquire
0559      * the NVM resource yet (we cannot get the module pointer value).
0560      * Firmware will check the module-based model.
0561      */
0562     if ((offset + words) > hw->nvm.sr_size)
0563         i40e_debug(hw, I40E_DEBUG_NVM,
0564                "NVM write error: offset %d beyond Shadow RAM limit %d\n",
0565                (offset + words), hw->nvm.sr_size);
0566     else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
0567         /* We can write only up to 4KB (one sector), in one AQ write */
0568         i40e_debug(hw, I40E_DEBUG_NVM,
0569                "NVM write fail error: tried to write %d words, limit is %d.\n",
0570                words, I40E_SR_SECTOR_SIZE_IN_WORDS);
0571     else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
0572          != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
0573         /* A single write cannot spread over two sectors */
0574         i40e_debug(hw, I40E_DEBUG_NVM,
0575                "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
0576                offset, words);
0577     else
0578         ret_code = i40e_aq_update_nvm(hw, module_pointer,
0579                           2 * offset,  /*bytes*/
0580                           2 * words,   /*bytes*/
0581                           data, last_command, 0,
0582                           &cmd_details);
0583 
0584     return ret_code;
0585 }
0586 
0587 /**
0588  * i40e_calc_nvm_checksum - Calculates and returns the checksum
0589  * @hw: pointer to hardware structure
0590  * @checksum: pointer to the checksum
0591  *
0592  * This function calculates a SW checksum that covers the whole 64kB shadow RAM
0593  * except the VPD and PCIe ALT Auto-load modules. The structure and size of the
0594  * VPD area are customer specific and unknown, so this function skips the
0595  * maximum possible VPD size (1kB).
0596  **/
0597 static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
0598                             u16 *checksum)
0599 {
0600     i40e_status ret_code;
0601     struct i40e_virt_mem vmem;
0602     u16 pcie_alt_module = 0;
0603     u16 checksum_local = 0;
0604     u16 vpd_module = 0;
0605     u16 *data;
0606     u16 i = 0;
0607 
0608     ret_code = i40e_allocate_virt_mem(hw, &vmem,
0609                     I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
0610     if (ret_code)
0611         goto i40e_calc_nvm_checksum_exit;
0612     data = (u16 *)vmem.va;
0613 
0614     /* read pointer to VPD area */
0615     ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
0616     if (ret_code) {
0617         ret_code = I40E_ERR_NVM_CHECKSUM;
0618         goto i40e_calc_nvm_checksum_exit;
0619     }
0620 
0621     /* read pointer to PCIe Alt Auto-load module */
0622     ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
0623                     &pcie_alt_module);
0624     if (ret_code) {
0625         ret_code = I40E_ERR_NVM_CHECKSUM;
0626         goto i40e_calc_nvm_checksum_exit;
0627     }
0628 
0629     /* Calculate SW checksum that covers the whole 64kB shadow RAM
0630      * except the VPD and PCIe ALT Auto-load modules
0631      */
0632     for (i = 0; i < hw->nvm.sr_size; i++) {
0633         /* Read SR page */
0634         if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
0635             u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
0636 
0637             ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
0638             if (ret_code) {
0639                 ret_code = I40E_ERR_NVM_CHECKSUM;
0640                 goto i40e_calc_nvm_checksum_exit;
0641             }
0642         }
0643 
0644         /* Skip Checksum word */
0645         if (i == I40E_SR_SW_CHECKSUM_WORD)
0646             continue;
0647         /* Skip VPD module (convert byte size to word count) */
0648         if ((i >= (u32)vpd_module) &&
0649             (i < ((u32)vpd_module +
0650              (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
0651             continue;
0652         }
0653         /* Skip PCIe ALT module (convert byte size to word count) */
0654         if ((i >= (u32)pcie_alt_module) &&
0655             (i < ((u32)pcie_alt_module +
0656              (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
0657             continue;
0658         }
0659 
0660         checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
0661     }
0662 
0663     *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
0664 
0665 i40e_calc_nvm_checksum_exit:
0666     i40e_free_virt_mem(hw, &vmem);
0667     return ret_code;
0668 }
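
/*
 * Illustrative sketch (not part of the driver): the checksum arithmetic used
 * above, applied to an in-memory copy of the Shadow RAM.  The base value and
 * checksum-word offset below are assumptions mirroring the
 * I40E_SR_SW_CHECKSUM_* definitions; module sizes are passed in bytes and
 * halved to words, as in the driver.
 */
#include <stdint.h>

#define EX_SW_CHECKSUM_BASE 0xBABA       /* assumed base value */
#define EX_SW_CHECKSUM_WORD 0x3F         /* assumed checksum word offset */

static uint16_t ex_calc_checksum(const uint16_t *sr, uint32_t sr_words,
				 uint32_t vpd_start, uint32_t vpd_bytes,
				 uint32_t alt_start, uint32_t alt_bytes)
{
	uint16_t sum = 0;
	uint32_t i;

	for (i = 0; i < sr_words; i++) {
		if (i == EX_SW_CHECKSUM_WORD)
			continue;        /* skip the checksum word itself */
		if (i >= vpd_start && i < vpd_start + vpd_bytes / 2)
			continue;        /* skip the VPD module */
		if (i >= alt_start && i < alt_start + alt_bytes / 2)
			continue;        /* skip the PCIe ALT module */
		sum += sr[i];
	}
	return (uint16_t)(EX_SW_CHECKSUM_BASE - sum);
}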
0669 
0670 /**
0671  * i40e_update_nvm_checksum - Updates the NVM checksum
0672  * @hw: pointer to hardware structure
0673  *
0674  * NVM ownership must be acquired before calling this function and released
0675  * by the caller on reception of the ARQ completion event.
0676  * This function will commit the SR to the NVM.
0677  **/
0678 i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
0679 {
0680     i40e_status ret_code;
0681     u16 checksum;
0682     __le16 le_sum;
0683 
0684     ret_code = i40e_calc_nvm_checksum(hw, &checksum);
0685     if (!ret_code) {
0686         le_sum = cpu_to_le16(checksum);
0687         ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
0688                          1, &le_sum, true);
0689     }
0690 
0691     return ret_code;
0692 }
0693 
0694 /**
0695  * i40e_validate_nvm_checksum - Validate EEPROM checksum
0696  * @hw: pointer to hardware structure
0697  * @checksum: calculated checksum
0698  *
0699  * Performs checksum calculation and validates the NVM SW checksum. If the
0700  * caller does not need the checksum, the pointer can be NULL.
0701  **/
0702 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
0703                          u16 *checksum)
0704 {
0705     i40e_status ret_code = 0;
0706     u16 checksum_sr = 0;
0707     u16 checksum_local = 0;
0708 
0709     /* We must acquire the NVM lock in order to correctly synchronize the
0710      * NVM accesses across multiple PFs. Without doing so it is possible
0711      * for one of the PFs to read invalid data potentially indicating that
0712      * the checksum is invalid.
0713      */
0714     ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
0715     if (ret_code)
0716         return ret_code;
0717     ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
0718     __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
0719     i40e_release_nvm(hw);
0720     if (ret_code)
0721         return ret_code;
0722 
0723     /* Verify read checksum from EEPROM is the same as
0724      * calculated checksum
0725      */
0726     if (checksum_local != checksum_sr)
0727         ret_code = I40E_ERR_NVM_CHECKSUM;
0728 
0729     /* If the user cares, return the calculated checksum */
0730     if (checksum)
0731         *checksum = checksum_local;
0732 
0733     return ret_code;
0734 }
0735 
0736 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
0737                       struct i40e_nvm_access *cmd,
0738                       u8 *bytes, int *perrno);
0739 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
0740                          struct i40e_nvm_access *cmd,
0741                          u8 *bytes, int *perrno);
0742 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
0743                          struct i40e_nvm_access *cmd,
0744                          u8 *bytes, int *perrno);
0745 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
0746                         struct i40e_nvm_access *cmd,
0747                         int *perrno);
0748 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
0749                      struct i40e_nvm_access *cmd,
0750                      int *perrno);
0751 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
0752                      struct i40e_nvm_access *cmd,
0753                      u8 *bytes, int *perrno);
0754 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
0755                     struct i40e_nvm_access *cmd,
0756                     u8 *bytes, int *perrno);
0757 static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
0758                        struct i40e_nvm_access *cmd,
0759                        u8 *bytes, int *perrno);
0760 static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
0761                          struct i40e_nvm_access *cmd,
0762                          u8 *bytes, int *perrno);
0763 static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
0764                         struct i40e_nvm_access *cmd,
0765                         u8 *bytes, int *perrno);
0766 static inline u8 i40e_nvmupd_get_module(u32 val)
0767 {
0768     return (u8)(val & I40E_NVM_MOD_PNT_MASK);
0769 }
0770 static inline u8 i40e_nvmupd_get_transaction(u32 val)
0771 {
0772     return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
0773 }
0774 
0775 static inline u8 i40e_nvmupd_get_preservation_flags(u32 val)
0776 {
0777     return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
0778             I40E_NVM_PRESERVATION_FLAGS_SHIFT);
0779 }
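
/*
 * Illustrative sketch (not part of the driver): how a single cmd->config
 * word is decomposed by the helpers above.  The layout used here (module in
 * the low byte, transaction above it, preservation flags above that) is an
 * assumption for the example; the authoritative layout is given by the
 * I40E_NVM_*_MASK/_SHIFT definitions.
 */
#include <stdint.h>

struct ex_nvm_config {
	uint8_t module;
	uint8_t transaction;
	uint8_t preservation;
};

static struct ex_nvm_config ex_decode_config(uint32_t config)
{
	struct ex_nvm_config c = {
		.module       = config & 0xff,          /* assumed low byte */
		.transaction  = (config >> 8) & 0x0f,   /* assumed next nibble */
		.preservation = (config >> 12) & 0x03,  /* assumed two bits */
	};

	return c;
}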
0780 
0781 static const char * const i40e_nvm_update_state_str[] = {
0782     "I40E_NVMUPD_INVALID",
0783     "I40E_NVMUPD_READ_CON",
0784     "I40E_NVMUPD_READ_SNT",
0785     "I40E_NVMUPD_READ_LCB",
0786     "I40E_NVMUPD_READ_SA",
0787     "I40E_NVMUPD_WRITE_ERA",
0788     "I40E_NVMUPD_WRITE_CON",
0789     "I40E_NVMUPD_WRITE_SNT",
0790     "I40E_NVMUPD_WRITE_LCB",
0791     "I40E_NVMUPD_WRITE_SA",
0792     "I40E_NVMUPD_CSUM_CON",
0793     "I40E_NVMUPD_CSUM_SA",
0794     "I40E_NVMUPD_CSUM_LCB",
0795     "I40E_NVMUPD_STATUS",
0796     "I40E_NVMUPD_EXEC_AQ",
0797     "I40E_NVMUPD_GET_AQ_RESULT",
0798     "I40E_NVMUPD_GET_AQ_EVENT",
0799 };
0800 
0801 /**
0802  * i40e_nvmupd_command - Process an NVM update command
0803  * @hw: pointer to hardware structure
0804  * @cmd: pointer to nvm update command
0805  * @bytes: pointer to the data buffer
0806  * @perrno: pointer to return error code
0807  *
0808  * Dispatches command depending on what update state is current
0809  **/
0810 i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
0811                 struct i40e_nvm_access *cmd,
0812                 u8 *bytes, int *perrno)
0813 {
0814     i40e_status status;
0815     enum i40e_nvmupd_cmd upd_cmd;
0816 
0817     /* assume success */
0818     *perrno = 0;
0819 
0820     /* early check for status command and debug msgs */
0821     upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
0822 
0823     i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
0824            i40e_nvm_update_state_str[upd_cmd],
0825            hw->nvmupd_state,
0826            hw->nvm_release_on_done, hw->nvm_wait_opcode,
0827            cmd->command, cmd->config, cmd->offset, cmd->data_size);
0828 
0829     if (upd_cmd == I40E_NVMUPD_INVALID) {
0830         *perrno = -EFAULT;
0831         i40e_debug(hw, I40E_DEBUG_NVM,
0832                "i40e_nvmupd_validate_command returns %d errno %d\n",
0833                upd_cmd, *perrno);
0834     }
0835 
0836     /* a status request returns immediately rather than
0837      * going into the state machine
0838      */
0839     if (upd_cmd == I40E_NVMUPD_STATUS) {
0840         if (!cmd->data_size) {
0841             *perrno = -EFAULT;
0842             return I40E_ERR_BUF_TOO_SHORT;
0843         }
0844 
0845         bytes[0] = hw->nvmupd_state;
0846 
0847         if (cmd->data_size >= 4) {
0848             bytes[1] = 0;
0849             *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
0850         }
0851 
0852         /* Clear error status on read */
0853         if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
0854             hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
0855 
0856         return 0;
0857     }
0858 
0859     /* Clear the error status even if it was not read, and log it */
0860     if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
0861         i40e_debug(hw, I40E_DEBUG_NVM,
0862                "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
0863         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
0864     }
0865 
0866     /* Acquire lock to prevent race condition where adminq_task
0867      * can execute after i40e_nvmupd_nvm_read/write but before state
0868      * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
0869      *
0870      * During NVMUpdate, the lock is typically held for about 5ms for
0871      * most commands; however, it is held for about 60ms for the
0872      * NVMUPD_CSUM_LCB command.
0873      */
0874     mutex_lock(&hw->aq.arq_mutex);
0875     switch (hw->nvmupd_state) {
0876     case I40E_NVMUPD_STATE_INIT:
0877         status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
0878         break;
0879 
0880     case I40E_NVMUPD_STATE_READING:
0881         status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
0882         break;
0883 
0884     case I40E_NVMUPD_STATE_WRITING:
0885         status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
0886         break;
0887 
0888     case I40E_NVMUPD_STATE_INIT_WAIT:
0889     case I40E_NVMUPD_STATE_WRITE_WAIT:
0890         /* if we need to stop waiting for an event, clear
0891          * the wait info and return before doing anything else
0892          */
0893         if (cmd->offset == 0xffff) {
0894             i40e_nvmupd_clear_wait_state(hw);
0895             status = 0;
0896             break;
0897         }
0898 
0899         status = I40E_ERR_NOT_READY;
0900         *perrno = -EBUSY;
0901         break;
0902 
0903     default:
0904         /* invalid state, should never happen */
0905         i40e_debug(hw, I40E_DEBUG_NVM,
0906                "NVMUPD: no such state %d\n", hw->nvmupd_state);
0907         status = I40E_NOT_SUPPORTED;
0908         *perrno = -ESRCH;
0909         break;
0910     }
0911 
0912     mutex_unlock(&hw->aq.arq_mutex);
0913     return status;
0914 }
0915 
0916 /**
0917  * i40e_nvmupd_state_init - Handle NVM update state Init
0918  * @hw: pointer to hardware structure
0919  * @cmd: pointer to nvm update command buffer
0920  * @bytes: pointer to the data buffer
0921  * @perrno: pointer to return error code
0922  *
0923  * Process legitimate commands of the Init state and conditionally set next
0924  * state. Reject all other commands.
0925  **/
0926 static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
0927                       struct i40e_nvm_access *cmd,
0928                       u8 *bytes, int *perrno)
0929 {
0930     i40e_status status = 0;
0931     enum i40e_nvmupd_cmd upd_cmd;
0932 
0933     upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
0934 
0935     switch (upd_cmd) {
0936     case I40E_NVMUPD_READ_SA:
0937         status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
0938         if (status) {
0939             *perrno = i40e_aq_rc_to_posix(status,
0940                              hw->aq.asq_last_status);
0941         } else {
0942             status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
0943             i40e_release_nvm(hw);
0944         }
0945         break;
0946 
0947     case I40E_NVMUPD_READ_SNT:
0948         status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
0949         if (status) {
0950             *perrno = i40e_aq_rc_to_posix(status,
0951                              hw->aq.asq_last_status);
0952         } else {
0953             status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
0954             if (status)
0955                 i40e_release_nvm(hw);
0956             else
0957                 hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
0958         }
0959         break;
0960 
0961     case I40E_NVMUPD_WRITE_ERA:
0962         status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
0963         if (status) {
0964             *perrno = i40e_aq_rc_to_posix(status,
0965                              hw->aq.asq_last_status);
0966         } else {
0967             status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
0968             if (status) {
0969                 i40e_release_nvm(hw);
0970             } else {
0971                 hw->nvm_release_on_done = true;
0972                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
0973                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
0974             }
0975         }
0976         break;
0977 
0978     case I40E_NVMUPD_WRITE_SA:
0979         status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
0980         if (status) {
0981             *perrno = i40e_aq_rc_to_posix(status,
0982                              hw->aq.asq_last_status);
0983         } else {
0984             status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
0985             if (status) {
0986                 i40e_release_nvm(hw);
0987             } else {
0988                 hw->nvm_release_on_done = true;
0989                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
0990                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
0991             }
0992         }
0993         break;
0994 
0995     case I40E_NVMUPD_WRITE_SNT:
0996         status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
0997         if (status) {
0998             *perrno = i40e_aq_rc_to_posix(status,
0999                              hw->aq.asq_last_status);
1000         } else {
1001             status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1002             if (status) {
1003                 i40e_release_nvm(hw);
1004             } else {
1005                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1006                 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1007             }
1008         }
1009         break;
1010 
1011     case I40E_NVMUPD_CSUM_SA:
1012         status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1013         if (status) {
1014             *perrno = i40e_aq_rc_to_posix(status,
1015                              hw->aq.asq_last_status);
1016         } else {
1017             status = i40e_update_nvm_checksum(hw);
1018             if (status) {
1019                 *perrno = hw->aq.asq_last_status ?
1020                    i40e_aq_rc_to_posix(status,
1021                                hw->aq.asq_last_status) :
1022                    -EIO;
1023                 i40e_release_nvm(hw);
1024             } else {
1025                 hw->nvm_release_on_done = true;
1026                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1027                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1028             }
1029         }
1030         break;
1031 
1032     case I40E_NVMUPD_EXEC_AQ:
1033         status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1034         break;
1035 
1036     case I40E_NVMUPD_GET_AQ_RESULT:
1037         status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1038         break;
1039 
1040     case I40E_NVMUPD_GET_AQ_EVENT:
1041         status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
1042         break;
1043 
1044     default:
1045         i40e_debug(hw, I40E_DEBUG_NVM,
1046                "NVMUPD: bad cmd %s in init state\n",
1047                i40e_nvm_update_state_str[upd_cmd]);
1048         status = I40E_ERR_NVM;
1049         *perrno = -ESRCH;
1050         break;
1051     }
1052     return status;
1053 }
1054 
1055 /**
1056  * i40e_nvmupd_state_reading - Handle NVM update state Reading
1057  * @hw: pointer to hardware structure
1058  * @cmd: pointer to nvm update command buffer
1059  * @bytes: pointer to the data buffer
1060  * @perrno: pointer to return error code
1061  *
1062  * NVM ownership is already held.  Process legitimate commands and set any
1063  * change in state; reject all other commands.
1064  **/
1065 static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
1066                          struct i40e_nvm_access *cmd,
1067                          u8 *bytes, int *perrno)
1068 {
1069     i40e_status status = 0;
1070     enum i40e_nvmupd_cmd upd_cmd;
1071 
1072     upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1073 
1074     switch (upd_cmd) {
1075     case I40E_NVMUPD_READ_SA:
1076     case I40E_NVMUPD_READ_CON:
1077         status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1078         break;
1079 
1080     case I40E_NVMUPD_READ_LCB:
1081         status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1082         i40e_release_nvm(hw);
1083         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1084         break;
1085 
1086     default:
1087         i40e_debug(hw, I40E_DEBUG_NVM,
1088                "NVMUPD: bad cmd %s in reading state.\n",
1089                i40e_nvm_update_state_str[upd_cmd]);
1090         status = I40E_NOT_SUPPORTED;
1091         *perrno = -ESRCH;
1092         break;
1093     }
1094     return status;
1095 }
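
/*
 * Illustrative sketch (not part of the driver): a reduced model of the
 * streaming-read flow handled by i40e_nvmupd_state_init() and
 * i40e_nvmupd_state_reading() above (READ_SNT -> READ_CON... -> READ_LCB).
 * States and commands mirror the I40E_NVMUPD_* names but are local to the
 * example; the WAIT states and error handling are omitted.
 */
enum ex_state { EX_INIT, EX_READING };
enum ex_cmd   { EX_READ_SA, EX_READ_SNT, EX_READ_CON, EX_READ_LCB };

static enum ex_state ex_next_state(enum ex_state s, enum ex_cmd c)
{
	switch (s) {
	case EX_INIT:
		/* READ_SA reads and releases immediately, so it stays in INIT */
		return c == EX_READ_SNT ? EX_READING : EX_INIT;
	case EX_READING:
		/* LCB closes the sequence: the NVM is released, back to INIT */
		return c == EX_READ_LCB ? EX_INIT : EX_READING;
	}
	return s;
}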
1096 
1097 /**
1098  * i40e_nvmupd_state_writing - Handle NVM update state Writing
1099  * @hw: pointer to hardware structure
1100  * @cmd: pointer to nvm update command buffer
1101  * @bytes: pointer to the data buffer
1102  * @perrno: pointer to return error code
1103  *
1104  * NVM ownership is already held.  Process legitimate commands and set any
1105  * change in state; reject all other commands.
1106  **/
1107 static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
1108                          struct i40e_nvm_access *cmd,
1109                          u8 *bytes, int *perrno)
1110 {
1111     i40e_status status = 0;
1112     enum i40e_nvmupd_cmd upd_cmd;
1113     bool retry_attempt = false;
1114 
1115     upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1116 
1117 retry:
1118     switch (upd_cmd) {
1119     case I40E_NVMUPD_WRITE_CON:
1120         status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1121         if (!status) {
1122             hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1123             hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1124         }
1125         break;
1126 
1127     case I40E_NVMUPD_WRITE_LCB:
1128         status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1129         if (status) {
1130             *perrno = hw->aq.asq_last_status ?
1131                    i40e_aq_rc_to_posix(status,
1132                                hw->aq.asq_last_status) :
1133                    -EIO;
1134             hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1135         } else {
1136             hw->nvm_release_on_done = true;
1137             hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1138             hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1139         }
1140         break;
1141 
1142     case I40E_NVMUPD_CSUM_CON:
1143         /* Assumes the caller has acquired the nvm */
1144         status = i40e_update_nvm_checksum(hw);
1145         if (status) {
1146             *perrno = hw->aq.asq_last_status ?
1147                    i40e_aq_rc_to_posix(status,
1148                                hw->aq.asq_last_status) :
1149                    -EIO;
1150             hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1151         } else {
1152             hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1153             hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1154         }
1155         break;
1156 
1157     case I40E_NVMUPD_CSUM_LCB:
1158         /* Assumes the caller has acquired the nvm */
1159         status = i40e_update_nvm_checksum(hw);
1160         if (status) {
1161             *perrno = hw->aq.asq_last_status ?
1162                    i40e_aq_rc_to_posix(status,
1163                                hw->aq.asq_last_status) :
1164                    -EIO;
1165             hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1166         } else {
1167             hw->nvm_release_on_done = true;
1168             hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1169             hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1170         }
1171         break;
1172 
1173     default:
1174         i40e_debug(hw, I40E_DEBUG_NVM,
1175                "NVMUPD: bad cmd %s in writing state.\n",
1176                i40e_nvm_update_state_str[upd_cmd]);
1177         status = I40E_NOT_SUPPORTED;
1178         *perrno = -ESRCH;
1179         break;
1180     }
1181 
1182     /* In some circumstances, a multi-write transaction takes longer
1183      * than the default 3 minute timeout on the write semaphore.  If
1184      * the write failed with an EBUSY status, this is likely the problem,
1185      * so here we try to reacquire the semaphore then retry the write.
1186      * We only do one retry, then give up.
1187      */
1188     if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1189         !retry_attempt) {
1190         i40e_status old_status = status;
1191         u32 old_asq_status = hw->aq.asq_last_status;
1192         u32 gtime;
1193 
1194         gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1195         if (gtime >= hw->nvm.hw_semaphore_timeout) {
1196             i40e_debug(hw, I40E_DEBUG_ALL,
1197                    "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1198                    gtime, hw->nvm.hw_semaphore_timeout);
1199             i40e_release_nvm(hw);
1200             status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1201             if (status) {
1202                 i40e_debug(hw, I40E_DEBUG_ALL,
1203                        "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1204                        hw->aq.asq_last_status);
1205                 status = old_status;
1206                 hw->aq.asq_last_status = old_asq_status;
1207             } else {
1208                 retry_attempt = true;
1209                 goto retry;
1210             }
1211         }
1212     }
1213 
1214     return status;
1215 }
1216 
1217 /**
1218  * i40e_nvmupd_clear_wait_state - clear wait state on hw
1219  * @hw: pointer to the hardware structure
1220  **/
1221 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1222 {
1223     i40e_debug(hw, I40E_DEBUG_NVM,
1224            "NVMUPD: clearing wait on opcode 0x%04x\n",
1225            hw->nvm_wait_opcode);
1226 
1227     if (hw->nvm_release_on_done) {
1228         i40e_release_nvm(hw);
1229         hw->nvm_release_on_done = false;
1230     }
1231     hw->nvm_wait_opcode = 0;
1232 
1233     if (hw->aq.arq_last_status) {
1234         hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1235         return;
1236     }
1237 
1238     switch (hw->nvmupd_state) {
1239     case I40E_NVMUPD_STATE_INIT_WAIT:
1240         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1241         break;
1242 
1243     case I40E_NVMUPD_STATE_WRITE_WAIT:
1244         hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1245         break;
1246 
1247     default:
1248         break;
1249     }
1250 }
1251 
1252 /**
1253  * i40e_nvmupd_check_wait_event - handle NVM update operation events
1254  * @hw: pointer to the hardware structure
1255  * @opcode: the event that just happened
1256  * @desc: AdminQ descriptor
1257  **/
1258 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1259                   struct i40e_aq_desc *desc)
1260 {
1261     u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1262 
1263     if (opcode == hw->nvm_wait_opcode) {
1264         memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
1265         i40e_nvmupd_clear_wait_state(hw);
1266     }
1267 }
1268 
1269 /**
1270  * i40e_nvmupd_validate_command - Validate given command
1271  * @hw: pointer to hardware structure
1272  * @cmd: pointer to nvm update command buffer
1273  * @perrno: pointer to return error code
1274  *
1275  * Return one of the valid command types or I40E_NVMUPD_INVALID
1276  **/
1277 static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1278                          struct i40e_nvm_access *cmd,
1279                          int *perrno)
1280 {
1281     enum i40e_nvmupd_cmd upd_cmd;
1282     u8 module, transaction;
1283 
1284     /* anything that doesn't match a recognized case is an error */
1285     upd_cmd = I40E_NVMUPD_INVALID;
1286 
1287     transaction = i40e_nvmupd_get_transaction(cmd->config);
1288     module = i40e_nvmupd_get_module(cmd->config);
1289 
1290     /* limits on data size */
1291     if ((cmd->data_size < 1) ||
1292         (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1293         i40e_debug(hw, I40E_DEBUG_NVM,
1294                "i40e_nvmupd_validate_command data_size %d\n",
1295                cmd->data_size);
1296         *perrno = -EFAULT;
1297         return I40E_NVMUPD_INVALID;
1298     }
1299 
1300     switch (cmd->command) {
1301     case I40E_NVM_READ:
1302         switch (transaction) {
1303         case I40E_NVM_CON:
1304             upd_cmd = I40E_NVMUPD_READ_CON;
1305             break;
1306         case I40E_NVM_SNT:
1307             upd_cmd = I40E_NVMUPD_READ_SNT;
1308             break;
1309         case I40E_NVM_LCB:
1310             upd_cmd = I40E_NVMUPD_READ_LCB;
1311             break;
1312         case I40E_NVM_SA:
1313             upd_cmd = I40E_NVMUPD_READ_SA;
1314             break;
1315         case I40E_NVM_EXEC:
1316             if (module == 0xf)
1317                 upd_cmd = I40E_NVMUPD_STATUS;
1318             else if (module == 0)
1319                 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1320             break;
1321         case I40E_NVM_AQE:
1322             upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1323             break;
1324         }
1325         break;
1326 
1327     case I40E_NVM_WRITE:
1328         switch (transaction) {
1329         case I40E_NVM_CON:
1330             upd_cmd = I40E_NVMUPD_WRITE_CON;
1331             break;
1332         case I40E_NVM_SNT:
1333             upd_cmd = I40E_NVMUPD_WRITE_SNT;
1334             break;
1335         case I40E_NVM_LCB:
1336             upd_cmd = I40E_NVMUPD_WRITE_LCB;
1337             break;
1338         case I40E_NVM_SA:
1339             upd_cmd = I40E_NVMUPD_WRITE_SA;
1340             break;
1341         case I40E_NVM_ERA:
1342             upd_cmd = I40E_NVMUPD_WRITE_ERA;
1343             break;
1344         case I40E_NVM_CSUM:
1345             upd_cmd = I40E_NVMUPD_CSUM_CON;
1346             break;
1347         case (I40E_NVM_CSUM|I40E_NVM_SA):
1348             upd_cmd = I40E_NVMUPD_CSUM_SA;
1349             break;
1350         case (I40E_NVM_CSUM|I40E_NVM_LCB):
1351             upd_cmd = I40E_NVMUPD_CSUM_LCB;
1352             break;
1353         case I40E_NVM_EXEC:
1354             if (module == 0)
1355                 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1356             break;
1357         }
1358         break;
1359     }
1360 
1361     return upd_cmd;
1362 }
1363 
1364 /**
1365  * i40e_nvmupd_exec_aq - Run an AQ command
1366  * @hw: pointer to hardware structure
1367  * @cmd: pointer to nvm update command buffer
1368  * @bytes: pointer to the data buffer
1369  * @perrno: pointer to return error code
1370  *
1371  * cmd structure contains identifiers and data buffer
1372  **/
1373 static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1374                        struct i40e_nvm_access *cmd,
1375                        u8 *bytes, int *perrno)
1376 {
1377     struct i40e_asq_cmd_details cmd_details;
1378     i40e_status status;
1379     struct i40e_aq_desc *aq_desc;
1380     u32 buff_size = 0;
1381     u8 *buff = NULL;
1382     u32 aq_desc_len;
1383     u32 aq_data_len;
1384 
1385     i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1386     if (cmd->offset == 0xffff)
1387         return 0;
1388 
1389     memset(&cmd_details, 0, sizeof(cmd_details));
1390     cmd_details.wb_desc = &hw->nvm_wb_desc;
1391 
1392     aq_desc_len = sizeof(struct i40e_aq_desc);
1393     memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1394 
1395     /* get the aq descriptor */
1396     if (cmd->data_size < aq_desc_len) {
1397         i40e_debug(hw, I40E_DEBUG_NVM,
1398                "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1399                cmd->data_size, aq_desc_len);
1400         *perrno = -EINVAL;
1401         return I40E_ERR_PARAM;
1402     }
1403     aq_desc = (struct i40e_aq_desc *)bytes;
1404 
1405     /* if data buffer needed, make sure it's ready */
1406     aq_data_len = cmd->data_size - aq_desc_len;
1407     buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
1408     if (buff_size) {
1409         if (!hw->nvm_buff.va) {
1410             status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1411                             hw->aq.asq_buf_size);
1412             if (status)
1413                 i40e_debug(hw, I40E_DEBUG_NVM,
1414                        "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1415                        status);
1416         }
1417 
1418         if (hw->nvm_buff.va) {
1419             buff = hw->nvm_buff.va;
1420             memcpy(buff, &bytes[aq_desc_len], aq_data_len);
1421         }
1422     }
1423 
1424     if (cmd->offset)
1425         memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
1426 
1427     /* and away we go! */
1428     status = i40e_asq_send_command(hw, aq_desc, buff,
1429                        buff_size, &cmd_details);
1430     if (status) {
1431         i40e_debug(hw, I40E_DEBUG_NVM,
1432                "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1433                i40e_stat_str(hw, status),
1434                i40e_aq_str(hw, hw->aq.asq_last_status));
1435         *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1436         return status;
1437     }
1438 
1439     /* should we wait for a follow-up event? */
1440     if (cmd->offset) {
1441         hw->nvm_wait_opcode = cmd->offset;
1442         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1443     }
1444 
1445     return status;
1446 }
1447 
1448 /**
1449  * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1450  * @hw: pointer to hardware structure
1451  * @cmd: pointer to nvm update command buffer
1452  * @bytes: pointer to the data buffer
1453  * @perrno: pointer to return error code
1454  *
1455  * cmd structure contains identifiers and data buffer
1456  **/
1457 static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1458                          struct i40e_nvm_access *cmd,
1459                          u8 *bytes, int *perrno)
1460 {
1461     u32 aq_total_len;
1462     u32 aq_desc_len;
1463     int remainder;
1464     u8 *buff;
1465 
1466     i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1467 
1468     aq_desc_len = sizeof(struct i40e_aq_desc);
1469     aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
1470 
1471     /* check offset range */
1472     if (cmd->offset > aq_total_len) {
1473         i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1474                __func__, cmd->offset, aq_total_len);
1475         *perrno = -EINVAL;
1476         return I40E_ERR_PARAM;
1477     }
1478 
1479     /* check copy length range */
1480     if (cmd->data_size > (aq_total_len - cmd->offset)) {
1481         int new_len = aq_total_len - cmd->offset;
1482 
1483         i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1484                __func__, cmd->data_size, new_len);
1485         cmd->data_size = new_len;
1486     }
1487 
1488     remainder = cmd->data_size;
1489     if (cmd->offset < aq_desc_len) {
1490         u32 len = aq_desc_len - cmd->offset;
1491 
1492         len = min(len, cmd->data_size);
1493         i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1494                __func__, cmd->offset, cmd->offset + len);
1495 
1496         buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1497         memcpy(bytes, buff, len);
1498 
1499         bytes += len;
1500         remainder -= len;
1501         buff = hw->nvm_buff.va;
1502     } else {
1503         buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1504     }
1505 
1506     if (remainder > 0) {
1507         int start_byte = buff - (u8 *)hw->nvm_buff.va;
1508 
1509         i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1510                __func__, start_byte, start_byte + remainder);
1511         memcpy(bytes, buff, remainder);
1512     }
1513 
1514     return 0;
1515 }
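
/*
 * Editor's illustrative sketch, not part of the driver: the copy above is
 * split at the descriptor boundary -- bytes below sizeof(i40e_aq_desc) come
 * from the cached write-back descriptor, the remainder from the data
 * buffer.  The demo_* helper below models that arithmetic with plain
 * buffers; all names are hypothetical.
 */
#include <stdint.h>
#include <string.h>

size_t demo_copy_aq_result(uint8_t *out, uint32_t offset, uint32_t len,
			   const uint8_t *desc, uint32_t desc_len,
			   const uint8_t *data, uint32_t data_len)
{
	uint32_t total = desc_len + data_len;
	uint32_t copied = 0;

	if (offset > total)
		return 0;			/* out of range */
	if (len > total - offset)
		len = total - offset;		/* trim, as the driver does */

	if (offset < desc_len) {		/* leading bytes from the descriptor */
		uint32_t n = desc_len - offset;

		if (n > len)
			n = len;
		memcpy(out, desc + offset, n);
		copied = n;
		offset = 0;			/* remainder starts at data[0] */
	} else {
		offset -= desc_len;		/* window lies wholly in the data buffer */
	}

	if (copied < len)			/* trailing bytes from the data buffer */
		memcpy(out + copied, data + offset, len - copied);

	return len;
}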
1516 
1517 /**
1518  * i40e_nvmupd_get_aq_event - Get the Admin Queue event from the previous exec_aq
1519  * @hw: pointer to hardware structure
1520  * @cmd: pointer to nvm update command buffer
1521  * @bytes: pointer to the data buffer
1522  * @perrno: pointer to return error code
1523  *
1524  * cmd structure contains identifiers and data buffer
1525  **/
1526 static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1527                         struct i40e_nvm_access *cmd,
1528                         u8 *bytes, int *perrno)
1529 {
1530     u32 aq_total_len;
1531     u32 aq_desc_len;
1532 
1533     i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1534 
1535     aq_desc_len = sizeof(struct i40e_aq_desc);
1536     aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
1537 
1538     /* check copylength range */
1539     if (cmd->data_size > aq_total_len) {
1540         i40e_debug(hw, I40E_DEBUG_NVM,
1541                "%s: copy length %d too big, trimming to %d\n",
1542                __func__, cmd->data_size, aq_total_len);
1543         cmd->data_size = aq_total_len;
1544     }
1545 
1546     memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
1547 
1548     return 0;
1549 }
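
/*
 * Editor's illustrative sketch, not part of the driver: this helper returns
 * the completion descriptor cached after an exec_aq that asked to wait
 * (exec_aq records the opcode and enters INIT_WAIT; the admin-queue receive
 * path later fills hw->nvm_aq_event_desc).  The demo_* state machine below
 * is a simplified model of that handshake, not the driver's real plumbing.
 */
#include <stdint.h>
#include <string.h>

enum demo_nvmupd_state { DEMO_STATE_INIT, DEMO_STATE_INIT_WAIT };

struct demo_hw {
	enum demo_nvmupd_state state;
	uint16_t wait_opcode;			/* opcode recorded by exec_aq */
	uint8_t event_desc[32];			/* cached completion descriptor */
};

/* Called from the admin-queue receive path when a completion shows up. */
void demo_on_aq_event(struct demo_hw *hw, uint16_t opcode,
		      const uint8_t *desc, size_t desc_len)
{
	if (hw->state == DEMO_STATE_INIT_WAIT && opcode == hw->wait_opcode) {
		if (desc_len > sizeof(hw->event_desc))
			desc_len = sizeof(hw->event_desc);
		memcpy(hw->event_desc, desc, desc_len);	/* read back later */
		hw->state = DEMO_STATE_INIT;
		hw->wait_opcode = 0;
	}
}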
1550 
1551 /**
1552  * i40e_nvmupd_nvm_read - Read NVM
1553  * @hw: pointer to hardware structure
1554  * @cmd: pointer to nvm update command buffer
1555  * @bytes: pointer to the data buffer
1556  * @perrno: pointer to return error code
1557  *
1558  * cmd structure contains identifiers and data buffer
1559  **/
1560 static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1561                     struct i40e_nvm_access *cmd,
1562                     u8 *bytes, int *perrno)
1563 {
1564     struct i40e_asq_cmd_details cmd_details;
1565     i40e_status status;
1566     u8 module, transaction;
1567     bool last;
1568 
1569     transaction = i40e_nvmupd_get_transaction(cmd->config);
1570     module = i40e_nvmupd_get_module(cmd->config);
1571     last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1572 
1573     memset(&cmd_details, 0, sizeof(cmd_details));
1574     cmd_details.wb_desc = &hw->nvm_wb_desc;
1575 
1576     status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1577                   bytes, last, &cmd_details);
1578     if (status) {
1579         i40e_debug(hw, I40E_DEBUG_NVM,
1580                "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
1581                module, cmd->offset, cmd->data_size);
1582         i40e_debug(hw, I40E_DEBUG_NVM,
1583                "i40e_nvmupd_nvm_read status %d aq %d\n",
1584                status, hw->aq.asq_last_status);
1585         *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1586     }
1587 
1588     return status;
1589 }
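
/*
 * Editor's illustrative sketch, not part of the driver: the read path above
 * unpacks one config word into a module pointer and a transaction type, and
 * treats LCB or SA transactions as the last chunk.  The DEMO_* shift/mask
 * values below are placeholders; the real encoding lives in the I40E_NVM_*
 * macros, not here.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_MOD_MASK		0xffu		/* placeholder for I40E_NVM_MOD_PNT_MASK */
#define DEMO_TRANS_SHIFT	8		/* placeholder for I40E_NVM_TRANS_SHIFT */
#define DEMO_TRANS_MASK		(0xfu << DEMO_TRANS_SHIFT)
#define DEMO_TRANS_LCB		0x2u		/* placeholder for I40E_NVM_LCB */
#define DEMO_TRANS_SA		0x3u		/* placeholder for I40E_NVM_SA */

struct demo_nvm_op {
	uint8_t module;
	uint8_t transaction;
	bool last;				/* final chunk of a multi-part operation */
};

struct demo_nvm_op demo_decode_config(uint32_t config)
{
	struct demo_nvm_op op;

	op.module = config & DEMO_MOD_MASK;
	op.transaction = (config & DEMO_TRANS_MASK) >> DEMO_TRANS_SHIFT;
	/* a read is "last" when the transaction is LCB or SA, as above */
	op.last = (op.transaction == DEMO_TRANS_LCB) ||
		  (op.transaction == DEMO_TRANS_SA);
	return op;
}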
1590 
1591 /**
1592  * i40e_nvmupd_nvm_erase - Erase an NVM module
1593  * @hw: pointer to hardware structure
1594  * @cmd: pointer to nvm update command buffer
1595  * @perrno: pointer to return error code
1596  *
1597  * module, offset and data_size are in cmd structure
1598  **/
1599 static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1600                      struct i40e_nvm_access *cmd,
1601                      int *perrno)
1602 {
1603     i40e_status status = 0;
1604     struct i40e_asq_cmd_details cmd_details;
1605     u8 module, transaction;
1606     bool last;
1607 
1608     transaction = i40e_nvmupd_get_transaction(cmd->config);
1609     module = i40e_nvmupd_get_module(cmd->config);
1610     last = (transaction & I40E_NVM_LCB);
1611 
1612     memset(&cmd_details, 0, sizeof(cmd_details));
1613     cmd_details.wb_desc = &hw->nvm_wb_desc;
1614 
1615     status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1616                    last, &cmd_details);
1617     if (status) {
1618         i40e_debug(hw, I40E_DEBUG_NVM,
1619                "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
1620                module, cmd->offset, cmd->data_size);
1621         i40e_debug(hw, I40E_DEBUG_NVM,
1622                "i40e_nvmupd_nvm_erase status %d aq %d\n",
1623                status, hw->aq.asq_last_status);
1624         *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1625     }
1626 
1627     return status;
1628 }
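
/*
 * Editor's illustrative note, not part of the driver: the read path tests
 * "last" by equality (transaction == LCB or SA) while erase and write test
 * the LCB bit (transaction & LCB).  Assuming SA is encoded as SNT|LCB, both
 * predicates accept SA; the sketch below spells the two forms out with
 * placeholder values.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_SNT	0x1u			/* placeholder for I40E_NVM_SNT */
#define DEMO_LCB	0x2u			/* placeholder for I40E_NVM_LCB */
#define DEMO_SA		(DEMO_SNT | DEMO_LCB)	/* placeholder for I40E_NVM_SA */

bool demo_last_by_equality(uint8_t trans)	/* read-style test */
{
	return trans == DEMO_LCB || trans == DEMO_SA;
}

bool demo_last_by_bit(uint8_t trans)		/* erase/write-style test */
{
	return trans & DEMO_LCB;
}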
1629 
1630 /**
1631  * i40e_nvmupd_nvm_write - Write NVM
1632  * @hw: pointer to hardware structure
1633  * @cmd: pointer to nvm update command buffer
1634  * @bytes: pointer to the data buffer
1635  * @perrno: pointer to return error code
1636  *
1637  * module, offset, data_size and data are in cmd structure
1638  **/
1639 static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1640                      struct i40e_nvm_access *cmd,
1641                      u8 *bytes, int *perrno)
1642 {
1643     i40e_status status = 0;
1644     struct i40e_asq_cmd_details cmd_details;
1645     u8 module, transaction;
1646     u8 preservation_flags;
1647     bool last;
1648 
1649     transaction = i40e_nvmupd_get_transaction(cmd->config);
1650     module = i40e_nvmupd_get_module(cmd->config);
1651     last = (transaction & I40E_NVM_LCB);
1652     preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1653 
1654     memset(&cmd_details, 0, sizeof(cmd_details));
1655     cmd_details.wb_desc = &hw->nvm_wb_desc;
1656 
1657     status = i40e_aq_update_nvm(hw, module, cmd->offset,
1658                     (u16)cmd->data_size, bytes, last,
1659                     preservation_flags, &cmd_details);
1660     if (status) {
1661         i40e_debug(hw, I40E_DEBUG_NVM,
1662                "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1663                module, cmd->offset, cmd->data_size);
1664         i40e_debug(hw, I40E_DEBUG_NVM,
1665                "i40e_nvmupd_nvm_write status %d aq %d\n",
1666                status, hw->aq.asq_last_status);
1667         *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1668     }
1669 
1670     return status;
1671 }
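
/*
 * Editor's illustrative sketch, not part of the driver: a hypothetical
 * caller feeding the write path above in chunks, marking only the final
 * chunk as "last" so the firmware can close the transaction.
 * demo_write_chunk() stands in for the ioctl/AQ transport and is only
 * declared, not implemented, here.
 */
#include <stdbool.h>
#include <stdint.h>

int demo_write_chunk(uint8_t module, uint32_t offset, const uint8_t *data,
		     uint16_t len, bool last);	/* hypothetical transport */

int demo_write_image(uint8_t module, const uint8_t *image, uint32_t image_len,
		     uint16_t chunk)
{
	uint32_t off = 0;
	int err;

	if (!chunk)
		return -1;			/* avoid a zero-length loop */

	while (off < image_len) {
		uint16_t len = chunk;
		bool last;

		if (image_len - off < chunk)
			len = image_len - off;
		last = (off + len == image_len);	/* only the final chunk sets LCB */

		err = demo_write_chunk(module, off, image + off, len, last);
		if (err)
			return err;
		off += len;
	}
	return 0;
}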