// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

static void i40e_resume_aq(struct i40e_hw *hw);

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
    /* set head and tail registers in our local struct */
    if (i40e_is_vf(hw)) {
        hw->aq.asq.tail = I40E_VF_ATQT1;
        hw->aq.asq.head = I40E_VF_ATQH1;
        hw->aq.asq.len  = I40E_VF_ATQLEN1;
        hw->aq.asq.bal  = I40E_VF_ATQBAL1;
        hw->aq.asq.bah  = I40E_VF_ATQBAH1;
        hw->aq.arq.tail = I40E_VF_ARQT1;
        hw->aq.arq.head = I40E_VF_ARQH1;
        hw->aq.arq.len  = I40E_VF_ARQLEN1;
        hw->aq.arq.bal  = I40E_VF_ARQBAL1;
        hw->aq.arq.bah  = I40E_VF_ARQBAH1;
    } else {
        hw->aq.asq.tail = I40E_PF_ATQT;
        hw->aq.asq.head = I40E_PF_ATQH;
        hw->aq.asq.len  = I40E_PF_ATQLEN;
        hw->aq.asq.bal  = I40E_PF_ATQBAL;
        hw->aq.asq.bah  = I40E_PF_ATQBAH;
        hw->aq.arq.tail = I40E_PF_ARQT;
        hw->aq.arq.head = I40E_PF_ARQH;
        hw->aq.arq.len  = I40E_PF_ARQLEN;
        hw->aq.arq.bal  = I40E_PF_ARQBAL;
        hw->aq.arq.bah  = I40E_PF_ARQBAH;
    }
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
    i40e_status ret_code;

    ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                     i40e_mem_atq_ring,
                     (hw->aq.num_asq_entries *
                     sizeof(struct i40e_aq_desc)),
                     I40E_ADMINQ_DESC_ALIGNMENT);
    if (ret_code)
        return ret_code;

    ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                      (hw->aq.num_asq_entries *
                      sizeof(struct i40e_asq_cmd_details)));
    if (ret_code) {
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
        return ret_code;
    }

    return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
    i40e_status ret_code;

    ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                     i40e_mem_arq_ring,
                     (hw->aq.num_arq_entries *
                     sizeof(struct i40e_aq_desc)),
                     I40E_ADMINQ_DESC_ALIGNMENT);

    return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
    i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
    i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
    i40e_status ret_code;
    struct i40e_aq_desc *desc;
    struct i40e_dma_mem *bi;
    int i;

    /* We'll be allocating the buffer info memory first, then we can
     * allocate the mapped buffers for the event processing
     */

    /* buffer_info structures do not need alignment */
    ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
        (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
    if (ret_code)
        goto alloc_arq_bufs;
    hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

    /* allocate the mapped buffers */
    for (i = 0; i < hw->aq.num_arq_entries; i++) {
        bi = &hw->aq.arq.r.arq_bi[i];
        ret_code = i40e_allocate_dma_mem(hw, bi,
                         i40e_mem_arq_buf,
                         hw->aq.arq_buf_size,
                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
            goto unwind_alloc_arq_bufs;

        /* now configure the descriptors for use */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

        desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
            desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
        desc->opcode = 0;
        /* This is in accordance with Admin queue design, there is no
         * register for buffer size configuration
         */
        desc->datalen = cpu_to_le16((u16)bi->size);
        desc->retval = 0;
        desc->cookie_high = 0;
        desc->cookie_low = 0;
        desc->params.external.addr_high =
            cpu_to_le32(upper_32_bits(bi->pa));
        desc->params.external.addr_low =
            cpu_to_le32(lower_32_bits(bi->pa));
        desc->params.external.param0 = 0;
        desc->params.external.param1 = 0;
    }

alloc_arq_bufs:
    return ret_code;

unwind_alloc_arq_bufs:
    /* don't try to free the one that failed... */
    i--;
    for (; i >= 0; i--)
        i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
    i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

    return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
    i40e_status ret_code;
    struct i40e_dma_mem *bi;
    int i;

    /* No mapped memory needed yet, just the buffer info structures */
    ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
        (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
    if (ret_code)
        goto alloc_asq_bufs;
    hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

    /* allocate the mapped buffers */
    for (i = 0; i < hw->aq.num_asq_entries; i++) {
        bi = &hw->aq.asq.r.asq_bi[i];
        ret_code = i40e_allocate_dma_mem(hw, bi,
                         i40e_mem_asq_buf,
                         hw->aq.asq_buf_size,
                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
            goto unwind_alloc_asq_bufs;
    }
alloc_asq_bufs:
    return ret_code;

unwind_alloc_asq_bufs:
    /* don't try to free the one that failed... */
    i--;
    for (; i >= 0; i--)
        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
    i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

    return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
    int i;

    /* free descriptors */
    for (i = 0; i < hw->aq.num_arq_entries; i++)
        i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

    /* free the descriptor memory */
    i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

    /* free the dma header */
    i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
    int i;

    /* only unmap if the address is non-NULL */
    for (i = 0; i < hw->aq.num_asq_entries; i++)
        if (hw->aq.asq.r.asq_bi[i].pa)
            i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

    /* free the buffer info list */
    i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

    /* free the descriptor memory */
    i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

    /* free the dma header */
    i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
    i40e_status ret_code = 0;
    u32 reg = 0;

    /* Clear Head and Tail */
    wr32(hw, hw->aq.asq.head, 0);
    wr32(hw, hw->aq.asq.tail, 0);

    /* set starting point */
    wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                  I40E_PF_ATQLEN_ATQENABLE_MASK));
    wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
    wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

    /* Check one register to verify that config was applied */
    reg = rd32(hw, hw->aq.asq.bal);
    if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
        ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

    return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
    i40e_status ret_code = 0;
    u32 reg = 0;

    /* Clear Head and Tail */
    wr32(hw, hw->aq.arq.head, 0);
    wr32(hw, hw->aq.arq.tail, 0);

    /* set starting point */
    wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                  I40E_PF_ARQLEN_ARQENABLE_MASK));
    wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
    wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

    /* Update tail in the HW to post pre-allocated buffers */
    wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

    /* Check one register to verify that config was applied */
    reg = rd32(hw, hw->aq.arq.bal);
    if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
        ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

    return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
    i40e_status ret_code = 0;

    if (hw->aq.asq.count > 0) {
        /* queue already initialized */
        ret_code = I40E_ERR_NOT_READY;
        goto init_adminq_exit;
    }

    /* verify input for valid configuration */
    if ((hw->aq.num_asq_entries == 0) ||
        (hw->aq.asq_buf_size == 0)) {
        ret_code = I40E_ERR_CONFIG;
        goto init_adminq_exit;
    }

    hw->aq.asq.next_to_use = 0;
    hw->aq.asq.next_to_clean = 0;

    /* allocate the ring memory */
    ret_code = i40e_alloc_adminq_asq_ring(hw);
    if (ret_code)
        goto init_adminq_exit;

    /* allocate buffers in the rings */
    ret_code = i40e_alloc_asq_bufs(hw);
    if (ret_code)
        goto init_adminq_free_rings;

    /* initialize base registers */
    ret_code = i40e_config_asq_regs(hw);
    if (ret_code)
        goto init_adminq_free_rings;

    /* success! */
    hw->aq.asq.count = hw->aq.num_asq_entries;
    goto init_adminq_exit;

init_adminq_free_rings:
    i40e_free_adminq_asq(hw);

init_adminq_exit:
    return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
    i40e_status ret_code = 0;

    if (hw->aq.arq.count > 0) {
        /* queue already initialized */
        ret_code = I40E_ERR_NOT_READY;
        goto init_adminq_exit;
    }

    /* verify input for valid configuration */
    if ((hw->aq.num_arq_entries == 0) ||
        (hw->aq.arq_buf_size == 0)) {
        ret_code = I40E_ERR_CONFIG;
        goto init_adminq_exit;
    }

    hw->aq.arq.next_to_use = 0;
    hw->aq.arq.next_to_clean = 0;

    /* allocate the ring memory */
    ret_code = i40e_alloc_adminq_arq_ring(hw);
    if (ret_code)
        goto init_adminq_exit;

    /* allocate buffers in the rings */
    ret_code = i40e_alloc_arq_bufs(hw);
    if (ret_code)
        goto init_adminq_free_rings;

    /* initialize base registers */
    ret_code = i40e_config_arq_regs(hw);
    if (ret_code)
        goto init_adminq_free_rings;

    /* success! */
    hw->aq.arq.count = hw->aq.num_arq_entries;
    goto init_adminq_exit;

init_adminq_free_rings:
    i40e_free_adminq_arq(hw);

init_adminq_exit:
    return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
    i40e_status ret_code = 0;

    mutex_lock(&hw->aq.asq_mutex);

    if (hw->aq.asq.count == 0) {
        ret_code = I40E_ERR_NOT_READY;
        goto shutdown_asq_out;
    }

    /* Stop firmware AdminQ processing */
    wr32(hw, hw->aq.asq.head, 0);
    wr32(hw, hw->aq.asq.tail, 0);
    wr32(hw, hw->aq.asq.len, 0);
    wr32(hw, hw->aq.asq.bal, 0);
    wr32(hw, hw->aq.asq.bah, 0);

    hw->aq.asq.count = 0; /* to indicate uninitialized queue */

    /* free ring buffers */
    i40e_free_asq_bufs(hw);

shutdown_asq_out:
    mutex_unlock(&hw->aq.asq_mutex);
    return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
    i40e_status ret_code = 0;

    mutex_lock(&hw->aq.arq_mutex);

    if (hw->aq.arq.count == 0) {
        ret_code = I40E_ERR_NOT_READY;
        goto shutdown_arq_out;
    }

    /* Stop firmware AdminQ processing */
    wr32(hw, hw->aq.arq.head, 0);
    wr32(hw, hw->aq.arq.tail, 0);
    wr32(hw, hw->aq.arq.len, 0);
    wr32(hw, hw->aq.arq.bal, 0);
    wr32(hw, hw->aq.arq.bah, 0);

    hw->aq.arq.count = 0; /* to indicate uninitialized queue */

    /* free ring buffers */
    i40e_free_arq_bufs(hw);

shutdown_arq_out:
    mutex_unlock(&hw->aq.arq_mutex);
    return ret_code;
}

/**
 *  i40e_set_hw_flags - set HW flags
 *  @hw: pointer to the hardware structure
 **/
static void i40e_set_hw_flags(struct i40e_hw *hw)
{
    struct i40e_adminq_info *aq = &hw->aq;

    hw->flags = 0;

    switch (hw->mac.type) {
    case I40E_MAC_XL710:
        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
            hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
            hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
            /* The ability to RX (not drop) 802.1ad frames */
            hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
        }
        break;
    case I40E_MAC_X722:
        hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
                 I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
            hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;

        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
            hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;

        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
            hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;

        fallthrough;
    default:
        break;
    }

    /* Newer versions of firmware require lock when reading the NVM */
    if (aq->api_maj_ver > 1 ||
        (aq->api_maj_ver == 1 &&
         aq->api_min_ver >= 5))
        hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

    if (aq->api_maj_ver > 1 ||
        (aq->api_maj_ver == 1 &&
         aq->api_min_ver >= 8)) {
        hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
        hw->flags |= I40E_HW_FLAG_DROP_MODE;
    }

    if (aq->api_maj_ver > 1 ||
        (aq->api_maj_ver == 1 &&
         aq->api_min_ver >= 9))
        hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
    u16 cfg_ptr, oem_hi, oem_lo;
    u16 eetrack_lo, eetrack_hi;
    i40e_status ret_code;
    int retry = 0;

    /* verify input for valid configuration */
    if ((hw->aq.num_arq_entries == 0) ||
        (hw->aq.num_asq_entries == 0) ||
        (hw->aq.arq_buf_size == 0) ||
        (hw->aq.asq_buf_size == 0)) {
        ret_code = I40E_ERR_CONFIG;
        goto init_adminq_exit;
    }

    /* Set up register offsets */
    i40e_adminq_init_regs(hw);

    /* setup ASQ command write back timeout */
    hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

    /* allocate the ASQ */
    ret_code = i40e_init_asq(hw);
    if (ret_code)
        goto init_adminq_destroy_locks;

    /* allocate the ARQ */
    ret_code = i40e_init_arq(hw);
    if (ret_code)
        goto init_adminq_free_asq;

    /* There are some cases where the firmware may not be quite ready
     * for AdminQ operations, so we retry the AdminQ setup a few times
     * if we see timeouts in this first AQ call.
     */
    do {
        ret_code = i40e_aq_get_firmware_version(hw,
                            &hw->aq.fw_maj_ver,
                            &hw->aq.fw_min_ver,
                            &hw->aq.fw_build,
                            &hw->aq.api_maj_ver,
                            &hw->aq.api_min_ver,
                            NULL);
        if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
            break;
        retry++;
        msleep(100);
        i40e_resume_aq(hw);
    } while (retry < 10);
    if (ret_code != I40E_SUCCESS)
        goto init_adminq_free_arq;

    /* Some features were introduced in different FW API versions
     * for different MAC types.
     */
    i40e_set_hw_flags(hw);

    /* get the NVM version info */
    i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
               &hw->nvm.version);
    i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
    i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
    hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
    i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
    i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
               &oem_hi);
    i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
               &oem_lo);
    hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

    if (hw->mac.type == I40E_MAC_XL710 &&
        hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
        hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
        hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
        hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
    }
    if (hw->mac.type == I40E_MAC_X722 &&
        hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
        hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
        hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
    }

    /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
    if (hw->aq.api_maj_ver > 1 ||
        (hw->aq.api_maj_ver == 1 &&
         hw->aq.api_min_ver >= 7))
        hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

    if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
        ret_code = I40E_ERR_FIRMWARE_API_VERSION;
        goto init_adminq_free_arq;
    }

    /* pre-emptive resource lock release */
    i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
    hw->nvm_release_on_done = false;
    hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

    ret_code = 0;

    /* success! */
    goto init_adminq_exit;

init_adminq_free_arq:
    i40e_shutdown_arq(hw);
init_adminq_free_asq:
    i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
    return ret_code;
}

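/* Illustrative sketch (not part of the original file): per the kernel-doc
 * above, the owning driver must size both queues before calling
 * i40e_init_adminq(). The function name and the values used here are
 * hypothetical examples, not the driver's actual defaults.
 */
static i40e_status example_adminq_bringup(struct i40e_hw *hw)
{
    hw->aq.num_asq_entries = 128;    /* send (command) ring depth */
    hw->aq.num_arq_entries = 128;    /* receive (event) ring depth */
    hw->aq.asq_buf_size = 4096;      /* indirect command buffer bytes */
    hw->aq.arq_buf_size = 4096;      /* event message buffer bytes */

    return i40e_init_adminq(hw);
}
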
/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
void i40e_shutdown_adminq(struct i40e_hw *hw)
{
    if (i40e_check_asq_alive(hw))
        i40e_aq_queue_shutdown(hw, true);

    i40e_shutdown_asq(hw);
    i40e_shutdown_arq(hw);

    if (hw->nvm_buff.va)
        i40e_free_virt_mem(hw, &hw->nvm_buff);
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
    struct i40e_adminq_ring *asq = &(hw->aq.asq);
    struct i40e_asq_cmd_details *details;
    u16 ntc = asq->next_to_clean;
    struct i40e_aq_desc desc_cb;
    struct i40e_aq_desc *desc;

    desc = I40E_ADMINQ_DESC(*asq, ntc);
    details = I40E_ADMINQ_DETAILS(*asq, ntc);
    while (rd32(hw, hw->aq.asq.head) != ntc) {
        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
               "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

        if (details->callback) {
            I40E_ADMINQ_CALLBACK cb_func =
                    (I40E_ADMINQ_CALLBACK)details->callback;
            desc_cb = *desc;
            cb_func(hw, &desc_cb);
        }
        memset(desc, 0, sizeof(*desc));
        memset(details, 0, sizeof(*details));
        ntc++;
        if (ntc == asq->count)
            ntc = 0;
        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
    }

    asq->next_to_clean = ntc;

    return I40E_DESC_UNUSED(asq);
}
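
/* For reference (defined elsewhere in the driver headers, reproduced
 * here as a comment for clarity): I40E_DESC_UNUSED() uses the usual
 * one-slot-reserved ring arithmetic, so head == tail unambiguously
 * means "empty":
 *
 *   #define I40E_DESC_UNUSED(R) \
 *       ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
 *        (R)->next_to_clean - (R)->next_to_use - 1)
 */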

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
    /* AQ designers suggest use of head for better
     * timing reliability than DD bit
     */
    return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command_atomic_exec - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @is_atomic_context: is the function called in an atomic context?
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
static i40e_status
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
                  struct i40e_aq_desc *desc,
                  void *buff, /* can be NULL */
                  u16  buff_size,
                  struct i40e_asq_cmd_details *cmd_details,
                  bool is_atomic_context)
{
    i40e_status status = 0;
    struct i40e_dma_mem *dma_buff = NULL;
    struct i40e_asq_cmd_details *details;
    struct i40e_aq_desc *desc_on_ring;
    bool cmd_completed = false;
    u16  retval = 0;
    u32  val = 0;

    if (hw->aq.asq.count == 0) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
               "AQTX: Admin queue not initialized.\n");
        status = I40E_ERR_QUEUE_EMPTY;
        goto asq_send_command_error;
    }

    hw->aq.asq_last_status = I40E_AQ_RC_OK;

    val = rd32(hw, hw->aq.asq.head);
    if (val >= hw->aq.num_asq_entries) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
               "AQTX: head overrun at %d\n", val);
        status = I40E_ERR_ADMIN_QUEUE_FULL;
        goto asq_send_command_error;
    }

    details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
    if (cmd_details) {
        *details = *cmd_details;

        /* If the cmd_details are defined copy the cookie.  The
         * cpu_to_le32 is not needed here because the data is ignored
         * by the FW, only used by the driver
         */
        if (details->cookie) {
            desc->cookie_high =
                cpu_to_le32(upper_32_bits(details->cookie));
            desc->cookie_low =
                cpu_to_le32(lower_32_bits(details->cookie));
        }
    } else {
        memset(details, 0, sizeof(struct i40e_asq_cmd_details));
    }

    /* clear requested flags and then set additional flags if defined */
    desc->flags &= ~cpu_to_le16(details->flags_dis);
    desc->flags |= cpu_to_le16(details->flags_ena);

    if (buff_size > hw->aq.asq_buf_size) {
        i40e_debug(hw,
               I40E_DEBUG_AQ_MESSAGE,
               "AQTX: Invalid buffer size: %d.\n",
               buff_size);
        status = I40E_ERR_INVALID_SIZE;
        goto asq_send_command_error;
    }

    if (details->postpone && !details->async) {
        i40e_debug(hw,
               I40E_DEBUG_AQ_MESSAGE,
               "AQTX: Async flag not set along with postpone flag\n");
        status = I40E_ERR_PARAM;
        goto asq_send_command_error;
    }

    /* call clean and check queue available function to reclaim the
     * descriptors that were processed by FW, the function returns the
     * number of desc available
     */
    /* the clean function called here could be called in a separate thread
     * in case of asynchronous completions
     */
    if (i40e_clean_asq(hw) == 0) {
        i40e_debug(hw,
               I40E_DEBUG_AQ_MESSAGE,
               "AQTX: Error queue is full.\n");
        status = I40E_ERR_ADMIN_QUEUE_FULL;
        goto asq_send_command_error;
    }

    /* initialize the temp desc pointer with the right desc */
    desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

    /* if the desc is available copy the temp desc to the right place */
    *desc_on_ring = *desc;

    /* if buff is not NULL assume indirect command */
    if (buff != NULL) {
        dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
        /* copy the user buff into the respective DMA buff */
        memcpy(dma_buff->va, buff, buff_size);
        desc_on_ring->datalen = cpu_to_le16(buff_size);

        /* Update the address values in the desc with the pa value
         * for respective buffer
         */
        desc_on_ring->params.external.addr_high =
                cpu_to_le32(upper_32_bits(dma_buff->pa));
        desc_on_ring->params.external.addr_low =
                cpu_to_le32(lower_32_bits(dma_buff->pa));
    }

    /* bump the tail */
    i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
              buff, buff_size);
    (hw->aq.asq.next_to_use)++;
    if (hw->aq.asq.next_to_use == hw->aq.asq.count)
        hw->aq.asq.next_to_use = 0;
    if (!details->postpone)
        wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

    /* if cmd_details are not defined or async flag is not set,
     * we need to wait for desc write back
     */
    if (!details->async && !details->postpone) {
        u32 total_delay = 0;

        do {
            /* AQ designers suggest use of head for better
             * timing reliability than DD bit
             */
            if (i40e_asq_done(hw))
                break;

            if (is_atomic_context)
                udelay(50);
            else
                usleep_range(40, 60);

            total_delay += 50;
        } while (total_delay < hw->aq.asq_cmd_timeout);
    }

    /* if ready, copy the desc back to temp */
    if (i40e_asq_done(hw)) {
        *desc = *desc_on_ring;
        if (buff != NULL)
            memcpy(buff, dma_buff->va, buff_size);
        retval = le16_to_cpu(desc->retval);
        if (retval != 0) {
            i40e_debug(hw,
                   I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Command completed with error 0x%X.\n",
                   retval);

            /* strip off FW internal code */
            retval &= 0xff;
        }
        cmd_completed = true;
        if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
            status = 0;
        else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
            status = I40E_ERR_NOT_READY;
        else
            status = I40E_ERR_ADMIN_QUEUE_ERROR;
        hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
    }

    i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
           "AQTX: desc and buffer writeback:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

    /* save writeback aq if requested */
    if (details->wb_desc)
        *details->wb_desc = *desc_on_ring;

    /* update the error if time out occurred */
    if ((!cmd_completed) &&
        (!details->async && !details->postpone)) {
        if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
            i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: AQ Critical error.\n");
            status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
        } else {
            i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: Writeback timeout.\n");
            status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
        }
    }

asq_send_command_error:
    return status;
}

/**
 *  i40e_asq_send_command_atomic - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @is_atomic_context: is the function called in an atomic context?
 *
 *  Acquires the lock and calls the main send command execution
 *  routine.
 **/
i40e_status
i40e_asq_send_command_atomic(struct i40e_hw *hw,
                 struct i40e_aq_desc *desc,
                 void *buff, /* can be NULL */
                 u16  buff_size,
                 struct i40e_asq_cmd_details *cmd_details,
                 bool is_atomic_context)
{
    i40e_status status;

    mutex_lock(&hw->aq.asq_mutex);
    status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
                           cmd_details,
                           is_atomic_context);

    mutex_unlock(&hw->aq.asq_mutex);
    return status;
}

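/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  Convenience wrapper around i40e_asq_send_command_atomic() for
 *  callers that are not in an atomic context.
 **/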
i40e_status
i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
              void *buff, /* can be NULL */ u16  buff_size,
              struct i40e_asq_cmd_details *cmd_details)
{
    return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
                        cmd_details, false);
}

/**
 *  i40e_asq_send_command_atomic_v2 - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @is_atomic_context: is the function called in an atomic context?
 *  @aq_status: pointer to Admin Queue status return value
 *
 *  Acquires the lock and calls the main send command execution
 *  routine. Returns the last Admin Queue status in aq_status
 *  to avoid race conditions in access to hw->aq.asq_last_status.
 **/
i40e_status
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
                struct i40e_aq_desc *desc,
                void *buff, /* can be NULL */
                u16  buff_size,
                struct i40e_asq_cmd_details *cmd_details,
                bool is_atomic_context,
                enum i40e_admin_queue_err *aq_status)
{
    i40e_status status;

    mutex_lock(&hw->aq.asq_mutex);
    status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
                           buff_size,
                           cmd_details,
                           is_atomic_context);
    if (aq_status)
        *aq_status = hw->aq.asq_last_status;
    mutex_unlock(&hw->aq.asq_mutex);
    return status;
}

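/**
 *  i40e_asq_send_command_v2 - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @aq_status: pointer to Admin Queue status return value
 *
 *  Convenience wrapper around i40e_asq_send_command_atomic_v2() that
 *  also returns the last Admin Queue status in aq_status.
 **/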
i40e_status
i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
             void *buff, /* can be NULL */ u16  buff_size,
             struct i40e_asq_cmd_details *cmd_details,
             enum i40e_admin_queue_err *aq_status)
{
    return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
                           cmd_details, true, aq_status);
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                       u16 opcode)
{
    /* zero out the desc */
    memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
    desc->opcode = cpu_to_le16(opcode);
    desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}

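/* Illustrative sketch (not part of the original file): the usual
 * direct-command pattern built on the two helpers above. The opcode is
 * the real i40e_aqc_opc_queue_shutdown; a real caller such as
 * i40e_aq_queue_shutdown() in i40e_common.c also fills in
 * command-specific parameters, which are elided here.
 */
static i40e_status example_send_direct_cmd(struct i40e_hw *hw)
{
    struct i40e_aq_desc desc;

    i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);

    /* direct command: no indirect buffer, so buff is NULL and size 0 */
    return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
}
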
/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
                         struct i40e_arq_event_info *e,
                         u16 *pending)
{
    i40e_status ret_code = 0;
    u16 ntc = hw->aq.arq.next_to_clean;
    struct i40e_aq_desc *desc;
    struct i40e_dma_mem *bi;
    u16 desc_idx;
    u16 datalen;
    u16 flags;
    u16 ntu;

    /* pre-clean the event info */
    memset(&e->desc, 0, sizeof(e->desc));

    /* take the lock before we start messing with the ring */
    mutex_lock(&hw->aq.arq_mutex);

    if (hw->aq.arq.count == 0) {
        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
               "AQRX: Admin queue not initialized.\n");
        ret_code = I40E_ERR_QUEUE_EMPTY;
        goto clean_arq_element_err;
    }

    /* set next_to_use to head */
    ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
    if (ntu == ntc) {
        /* nothing to do - shouldn't need to update ring's values */
        ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
        goto clean_arq_element_out;
    }

    /* now clean the next descriptor */
    desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
    desc_idx = ntc;

    hw->aq.arq_last_status =
        (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
    flags = le16_to_cpu(desc->flags);
    if (flags & I40E_AQ_FLAG_ERR) {
        ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
        i40e_debug(hw,
               I40E_DEBUG_AQ_MESSAGE,
               "AQRX: Event received with error 0x%X.\n",
               hw->aq.arq_last_status);
    }

    e->desc = *desc;
    datalen = le16_to_cpu(desc->datalen);
    e->msg_len = min(datalen, e->buf_len);
    if (e->msg_buf != NULL && (e->msg_len != 0))
        memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
               e->msg_len);

    i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
    i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
              hw->aq.arq_buf_size);

    /* Restore the original datalen and buffer address in the desc,
     * FW updates datalen to indicate the event message
     * size
     */
    bi = &hw->aq.arq.r.arq_bi[ntc];
    memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

    desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
    if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
        desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
    desc->datalen = cpu_to_le16((u16)bi->size);
    desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
    desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

    /* set tail = the last cleaned desc index. */
    wr32(hw, hw->aq.arq.tail, ntc);
    /* ntc is updated to tail + 1 */
    ntc++;
    if (ntc == hw->aq.num_arq_entries)
        ntc = 0;
    hw->aq.arq.next_to_clean = ntc;
    hw->aq.arq.next_to_use = ntu;

    i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
    /* Set pending if needed, unlock and return */
    if (pending)
        *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
    mutex_unlock(&hw->aq.arq_mutex);

    return ret_code;
}

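/* Illustrative sketch (not part of the original file): a typical caller
 * drains the ARQ in a loop until no more events are pending, much like
 * i40e_clean_adminq_subtask() does in the driver proper. The function
 * name here is a hypothetical example; the caller is assumed to have
 * set event->msg_buf and event->buf_len beforehand.
 */
static void example_drain_arq(struct i40e_hw *hw,
                  struct i40e_arq_event_info *event)
{
    u16 pending;

    do {
        if (i40e_clean_arq_element(hw, event, &pending))
            break;  /* no work left, or the queue is down */
        /* dispatch on le16_to_cpu(event->desc.opcode) here */
    } while (pending);
}

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/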
static void i40e_resume_aq(struct i40e_hw *hw)
{
    /* Registers are reset after PF reset */
    hw->aq.asq.next_to_use = 0;
    hw->aq.asq.next_to_clean = 0;

    i40e_config_asq_regs(hw);

    hw->aq.arq.next_to_use = 0;
    hw->aq.arq.next_to_clean = 0;

    i40e_config_arq_regs(hw);
}