// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void iavf_adminq_init_regs(struct iavf_hw *hw)
{
    /* set head and tail registers in our local struct */
    hw->aq.asq.tail = IAVF_VF_ATQT1;
    hw->aq.asq.head = IAVF_VF_ATQH1;
    hw->aq.asq.len  = IAVF_VF_ATQLEN1;
    hw->aq.asq.bal  = IAVF_VF_ATQBAL1;
    hw->aq.asq.bah  = IAVF_VF_ATQBAH1;
    hw->aq.arq.tail = IAVF_VF_ARQT1;
    hw->aq.arq.head = IAVF_VF_ARQH1;
    hw->aq.arq.len  = IAVF_VF_ARQLEN1;
    hw->aq.arq.bal  = IAVF_VF_ARQBAL1;
    hw->aq.arq.bah  = IAVF_VF_ARQBAH1;
}

/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
    enum iavf_status ret_code;

    ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                     iavf_mem_atq_ring,
                     (hw->aq.num_asq_entries *
                     sizeof(struct iavf_aq_desc)),
                     IAVF_ADMINQ_DESC_ALIGNMENT);
    if (ret_code)
        return ret_code;

    ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                      (hw->aq.num_asq_entries *
                      sizeof(struct iavf_asq_cmd_details)));
    if (ret_code) {
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
        return ret_code;
    }

    return ret_code;
}

/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
    enum iavf_status ret_code;

    ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                     iavf_mem_arq_ring,
                     (hw->aq.num_arq_entries *
                     sizeof(struct iavf_aq_desc)),
                     IAVF_ADMINQ_DESC_ALIGNMENT);

    return ret_code;
}

/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void iavf_free_adminq_asq(struct iavf_hw *hw)
{
    iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void iavf_free_adminq_arq(struct iavf_hw *hw)
{
    iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
    struct iavf_aq_desc *desc;
    struct iavf_dma_mem *bi;
    enum iavf_status ret_code;
    int i;

    /* We'll be allocating the buffer info memory first, then we can
     * allocate the mapped buffers for the event processing
     */

    /* buffer_info structures do not need alignment */
    ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                      (hw->aq.num_arq_entries *
                       sizeof(struct iavf_dma_mem)));
    if (ret_code)
        goto alloc_arq_bufs;
    hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

    /* allocate the mapped buffers */
    for (i = 0; i < hw->aq.num_arq_entries; i++) {
        bi = &hw->aq.arq.r.arq_bi[i];
        ret_code = iavf_allocate_dma_mem(hw, bi,
                         iavf_mem_arq_buf,
                         hw->aq.arq_buf_size,
                         IAVF_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
            goto unwind_alloc_arq_bufs;

        /* now configure the descriptors for use */
        desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

        desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
            desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
        desc->opcode = 0;
        /* This is in accordance with Admin queue design, there is no
         * register for buffer size configuration
         */
        desc->datalen = cpu_to_le16((u16)bi->size);
        desc->retval = 0;
        desc->cookie_high = 0;
        desc->cookie_low = 0;
        desc->params.external.addr_high =
            cpu_to_le32(upper_32_bits(bi->pa));
        desc->params.external.addr_low =
            cpu_to_le32(lower_32_bits(bi->pa));
        desc->params.external.param0 = 0;
        desc->params.external.param1 = 0;
    }

alloc_arq_bufs:
    return ret_code;

unwind_alloc_arq_bufs:
    /* don't try to free the one that failed... */
    i--;
    for (; i >= 0; i--)
        iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
    iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

    return ret_code;
}

/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
    struct iavf_dma_mem *bi;
    enum iavf_status ret_code;
    int i;

    /* No mapped memory needed yet, just the buffer info structures */
    ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                      (hw->aq.num_asq_entries *
                       sizeof(struct iavf_dma_mem)));
    if (ret_code)
        goto alloc_asq_bufs;
    hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

    /* allocate the mapped buffers */
    for (i = 0; i < hw->aq.num_asq_entries; i++) {
        bi = &hw->aq.asq.r.asq_bi[i];
        ret_code = iavf_allocate_dma_mem(hw, bi,
                         iavf_mem_asq_buf,
                         hw->aq.asq_buf_size,
                         IAVF_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
            goto unwind_alloc_asq_bufs;
    }
alloc_asq_bufs:
    return ret_code;

unwind_alloc_asq_bufs:
    /* don't try to free the one that failed... */
    i--;
    for (; i >= 0; i--)
        iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
    iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

    return ret_code;
}

/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void iavf_free_arq_bufs(struct iavf_hw *hw)
{
    int i;

    /* free descriptors */
    for (i = 0; i < hw->aq.num_arq_entries; i++)
        iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

    /* free the descriptor memory */
    iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

    /* free the dma header */
    iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void iavf_free_asq_bufs(struct iavf_hw *hw)
{
    int i;

    /* only unmap if the address is non-NULL */
    for (i = 0; i < hw->aq.num_asq_entries; i++)
        if (hw->aq.asq.r.asq_bi[i].pa)
            iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

    /* free the buffer info list */
    iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

    /* free the descriptor memory */
    iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

    /* free the dma header */
    iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
    enum iavf_status ret_code = 0;
    u32 reg = 0;

    /* Clear Head and Tail */
    wr32(hw, hw->aq.asq.head, 0);
    wr32(hw, hw->aq.asq.tail, 0);

    /* set starting point */
    wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
    wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
    wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

    /* Check one register to verify that config was applied */
    reg = rd32(hw, hw->aq.asq.bal);
    if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
        ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

    return ret_code;
}

/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
    enum iavf_status ret_code = 0;
    u32 reg = 0;

    /* Clear Head and Tail */
    wr32(hw, hw->aq.arq.head, 0);
    wr32(hw, hw->aq.arq.tail, 0);

    /* set starting point */
    wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
    wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
    wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

    /* Update tail in the HW to post pre-allocated buffers */
    wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

    /* Check one register to verify that config was applied */
    reg = rd32(hw, hw->aq.arq.bal);
    if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
        ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

    return ret_code;
}

/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
    enum iavf_status ret_code = 0;
    int i;

    if (hw->aq.asq.count > 0) {
        /* queue already initialized */
        ret_code = IAVF_ERR_NOT_READY;
        goto init_adminq_exit;
    }

    /* verify input for valid configuration */
    if ((hw->aq.num_asq_entries == 0) ||
        (hw->aq.asq_buf_size == 0)) {
        ret_code = IAVF_ERR_CONFIG;
        goto init_adminq_exit;
    }

    hw->aq.asq.next_to_use = 0;
    hw->aq.asq.next_to_clean = 0;

    /* allocate the ring memory */
    ret_code = iavf_alloc_adminq_asq_ring(hw);
    if (ret_code)
        goto init_adminq_exit;

    /* allocate buffers in the rings */
    ret_code = iavf_alloc_asq_bufs(hw);
    if (ret_code)
        goto init_adminq_free_rings;

    /* initialize base registers */
    ret_code = iavf_config_asq_regs(hw);
    if (ret_code)
        goto init_free_asq_bufs;

    /* success! */
    hw->aq.asq.count = hw->aq.num_asq_entries;
    goto init_adminq_exit;

init_free_asq_bufs:
    for (i = 0; i < hw->aq.num_asq_entries; i++)
        iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
    iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

init_adminq_free_rings:
    iavf_free_adminq_asq(hw);

init_adminq_exit:
    return ret_code;
}

/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
    enum iavf_status ret_code = 0;
    int i;

    if (hw->aq.arq.count > 0) {
        /* queue already initialized */
        ret_code = IAVF_ERR_NOT_READY;
        goto init_adminq_exit;
    }

    /* verify input for valid configuration */
    if ((hw->aq.num_arq_entries == 0) ||
        (hw->aq.arq_buf_size == 0)) {
        ret_code = IAVF_ERR_CONFIG;
        goto init_adminq_exit;
    }

    hw->aq.arq.next_to_use = 0;
    hw->aq.arq.next_to_clean = 0;

    /* allocate the ring memory */
    ret_code = iavf_alloc_adminq_arq_ring(hw);
    if (ret_code)
        goto init_adminq_exit;

    /* allocate buffers in the rings */
    ret_code = iavf_alloc_arq_bufs(hw);
    if (ret_code)
        goto init_adminq_free_rings;

    /* initialize base registers */
    ret_code = iavf_config_arq_regs(hw);
    if (ret_code)
        goto init_free_arq_bufs;

    /* success! */
    hw->aq.arq.count = hw->aq.num_arq_entries;
    goto init_adminq_exit;

init_free_arq_bufs:
    for (i = 0; i < hw->aq.num_arq_entries; i++)
        iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
    iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
    iavf_free_adminq_arq(hw);

init_adminq_exit:
    return ret_code;
}

/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
    enum iavf_status ret_code = 0;

    mutex_lock(&hw->aq.asq_mutex);

    if (hw->aq.asq.count == 0) {
        ret_code = IAVF_ERR_NOT_READY;
        goto shutdown_asq_out;
    }

    /* Stop firmware AdminQ processing */
    wr32(hw, hw->aq.asq.head, 0);
    wr32(hw, hw->aq.asq.tail, 0);
    wr32(hw, hw->aq.asq.len, 0);
    wr32(hw, hw->aq.asq.bal, 0);
    wr32(hw, hw->aq.asq.bah, 0);

    hw->aq.asq.count = 0; /* to indicate uninitialized queue */

    /* free ring buffers */
    iavf_free_asq_bufs(hw);

shutdown_asq_out:
    mutex_unlock(&hw->aq.asq_mutex);
    return ret_code;
}

/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
    enum iavf_status ret_code = 0;

    mutex_lock(&hw->aq.arq_mutex);

    if (hw->aq.arq.count == 0) {
        ret_code = IAVF_ERR_NOT_READY;
        goto shutdown_arq_out;
    }

    /* Stop firmware AdminQ processing */
    wr32(hw, hw->aq.arq.head, 0);
    wr32(hw, hw->aq.arq.tail, 0);
    wr32(hw, hw->aq.arq.len, 0);
    wr32(hw, hw->aq.arq.bal, 0);
    wr32(hw, hw->aq.arq.bah, 0);

    hw->aq.arq.count = 0; /* to indicate uninitialized queue */

    /* free ring buffers */
    iavf_free_arq_bufs(hw);

shutdown_arq_out:
    mutex_unlock(&hw->aq.arq_mutex);
    return ret_code;
}

/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
    enum iavf_status ret_code;

    /* verify input for valid configuration */
    if ((hw->aq.num_arq_entries == 0) ||
        (hw->aq.num_asq_entries == 0) ||
        (hw->aq.arq_buf_size == 0) ||
        (hw->aq.asq_buf_size == 0)) {
        ret_code = IAVF_ERR_CONFIG;
        goto init_adminq_exit;
    }

    /* Set up register offsets */
    iavf_adminq_init_regs(hw);

    /* setup ASQ command write back timeout */
    hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

    /* allocate the ASQ */
    ret_code = iavf_init_asq(hw);
    if (ret_code)
        goto init_adminq_destroy_locks;

    /* allocate the ARQ */
    ret_code = iavf_init_arq(hw);
    if (ret_code)
        goto init_adminq_free_asq;

    /* success! */
    goto init_adminq_exit;

init_adminq_free_asq:
    iavf_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
    return ret_code;
}
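
/*
 * Example (editor's illustrative sketch, compiled out and not part of the
 * upstream driver): how a caller might bring the admin queue up and tear it
 * down.  The kernel-doc above requires num_asq_entries, num_arq_entries,
 * asq_buf_size and arq_buf_size to be set before iavf_init_adminq() is
 * called; the specific sizes below are hypothetical placeholders, not
 * values mandated by the hardware.
 */
#if 0
static enum iavf_status example_adminq_bringup(struct iavf_hw *hw)
{
    enum iavf_status status;

    /* hypothetical sizing; a real driver picks firmware-appropriate values */
    hw->aq.num_asq_entries = 32;
    hw->aq.num_arq_entries = 32;
    hw->aq.asq_buf_size = 4096;
    hw->aq.arq_buf_size = 4096;

    status = iavf_init_adminq(hw);
    if (status)
        return status;

    /* ... exchange commands via iavf_asq_send_command() ... */

    return iavf_shutdown_adminq(hw);
}
#endif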

/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
    if (iavf_check_asq_alive(hw))
        iavf_aq_queue_shutdown(hw, true);

    iavf_shutdown_asq(hw);
    iavf_shutdown_arq(hw);

    return 0;
}

/**
 *  iavf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
static u16 iavf_clean_asq(struct iavf_hw *hw)
{
    struct iavf_adminq_ring *asq = &hw->aq.asq;
    struct iavf_asq_cmd_details *details;
    u16 ntc = asq->next_to_clean;
    struct iavf_aq_desc desc_cb;
    struct iavf_aq_desc *desc;

    desc = IAVF_ADMINQ_DESC(*asq, ntc);
    details = IAVF_ADMINQ_DETAILS(*asq, ntc);
    while (rd32(hw, hw->aq.asq.head) != ntc) {
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
               "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

        if (details->callback) {
            IAVF_ADMINQ_CALLBACK cb_func =
                    (IAVF_ADMINQ_CALLBACK)details->callback;
            desc_cb = *desc;
            cb_func(hw, &desc_cb);
        }
        memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
        memset((void *)details, 0,
               sizeof(struct iavf_asq_cmd_details));
        ntc++;
        if (ntc == asq->count)
            ntc = 0;
        desc = IAVF_ADMINQ_DESC(*asq, ntc);
        details = IAVF_ADMINQ_DETAILS(*asq, ntc);
    }

    asq->next_to_clean = ntc;

    return IAVF_DESC_UNUSED(asq);
}

/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
    /* AQ designers suggest use of head for better
     * timing reliability than DD bit
     */
    return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

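/*
 * Example (editor's illustrative sketch, compiled out and not part of the
 * upstream driver): a caller that submitted a command with the async or
 * postpone flags set can poll iavf_asq_done() itself instead of relying on
 * the synchronous wait inside iavf_asq_send_command().  The loop mirrors
 * the in-file wait pattern; the 50 us step is taken from that pattern and
 * the overall timeout handling is a hypothetical simplification.
 */
#if 0
static bool example_wait_asq(struct iavf_hw *hw)
{
    u32 total_delay = 0;

    /* poll the head register until FW catches up or we time out */
    while (!iavf_asq_done(hw) && total_delay < hw->aq.asq_cmd_timeout) {
        udelay(50);
        total_delay += 50;
    }

    return iavf_asq_done(hw);
}
#endif
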
/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
                       struct iavf_aq_desc *desc,
                       void *buff, /* can be NULL */
                       u16  buff_size,
                       struct iavf_asq_cmd_details *cmd_details)
{
    struct iavf_dma_mem *dma_buff = NULL;
    struct iavf_asq_cmd_details *details;
    struct iavf_aq_desc *desc_on_ring;
    bool cmd_completed = false;
    enum iavf_status status = 0;
    u16  retval = 0;
    u32  val = 0;

    mutex_lock(&hw->aq.asq_mutex);

    if (hw->aq.asq.count == 0) {
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
               "AQTX: Admin queue not initialized.\n");
        status = IAVF_ERR_QUEUE_EMPTY;
        goto asq_send_command_error;
    }

    hw->aq.asq_last_status = IAVF_AQ_RC_OK;

    val = rd32(hw, hw->aq.asq.head);
    if (val >= hw->aq.num_asq_entries) {
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
               "AQTX: head overrun at %d\n", val);
        status = IAVF_ERR_QUEUE_EMPTY;
        goto asq_send_command_error;
    }

    details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
    if (cmd_details) {
        *details = *cmd_details;

        /* If the cmd_details are defined copy the cookie.  The
         * cpu_to_le32 is not needed here because the data is ignored
         * by the FW, only used by the driver
         */
        if (details->cookie) {
            desc->cookie_high =
                cpu_to_le32(upper_32_bits(details->cookie));
            desc->cookie_low =
                cpu_to_le32(lower_32_bits(details->cookie));
        }
    } else {
        memset(details, 0, sizeof(struct iavf_asq_cmd_details));
    }

    /* clear requested flags and then set additional flags if defined */
    desc->flags &= ~cpu_to_le16(details->flags_dis);
    desc->flags |= cpu_to_le16(details->flags_ena);

    if (buff_size > hw->aq.asq_buf_size) {
        iavf_debug(hw,
               IAVF_DEBUG_AQ_MESSAGE,
               "AQTX: Invalid buffer size: %d.\n",
               buff_size);
        status = IAVF_ERR_INVALID_SIZE;
        goto asq_send_command_error;
    }

    if (details->postpone && !details->async) {
        iavf_debug(hw,
               IAVF_DEBUG_AQ_MESSAGE,
               "AQTX: Async flag not set along with postpone flag\n");
        status = IAVF_ERR_PARAM;
        goto asq_send_command_error;
    }

    /* call clean and check queue available function to reclaim the
     * descriptors that were processed by FW, the function returns the
     * number of desc available
     */
    /* the clean function called here could be called in a separate thread
     * in case of asynchronous completions
     */
    if (iavf_clean_asq(hw) == 0) {
        iavf_debug(hw,
               IAVF_DEBUG_AQ_MESSAGE,
               "AQTX: Error queue is full.\n");
        status = IAVF_ERR_ADMIN_QUEUE_FULL;
        goto asq_send_command_error;
    }

    /* initialize the temp desc pointer with the right desc */
    desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

    /* if the desc is available copy the temp desc to the right place */
    *desc_on_ring = *desc;

    /* if buff is not NULL assume indirect command */
    if (buff) {
        dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
        /* copy the user buff into the respective DMA buff */
        memcpy(dma_buff->va, buff, buff_size);
        desc_on_ring->datalen = cpu_to_le16(buff_size);

        /* Update the address values in the desc with the pa value
         * for respective buffer
         */
        desc_on_ring->params.external.addr_high =
                cpu_to_le32(upper_32_bits(dma_buff->pa));
        desc_on_ring->params.external.addr_low =
                cpu_to_le32(lower_32_bits(dma_buff->pa));
    }

    /* bump the tail */
    iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
    iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
              buff, buff_size);
    (hw->aq.asq.next_to_use)++;
    if (hw->aq.asq.next_to_use == hw->aq.asq.count)
        hw->aq.asq.next_to_use = 0;
    if (!details->postpone)
        wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

    /* if cmd_details are not defined or async flag is not set,
     * we need to wait for desc write back
     */
    if (!details->async && !details->postpone) {
        u32 total_delay = 0;

        do {
            /* AQ designers suggest use of head for better
             * timing reliability than DD bit
             */
            if (iavf_asq_done(hw))
                break;
            udelay(50);
            total_delay += 50;
        } while (total_delay < hw->aq.asq_cmd_timeout);
    }

    /* if ready, copy the desc back to temp */
    if (iavf_asq_done(hw)) {
        *desc = *desc_on_ring;
        if (buff)
            memcpy(buff, dma_buff->va, buff_size);
        retval = le16_to_cpu(desc->retval);
        if (retval != 0) {
            iavf_debug(hw,
                   IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: Command completed with error 0x%X.\n",
                   retval);

            /* strip off FW internal code */
            retval &= 0xff;
        }
        cmd_completed = true;
        if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
            status = 0;
        else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
            status = IAVF_ERR_NOT_READY;
        else
            status = IAVF_ERR_ADMIN_QUEUE_ERROR;
        hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
    }

    iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
           "AQTX: desc and buffer writeback:\n");
    iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

    /* save writeback aq if requested */
    if (details->wb_desc)
        *details->wb_desc = *desc_on_ring;

    /* update the error if time out occurred */
    if ((!cmd_completed) &&
        (!details->async && !details->postpone)) {
        if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
            iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: AQ Critical error.\n");
            status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
        } else {
            iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: Writeback timeout.\n");
            status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
        }
    }

asq_send_command_error:
    mutex_unlock(&hw->aq.asq_mutex);
    return status;
}

/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
{
    /* zero out the desc */
    memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
    desc->opcode = cpu_to_le16(opcode);
    desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
}
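
/*
 * Example (editor's illustrative sketch, compiled out and not part of the
 * upstream driver): issuing a direct (no buffer) admin command.  The opcode
 * value is a hypothetical placeholder; real opcodes come from the admin
 * queue command definitions shared with the PF firmware.
 */
#if 0
static enum iavf_status example_send_direct_cmd(struct iavf_hw *hw)
{
    struct iavf_aq_desc desc;

    /* start from the default direct-command descriptor */
    iavf_fill_default_direct_cmd_desc(&desc, 0x0801 /* hypothetical opcode */);

    /* direct command: no indirect buffer, so buff is NULL and size is 0 */
    return iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
}
#endif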

/**
 *  iavf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'.
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
                    struct iavf_arq_event_info *e,
                    u16 *pending)
{
    u16 ntc = hw->aq.arq.next_to_clean;
    struct iavf_aq_desc *desc;
    enum iavf_status ret_code = 0;
    struct iavf_dma_mem *bi;
    u16 desc_idx;
    u16 datalen;
    u16 flags;
    u16 ntu;

    /* pre-clean the event info */
    memset(&e->desc, 0, sizeof(e->desc));

    /* take the lock before we start messing with the ring */
    mutex_lock(&hw->aq.arq_mutex);

    if (hw->aq.arq.count == 0) {
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
               "AQRX: Admin queue not initialized.\n");
        ret_code = IAVF_ERR_QUEUE_EMPTY;
        goto clean_arq_element_err;
    }

    /* set next_to_use to head */
    ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
    if (ntu == ntc) {
        /* nothing to do - shouldn't need to update ring's values */
        ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
        goto clean_arq_element_out;
    }

    /* now clean the next descriptor */
    desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
    desc_idx = ntc;

    hw->aq.arq_last_status =
        (enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
    flags = le16_to_cpu(desc->flags);
    if (flags & IAVF_AQ_FLAG_ERR) {
        ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
        iavf_debug(hw,
               IAVF_DEBUG_AQ_MESSAGE,
               "AQRX: Event received with error 0x%X.\n",
               hw->aq.arq_last_status);
    }

    e->desc = *desc;
    datalen = le16_to_cpu(desc->datalen);
    e->msg_len = min(datalen, e->buf_len);
    if (e->msg_buf && (e->msg_len != 0))
        memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
               e->msg_len);

    iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
    iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
              hw->aq.arq_buf_size);

    /* Restore the original datalen and buffer address in the desc,
     * FW updates datalen to indicate the event message
     * size
     */
    bi = &hw->aq.arq.r.arq_bi[ntc];
    memset((void *)desc, 0, sizeof(struct iavf_aq_desc));

    desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
    if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
        desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
    desc->datalen = cpu_to_le16((u16)bi->size);
    desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
    desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

    /* set tail = the last cleaned desc index. */
    wr32(hw, hw->aq.arq.tail, ntc);
    /* ntc is updated to tail + 1 */
    ntc++;
    if (ntc == hw->aq.num_arq_entries)
        ntc = 0;
    hw->aq.arq.next_to_clean = ntc;
    hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
    /* Set pending if needed, unlock and return */
    if (pending)
        *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
    mutex_unlock(&hw->aq.arq_mutex);

    return ret_code;
}
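
/*
 * Example (editor's illustrative sketch, compiled out and not part of the
 * upstream driver): draining the receive (event) queue with
 * iavf_clean_arq_element(), the pattern a driver's admin queue task might
 * use to poll for events.  kzalloc()/kfree() assume <linux/slab.h> is
 * available in this translation unit, and the per-event handling is left
 * as a stub.
 */
#if 0
static void example_drain_arq(struct iavf_hw *hw)
{
    struct iavf_arq_event_info event;
    enum iavf_status ret;
    u16 pending;

    /* one reusable message buffer sized to the posted ARQ buffers */
    event.buf_len = hw->aq.arq_buf_size;
    event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
    if (!event.msg_buf)
        return;

    do {
        ret = iavf_clean_arq_element(hw, &event, &pending);
        if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
            break;
        if (ret)
            break;

        /* event.desc holds the descriptor; event.msg_buf holds up to
         * event.msg_len bytes of message payload
         */
    } while (pending);

    kfree(event.msg_buf);
}
#endif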