#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"
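
/**
 * iavf_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * Records the VF admin send queue (ASQ) and receive queue (ARQ) register
 * offsets in the hw struct so later code can program them by handle.
 */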
static void iavf_adminq_init_regs(struct iavf_hw *hw)
{
	/* set head and tail registers in our local struct */
	hw->aq.asq.tail = IAVF_VF_ATQT1;
	hw->aq.asq.head = IAVF_VF_ATQH1;
	hw->aq.asq.len = IAVF_VF_ATQLEN1;
	hw->aq.asq.bal = IAVF_VF_ATQBAL1;
	hw->aq.asq.bah = IAVF_VF_ATQBAH1;
	hw->aq.arq.tail = IAVF_VF_ARQT1;
	hw->aq.arq.head = IAVF_VF_ARQH1;
	hw->aq.arq.len = IAVF_VF_ARQLEN1;
	hw->aq.arq.bal = IAVF_VF_ARQBAL1;
	hw->aq.arq.bah = IAVF_VF_ARQBAH1;
}
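
/**
 * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send ring
 * @hw: pointer to the hardware structure
 *
 * Allocates the DMA descriptor ring and the per-command details array
 * for the admin send queue.
 */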
static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 iavf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					  sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					   sizeof(struct iavf_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}
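
/**
 * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive ring
 * @hw: pointer to the hardware structure
 *
 * Allocates the DMA descriptor ring for the admin receive queue.
 */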
static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 iavf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					  sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}
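
/**
 * iavf_free_adminq_asq - Free Admin Queue send ring
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated.
 */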
static void iavf_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
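
/**
 * iavf_free_adminq_arq - Free Admin Queue receive ring
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated.
 */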
static void iavf_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
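
/**
 * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 *
 * Allocates one DMA buffer per receive descriptor and writes each buffer's
 * address and size into its descriptor so the PF/firmware can post events.
 */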
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	enum iavf_status ret_code;
	int i;

	/* the buffer tracking array is allocated first; the DMA buffers can
	 * only be mapped into the descriptors after that succeeds
	 */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
					  (hw->aq.num_arq_entries *
					   sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptor for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* the buffer size lives in the descriptor itself; there is
		 * no register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
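
/**
 * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 *
 * Allocates one DMA buffer per send descriptor; a buffer is only attached
 * to a descriptor when a command with indirect data is actually sent.
 */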
static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
	struct iavf_dma_mem *bi;
	enum iavf_status ret_code;
	int i;

	/* allocate the buffer tracking array first */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
					  (hw->aq.num_asq_entries *
					   sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}
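
/**
 * iavf_free_arq_bufs - Free receive queue buffers and descriptor memory
 * @hw: pointer to the hardware structure
 */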
static void iavf_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free the posted receive buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor ring memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the buffer tracking array */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}
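
/**
 * iavf_free_asq_bufs - Free send queue buffers and descriptor memory
 * @hw: pointer to the hardware structure
 */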
static void iavf_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only free a buffer if it was actually allocated */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the command details memory */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor ring memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the buffer tracking array */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}
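
/**
 * iavf_config_asq_regs - routines to configure ASQ
 * @hw: pointer to the hardware structure
 *
 * Programs the base address and length registers for the admin send queue
 * and verifies that the base address register took the written value.
 */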
static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
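
/**
 * iavf_config_arq_regs - routines to configure ARQ
 * @hw: pointer to the hardware structure
 *
 * Programs the base address and length registers for the admin receive
 * queue, posts the pre-allocated receive buffers by bumping the tail, and
 * verifies that the base address register took the written value.
 */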
static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}
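
/**
 * iavf_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, the driver *MUST* set the following
 * fields in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 * Do not hold a lock when calling this, as the memory allocation routines
 * used here may sleep.
 */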
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	int i;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the ring */
	ret_code = iavf_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_asq_regs(hw);
	if (ret_code)
		goto init_free_asq_bufs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_free_asq_bufs:
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

init_adminq_free_rings:
	iavf_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}
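
/**
 * iavf_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Receive Queue.
 * Prior to calling this function, the driver *MUST* set the following
 * fields in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 * Do not hold a lock when calling this, as the memory allocation routines
 * used here may sleep.
 */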
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	int i;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the ring */
	ret_code = iavf_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_arq_regs(hw);
	if (ret_code)
		goto init_free_arq_bufs;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_free_arq_bufs:
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
	iavf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}
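
/**
 * iavf_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue.
 */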
static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}
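
/**
 * iavf_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue.
 */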
static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}
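
/**
 * iavf_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following
 * fields in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 */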
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* Set up register offsets */
	iavf_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = iavf_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = iavf_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	iavf_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}
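
/**
 * iavf_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Tells the firmware the admin queue is going away (if the send queue is
 * still alive) and then shuts down both the send and receive queues.
 */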
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	iavf_shutdown_asq(hw);
	iavf_shutdown_arq(hw);

	return 0;
}
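
/**
 * iavf_clean_asq - cleans the Admin Send Queue (ASQ)
 * @hw: pointer to the hw struct
 *
 * Walks the send queue forward to the hardware head, invoking any
 * completion callback and zeroing each descriptor the firmware has
 * consumed.  Returns the number of free descriptors in the queue.
 */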
static u16 iavf_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &hw->aq.asq;
	struct iavf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct iavf_aq_desc desc_cb;
	struct iavf_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			IAVF_ADMINQ_CALLBACK cb_func =
				(IAVF_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct iavf_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}
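
/**
 * iavf_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue.
 */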
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}
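
/**
 * iavf_asq_send_command - send command to the Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands (may be NULL)
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure (may be NULL)
 *
 * This is the main send command driver routine for the Admin Send Queue.
 * It posts the descriptor, optionally waits for the firmware write-back,
 * and translates the firmware return value into a driver status code.
 */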
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
				       struct iavf_aq_desc *desc,
				       void *buff, /* can be NULL */
				       u16 buff_size,
				       struct iavf_asq_cmd_details *cmd_details)
{
	struct iavf_dma_mem *dma_buff = NULL;
	struct iavf_asq_cmd_details *details;
	struct iavf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum iavf_status status = 0;
	u16 retval = 0;
	u32 val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = IAVF_AQ_RC_OK;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* if the cmd_details are defined copy the cookie; byte
		 * swapping is not needed here because the cookie is ignored
		 * by the firmware and only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct iavf_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = IAVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = IAVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* clean the queue to reclaim descriptors already processed by the
	 * firmware; iavf_clean_asq() returns the number of free descriptors,
	 * so zero means the queue is full
	 */
	if (iavf_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = IAVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff) {
		dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			udelay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
			status = 0;
		else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
			status = IAVF_ERR_NOT_READY;
		else
			status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}
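
/**
 * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode to place in the descriptor
 *
 * Fill the descriptor with default values for a direct command.
 */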
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
}
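
/**
 * iavf_clean_arq_element - return the next event from the Admin Receive Queue
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * Copies the next event out of the admin receive queue into @e, re-arms the
 * consumed descriptor with its buffer and, if @pending is non-NULL, reports
 * how many events remain to be cleaned.
 */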
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
					struct iavf_arq_event_info *e,
					u16 *pending)
{
	u16 ntc = hw->aq.arq.next_to_clean;
	struct iavf_aq_desc *desc;
	enum iavf_status ret_code = 0;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = IAVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & IAVF_AQ_FLAG_ERR) {
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct iavf_aq_desc));

	desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}