0001
0002
0003
0004 #include "i40e_status.h"
0005 #include "i40e_type.h"
0006 #include "i40e_register.h"
0007 #include "i40e_adminq.h"
0008 #include "i40e_prototype.h"
0009
0010 static void i40e_resume_aq(struct i40e_hw *hw);
0011
0012
0013
0014
0015
0016
0017
0018 static void i40e_adminq_init_regs(struct i40e_hw *hw)
0019 {
0020
0021 if (i40e_is_vf(hw)) {
0022 hw->aq.asq.tail = I40E_VF_ATQT1;
0023 hw->aq.asq.head = I40E_VF_ATQH1;
0024 hw->aq.asq.len = I40E_VF_ATQLEN1;
0025 hw->aq.asq.bal = I40E_VF_ATQBAL1;
0026 hw->aq.asq.bah = I40E_VF_ATQBAH1;
0027 hw->aq.arq.tail = I40E_VF_ARQT1;
0028 hw->aq.arq.head = I40E_VF_ARQH1;
0029 hw->aq.arq.len = I40E_VF_ARQLEN1;
0030 hw->aq.arq.bal = I40E_VF_ARQBAL1;
0031 hw->aq.arq.bah = I40E_VF_ARQBAH1;
0032 } else {
0033 hw->aq.asq.tail = I40E_PF_ATQT;
0034 hw->aq.asq.head = I40E_PF_ATQH;
0035 hw->aq.asq.len = I40E_PF_ATQLEN;
0036 hw->aq.asq.bal = I40E_PF_ATQBAL;
0037 hw->aq.asq.bah = I40E_PF_ATQBAH;
0038 hw->aq.arq.tail = I40E_PF_ARQT;
0039 hw->aq.arq.head = I40E_PF_ARQH;
0040 hw->aq.arq.len = I40E_PF_ARQLEN;
0041 hw->aq.arq.bal = I40E_PF_ARQBAL;
0042 hw->aq.arq.bah = I40E_PF_ARQBAH;
0043 }
0044 }
0045
0046
0047
0048
0049
0050 static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
0051 {
0052 i40e_status ret_code;
0053
0054 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
0055 i40e_mem_atq_ring,
0056 (hw->aq.num_asq_entries *
0057 sizeof(struct i40e_aq_desc)),
0058 I40E_ADMINQ_DESC_ALIGNMENT);
0059 if (ret_code)
0060 return ret_code;
0061
0062 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
0063 (hw->aq.num_asq_entries *
0064 sizeof(struct i40e_asq_cmd_details)));
0065 if (ret_code) {
0066 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
0067 return ret_code;
0068 }
0069
0070 return ret_code;
0071 }
0072
0073
0074
0075
0076
0077 static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
0078 {
0079 i40e_status ret_code;
0080
0081 ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
0082 i40e_mem_arq_ring,
0083 (hw->aq.num_arq_entries *
0084 sizeof(struct i40e_aq_desc)),
0085 I40E_ADMINQ_DESC_ALIGNMENT);
0086
0087 return ret_code;
0088 }
0089
0090
0091
0092
0093
0094
0095
0096
/**
 * i40e_free_adminq_asq - Free Admin Send Queue descriptor ring memory
 * @hw: pointer to the hardware structure
 *
 * Frees only the descriptor ring; assumes the posted send buffers have
 * already been cleaned up and de-allocated.
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
0101
0102
0103
0104
0105
0106
0107
0108
/**
 * i40e_free_adminq_arq - Free Admin Receive Queue descriptor ring memory
 * @hw: pointer to the hardware structure
 *
 * Frees only the descriptor ring; assumes the posted receive buffers have
 * already been cleaned up and de-allocated.
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
0113
0114
0115
0116
0117
/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 *
 * Allocates one DMA data buffer per ARQ descriptor and primes each
 * descriptor so the firmware can immediately write events into it.
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing;
	 * the buffer-tracking array itself needs no alignment
	 */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptor for use by the firmware */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design: there is
		 * no register for buffer size configuration, so the size is
		 * carried in the descriptor itself
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}
0180
0181
0182
0183
0184
0185 static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
0186 {
0187 i40e_status ret_code;
0188 struct i40e_dma_mem *bi;
0189 int i;
0190
0191
0192 ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
0193 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
0194 if (ret_code)
0195 goto alloc_asq_bufs;
0196 hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
0197
0198
0199 for (i = 0; i < hw->aq.num_asq_entries; i++) {
0200 bi = &hw->aq.asq.r.asq_bi[i];
0201 ret_code = i40e_allocate_dma_mem(hw, bi,
0202 i40e_mem_asq_buf,
0203 hw->aq.asq_buf_size,
0204 I40E_ADMINQ_DESC_ALIGNMENT);
0205 if (ret_code)
0206 goto unwind_alloc_asq_bufs;
0207 }
0208 alloc_asq_bufs:
0209 return ret_code;
0210
0211 unwind_alloc_asq_bufs:
0212
0213 i--;
0214 for (; i >= 0; i--)
0215 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
0216 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
0217
0218 return ret_code;
0219 }
0220
0221
0222
0223
0224
0225 static void i40e_free_arq_bufs(struct i40e_hw *hw)
0226 {
0227 int i;
0228
0229
0230 for (i = 0; i < hw->aq.num_arq_entries; i++)
0231 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
0232
0233
0234 i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
0235
0236
0237 i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
0238 }
0239
0240
0241
0242
0243
0244 static void i40e_free_asq_bufs(struct i40e_hw *hw)
0245 {
0246 int i;
0247
0248
0249 for (i = 0; i < hw->aq.num_asq_entries; i++)
0250 if (hw->aq.asq.r.asq_bi[i].pa)
0251 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
0252
0253
0254 i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
0255
0256
0257 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
0258
0259
0260 i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
0261 }
0262
0263
0264
0265
0266
0267
0268
0269 static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
0270 {
0271 i40e_status ret_code = 0;
0272 u32 reg = 0;
0273
0274
0275 wr32(hw, hw->aq.asq.head, 0);
0276 wr32(hw, hw->aq.asq.tail, 0);
0277
0278
0279 wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
0280 I40E_PF_ATQLEN_ATQENABLE_MASK));
0281 wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
0282 wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
0283
0284
0285 reg = rd32(hw, hw->aq.asq.bal);
0286 if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
0287 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
0288
0289 return ret_code;
0290 }
0291
0292
0293
0294
0295
0296
0297
0298 static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
0299 {
0300 i40e_status ret_code = 0;
0301 u32 reg = 0;
0302
0303
0304 wr32(hw, hw->aq.arq.head, 0);
0305 wr32(hw, hw->aq.arq.tail, 0);
0306
0307
0308 wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
0309 I40E_PF_ARQLEN_ARQENABLE_MASK));
0310 wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
0311 wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
0312
0313
0314 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
0315
0316
0317 reg = rd32(hw, hw->aq.arq.bal);
0318 if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
0319 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
0320
0321 return ret_code;
0322 }
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335
0336
0337 static i40e_status i40e_init_asq(struct i40e_hw *hw)
0338 {
0339 i40e_status ret_code = 0;
0340
0341 if (hw->aq.asq.count > 0) {
0342
0343 ret_code = I40E_ERR_NOT_READY;
0344 goto init_adminq_exit;
0345 }
0346
0347
0348 if ((hw->aq.num_asq_entries == 0) ||
0349 (hw->aq.asq_buf_size == 0)) {
0350 ret_code = I40E_ERR_CONFIG;
0351 goto init_adminq_exit;
0352 }
0353
0354 hw->aq.asq.next_to_use = 0;
0355 hw->aq.asq.next_to_clean = 0;
0356
0357
0358 ret_code = i40e_alloc_adminq_asq_ring(hw);
0359 if (ret_code)
0360 goto init_adminq_exit;
0361
0362
0363 ret_code = i40e_alloc_asq_bufs(hw);
0364 if (ret_code)
0365 goto init_adminq_free_rings;
0366
0367
0368 ret_code = i40e_config_asq_regs(hw);
0369 if (ret_code)
0370 goto init_adminq_free_rings;
0371
0372
0373 hw->aq.asq.count = hw->aq.num_asq_entries;
0374 goto init_adminq_exit;
0375
0376 init_adminq_free_rings:
0377 i40e_free_adminq_asq(hw);
0378
0379 init_adminq_exit:
0380 return ret_code;
0381 }
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395
0396 static i40e_status i40e_init_arq(struct i40e_hw *hw)
0397 {
0398 i40e_status ret_code = 0;
0399
0400 if (hw->aq.arq.count > 0) {
0401
0402 ret_code = I40E_ERR_NOT_READY;
0403 goto init_adminq_exit;
0404 }
0405
0406
0407 if ((hw->aq.num_arq_entries == 0) ||
0408 (hw->aq.arq_buf_size == 0)) {
0409 ret_code = I40E_ERR_CONFIG;
0410 goto init_adminq_exit;
0411 }
0412
0413 hw->aq.arq.next_to_use = 0;
0414 hw->aq.arq.next_to_clean = 0;
0415
0416
0417 ret_code = i40e_alloc_adminq_arq_ring(hw);
0418 if (ret_code)
0419 goto init_adminq_exit;
0420
0421
0422 ret_code = i40e_alloc_arq_bufs(hw);
0423 if (ret_code)
0424 goto init_adminq_free_rings;
0425
0426
0427 ret_code = i40e_config_arq_regs(hw);
0428 if (ret_code)
0429 goto init_adminq_free_rings;
0430
0431
0432 hw->aq.arq.count = hw->aq.num_arq_entries;
0433 goto init_adminq_exit;
0434
0435 init_adminq_free_rings:
0436 i40e_free_adminq_arq(hw);
0437
0438 init_adminq_exit:
0439 return ret_code;
0440 }
0441
0442
0443
0444
0445
0446
0447
0448 static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
0449 {
0450 i40e_status ret_code = 0;
0451
0452 mutex_lock(&hw->aq.asq_mutex);
0453
0454 if (hw->aq.asq.count == 0) {
0455 ret_code = I40E_ERR_NOT_READY;
0456 goto shutdown_asq_out;
0457 }
0458
0459
0460 wr32(hw, hw->aq.asq.head, 0);
0461 wr32(hw, hw->aq.asq.tail, 0);
0462 wr32(hw, hw->aq.asq.len, 0);
0463 wr32(hw, hw->aq.asq.bal, 0);
0464 wr32(hw, hw->aq.asq.bah, 0);
0465
0466 hw->aq.asq.count = 0;
0467
0468
0469 i40e_free_asq_bufs(hw);
0470
0471 shutdown_asq_out:
0472 mutex_unlock(&hw->aq.asq_mutex);
0473 return ret_code;
0474 }
0475
0476
0477
0478
0479
0480
0481
0482 static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
0483 {
0484 i40e_status ret_code = 0;
0485
0486 mutex_lock(&hw->aq.arq_mutex);
0487
0488 if (hw->aq.arq.count == 0) {
0489 ret_code = I40E_ERR_NOT_READY;
0490 goto shutdown_arq_out;
0491 }
0492
0493
0494 wr32(hw, hw->aq.arq.head, 0);
0495 wr32(hw, hw->aq.arq.tail, 0);
0496 wr32(hw, hw->aq.arq.len, 0);
0497 wr32(hw, hw->aq.arq.bal, 0);
0498 wr32(hw, hw->aq.arq.bah, 0);
0499
0500 hw->aq.arq.count = 0;
0501
0502
0503 i40e_free_arq_bufs(hw);
0504
0505 shutdown_arq_out:
0506 mutex_unlock(&hw->aq.arq_mutex);
0507 return ret_code;
0508 }
0509
0510
0511
0512
0513
/**
 * i40e_set_hw_flags - derive hw capability flags
 * @hw: pointer to the hardware structure
 *
 * Resets hw->flags and sets capability bits based on the MAC type and the
 * AQ API version reported by the firmware; must run after the firmware
 * version has been retrieved via the admin queue.
 **/
static void i40e_set_hw_flags(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;

	hw->flags = 0;

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
			/* this API version also brings 802.1ad support */
			hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
		}
		break;
	case I40E_MAC_X722:
		/* X722 always has these two capabilities */
		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
			hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;

		/* fall through to also apply the MAC-independent checks */
		fallthrough;
	default:
		break;
	}

	/* Newer versions of firmware require a lock when reading the NVM */
	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 5))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	/* API 1.8+: persistent LLDP setting and drop mode */
	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 8)) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
		hw->flags |= I40E_HW_FLAG_DROP_MODE;
	}

	/* API 1.9+: extended PHY register access */
	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 9))
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
}
0573
0574
0575
0576
0577
0578
0579
0580
0581
0582
0583
0584
/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following
 * fields in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 *
 * Brings up both admin queues, retrieves the firmware/API version (with
 * retries while firmware comes up), derives capability flags and reads
 * NVM version info.  On any failure the queues brought up so far are torn
 * down again.
 **/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	i40e_status ret_code;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* Set up register offsets (PF vs VF) */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the version query a few times
	 * (up to ~1s total) and resume the AQ between attempts.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		msleep(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* Some features were introduced in different FW API versions
	 * for different MAC types.
	 */
	i40e_set_hw_flags(hw);

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* NOTE(review): the three flag blocks below look redundant with
	 * i40e_set_hw_flags() above, which already covers these version
	 * checks — candidates for removal, confirm before cleaning up.
	 */
	if (hw->mac.type == I40E_MAC_XL710 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
	}
	if (hw->mac.type == I40E_MAC_X722 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
	}

	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
	if (hw->aq.api_maj_ver > 1 ||
	    (hw->aq.api_maj_ver == 1 &&
	     hw->aq.api_min_ver >= 7))
		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

	/* a newer major API than the driver knows is not usable */
	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = 0;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:
	/* no locks to destroy here — the aq mutexes are presumably
	 * initialized/destroyed by the caller; confirm in the pf layer
	 */
init_adminq_exit:
	return ret_code;
}
0698
0699
0700
0701
0702
/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Notifies the firmware (if the send queue still responds), then tears
 * down both queues and releases the scratch NVM buffer if allocated.
 **/
void i40e_shutdown_adminq(struct i40e_hw *hw)
{
	/* politely tell the firmware we are going down, but only if the
	 * send queue is still alive
	 */
	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);
}
0714
0715
0716
0717
0718
0719
0720
/**
 * i40e_clean_asq - cleans the Admin Send Queue
 * @hw: pointer to the hardware structure
 *
 * Reclaims every descriptor the firmware has consumed (i.e. that the
 * hardware head pointer has moved past), invoking any completion callback
 * recorded in the matching cmd_details.  Caller must hold asq_mutex.
 *
 * Returns the number of free descriptors.
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	/* walk until next_to_clean catches up with the HW head */
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			/* pass a stack copy so the callback cannot scribble
			 * on live ring memory
			 */
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}
0754
0755
0756
0757
0758
0759
0760
0761
/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hardware structure
 *
 * Returns true if the firmware has caught up with all submitted
 * descriptors (hardware head equals our next_to_use), false if requests
 * are still pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
	/* the head register tracks firmware consumption; comparing it with
	 * next_to_use tells us whether everything we posted was processed
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;

}
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
0782
/**
 * i40e_asq_send_command_atomic_exec - send command to Admin Queue
 * @hw: pointer to the hardware structure
 * @desc: prefilled descriptor describing the command (non-DMA memory)
 * @buff: buffer to use for indirect commands, may be NULL
 * @buff_size: size of @buff
 * @cmd_details: pointer to command details structure, may be NULL
 * @is_atomic_context: true if the caller cannot sleep (busy-wait with
 *                     udelay instead of usleep_range while polling)
 *
 * Main send routine for the Admin Send Queue: posts one descriptor (and
 * optional indirect buffer), rings the doorbell and, unless the command
 * is async/postponed, polls for firmware write-back.
 * Caller must hold hw->aq.asq_mutex.
 **/
static i40e_status
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
				  struct i40e_aq_desc *desc,
				  void *buff,
				  u16 buff_size,
				  struct i40e_asq_cmd_details *cmd_details,
				  bool is_atomic_context)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;
	u32 val = 0;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	/* a head value beyond the ring size indicates a confused device */
	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	/* indirect buffer must fit the ring's pre-allocated DMA buffers */
	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	/* postpone only makes sense for async commands */
	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW; it returns the number of
	 * descriptors available — zero means the ring is genuinely full
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail (the doorbell) unless the caller postponed it */
	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* unless the command is async or postponed, poll for the firmware
	 * write-back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			if (i40e_asq_done(hw))
				break;

			/* busy-wait in atomic context, sleep otherwise */
			if (is_atomic_context)
				udelay(50);
			else
				usleep_range(40, 60);

			/* count ~50us per pass against asq_cmd_timeout
			 * (presumably in usecs — confirm I40E_ASQ_CMD_TIMEOUT)
			 */
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc (and buffer) back to temp */
	if (i40e_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code in the upper byte */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = I40E_ERR_NOT_READY;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if a timeout occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	return status;
}
0974
0975
0976
0977
0978
0979
0980
0981
0982
0983
0984
0985
0986
0987 i40e_status
0988 i40e_asq_send_command_atomic(struct i40e_hw *hw,
0989 struct i40e_aq_desc *desc,
0990 void *buff,
0991 u16 buff_size,
0992 struct i40e_asq_cmd_details *cmd_details,
0993 bool is_atomic_context)
0994 {
0995 i40e_status status;
0996
0997 mutex_lock(&hw->aq.asq_mutex);
0998 status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
0999 cmd_details,
1000 is_atomic_context);
1001
1002 mutex_unlock(&hw->aq.asq_mutex);
1003 return status;
1004 }
1005
/**
 * i40e_asq_send_command - send command to Admin Queue (may sleep)
 * @hw: pointer to the hardware structure
 * @desc: prefilled descriptor describing the command (non-DMA memory)
 * @buff: buffer to use for indirect commands, may be NULL
 * @buff_size: size of @buff
 * @cmd_details: pointer to command details structure, may be NULL
 *
 * Process-context wrapper: passes is_atomic_context = false so the
 * completion poll may sleep.
 **/
i40e_status
i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
		      void *buff, u16 buff_size,
		      struct i40e_asq_cmd_details *cmd_details)
{
	return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
					    cmd_details, false);
}
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029 i40e_status
1030 i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
1031 struct i40e_aq_desc *desc,
1032 void *buff,
1033 u16 buff_size,
1034 struct i40e_asq_cmd_details *cmd_details,
1035 bool is_atomic_context,
1036 enum i40e_admin_queue_err *aq_status)
1037 {
1038 i40e_status status;
1039
1040 mutex_lock(&hw->aq.asq_mutex);
1041 status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
1042 buff_size,
1043 cmd_details,
1044 is_atomic_context);
1045 if (aq_status)
1046 *aq_status = hw->aq.asq_last_status;
1047 mutex_unlock(&hw->aq.asq_mutex);
1048 return status;
1049 }
1050
1051 i40e_status
1052 i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
1053 void *buff, u16 buff_size,
1054 struct i40e_asq_cmd_details *cmd_details,
1055 enum i40e_admin_queue_err *aq_status)
1056 {
1057 return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
1058 cmd_details, true, aq_status);
1059 }
1060
1061
1062
1063
1064
1065
1066
1067
1068 void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
1069 u16 opcode)
1070 {
1071
1072 memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
1073 desc->opcode = cpu_to_le16(opcode);
1074 desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
1075 }
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
/**
 * i40e_clean_arq_element - process one Admin Receive Queue event
 * @hw: pointer to the hardware structure
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: optional out parameter: number of events left to process
 *
 * Cleans one ARQ element, returning its contents through @e, re-arms the
 * descriptor with its original buffer and hands it back to firmware.
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
				   struct i40e_arq_event_info *e,
				   u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	/* copy descriptor and (truncated if needed) message to the caller */
	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * FW updates datalen to indicate the event message size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
1180
/**
 * i40e_resume_aq - resume AQ processing from index 0
 * @hw: pointer to the hardware structure
 *
 * Resets both rings' software indices to zero and reprograms the hardware
 * registers; used while retrying the firmware version query during init.
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}