#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
				  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
				 struct lpfc_iocbq *pwqeq,
				 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;
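
/* Setup WQE templates for IOs */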
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
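/**
 * lpfc_sli4_pcimem_bcopy - Copy memory words to/from SLI4 hardware queues
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy; must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory and the
 * SLI4 queue memory. On 64-bit little-endian hosts the copy is a straight
 * 64-bit word copy because host and SLI4 endianness match; all other
 * configurations fall back to lpfc_sli_pcimem_bcopy(), which performs the
 * byte swapping. This function can be called with or without lock.
 **/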
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
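
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will
 * return -EBUSY; if the queue memory is invalid it returns -ENOMEM.
 **/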
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;

	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of combined writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
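
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/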
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}
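
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q, save the mailbox pointer for completion, and then ring the Mailbox
 * Queue Doorbell to signal the HBA to start processing the entry. This
 * function returns 0 if successful. If no entries are available on @q then
 * this function will return -ENOMEM.
 **/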
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
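
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a mailbox queue entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/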
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
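
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q and return
 * it. If no valid EQEs are in the queue, this function will return NULL.
 **/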
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
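
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqes consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/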
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqes consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
			    LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

/**
 * lpfc_sli4_process_eq - process the event queue
 * @phba: adapter with EQ
 * @eq: The queue structure to process
 * @rearm: rearm state
 *
 * This routine claims the EQ via the queue_claimed flag so that only one
 * context processes it, consumes EQEs in batches (ringing the doorbell
 * every notify_interval entries), and stops after max_proc_limit entries.
 * It returns the number of entries processed.
 **/
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
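
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q and
 * return it. If no valid CQEs are in the queue, this function will return
 * NULL.
 **/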
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon, not just marked as seen with minimal copy and then acted on
	 * later. Speculative instructions were seen reading cqe content
	 * before the valid bit check.
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
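
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/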
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
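
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header receive queue entry to put on the header receive queue.
 * @drqe: The data receive queue entry to put on the data receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entry on the header and data receive queues, then ring the
 * Receive Queue Doorbell when a notify_interval worth of entries has been
 * posted. This function returns the index that the rqe was copied to if
 * successful, -EINVAL on a malformed queue pair, -EBUSY if the queue is
 * full, or -ENOMEM if the queue memory is invalid.
 **/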
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
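
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/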
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
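
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/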
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
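
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 * @rrq: The RRQ to clear.
 *
 * The function clears the xritag in the ndlp's active_rrqs_xri_bitmap
 * (if the node is still present) and frees the rrq buffer back to the
 * rrq pool.
 **/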
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Lookup did to verify if did is still active on this vport */
	if (rrq->vport)
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
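
/**
 * lpfc_handle_rrq_active - Checks if RRQs have waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function walks phba->active_rrq_list under the hbalock and moves
 * every rrq whose rrq_stop_time has expired onto a local list. For each
 * expired rrq it either clears the rrq immediately or sends an RRQ ELS,
 * in which case the completion handler clears the bit in the xri bitmap.
 * The rrq timer is rearmed for the next rrq that will expire.
 **/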
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL Remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport != vport)
			continue;

		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
			list_move(&rrq->list, &rrq_list);

	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
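
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * Returns 0 on success and -EINVAL on failure.
 **/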
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
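
/**
 * __lpfc_sli_get_els_sglq - Allocates a sglq object from the els sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function takes the sgl_list_lock and gets a new driver sglq object
 * from the sglq list. A sglq whose XRI still has an outstanding RRQ for
 * the target DID is skipped. If a usable sglq is found it is marked
 * active and returned, else the function returns NULL.
 **/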
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;
	u8 cmnd;

	cmnd = get_job_cmnd(phba, piocbq);

	if (piocbq->cmd_flag & LPFC_IO_FCP) {
		lpfc_cmd = piocbq->io_buf;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->ndlp;
	} else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
		if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->ndlp;
	} else {
		ndlp = piocbq->ndlp;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates a sglq object from the nvmet sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
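
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/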
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, wqe);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->cmd_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
		    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
		    sglq->state != SGL_XRI_ABORTED) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);

			/* Check if we can get a reference on ndlp */
			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
				sglq->ndlp = NULL;

			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			pring = lpfc_phba_elsring(phba);
			/* Check if TXQ queue needs to be serviced */
			if (pring && (!list_empty(&pring->txq)))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
			     LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
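
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels each
 * IOCB on the list by invoking the completion callback associated with the
 * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * fields; IOCBs without a completion callback are simply released.
 **/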
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (piocb->cmd_cmpl) {
			if (piocb->cmd_flag & LPFC_IO_NVME) {
				lpfc_nvme_cancel_iocb(phba, piocb,
						      ulpstatus, ulpWord4);
			} else {
				if (phba->sli_rev == LPFC_SLI_REV4) {
					bf_set(lpfc_wcqe_c_status,
					       &piocb->wcqe_cmpl, ulpstatus);
					piocb->wcqe_cmpl.parameter = ulpWord4;
				} else {
					piocb->iocb.ulpStatus = ulpstatus;
					piocb->iocb.un.ulpWord[4] = ulpWord4;
				}
				(piocb->cmd_cmpl) (phba, piocb, piocb);
			}
		} else {
			lpfc_sli_release_iocbq(phba, piocb);
		}
	}
}
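
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/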
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return type;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case CMD_SEND_FRAME:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	u32 ulp_command = 0;

	BUG_ON(!piocb);
	ulp_command = get_job_cmnd(phba, piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;
	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (ulp_command != CMD_ABORT_XRI_WQE) &&
	    (ulp_command != CMD_ABORT_XRI_CN) &&
	    (ulp_command != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
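
/**
 * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This routine will inform the driver of any BW adjustments we need
 * to make. These changes will be picked up during the next CMF
 * timer interrupt. In addition, any BW changes will be logged
 * with a trace string.
 **/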
static void
lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		   struct lpfc_iocbq *rspiocb)
{
	union lpfc_wqe128 *wqe;
	uint32_t status, info;
	struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
	uint64_t bw, bwdif, slop;
	uint64_t pcent, bwpcent;
	int asig, afpin, sigcnt, fpincnt;
	int wsigmax, wfpinmax, cg, tdp;
	char *s;

	/* First check for error */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	if (status) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6211 CMF_SYNC_WQE Error "
				"req_tag x%x status x%x hwstatus x%x "
				"tdatap x%x parm x%x\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe),
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed,
				wcqe->parameter);
		goto out;
	}

	/* Gather congestion information on a successful cmpl */
	info = wcqe->parameter;
	phba->cmf_active_info = info;

	/* See if firmware info count is valid or has changed */
	if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
		info = 0;
	else
		phba->cmf_info_per_interval = info;

	tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
	cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);

	/* Get BW requirement from firmware */
	bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
	if (!bw) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6212 CMF_SYNC_WQE x%x: NULL bw\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		goto out;
	}

	/* Gather information needed for logging if a BW change is required */
	wqe = &cmdiocb->wqe;
	asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
	afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
	fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
	sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
	if (phba->cmf_max_bytes_per_interval != bw ||
	    (asig || afpin || sigcnt || fpincnt)) {
		/* Are we increasing or decreasing BW */
		if (phba->cmf_max_bytes_per_interval < bw) {
			bwdif = bw - phba->cmf_max_bytes_per_interval;
			s = "Increase";
		} else {
			bwdif = phba->cmf_max_bytes_per_interval - bw;
			s = "Decrease";
		}

		/* What is the change percentage */
		slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
		pcent = div64_u64(bwdif * 100 + slop,
				  phba->cmf_link_byte_count);
		bwpcent = div64_u64(bw * 100 + slop,
				    phba->cmf_link_byte_count);
		if (asig) {
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6237 BW Threshold %lld%% (%lld): "
					"%lld%% %s: Signal Alarm: cg:%d "
					"Info:%u\n",
					bwpcent, bw, pcent, s, cg,
					phba->cmf_active_info);
		} else if (afpin) {
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6238 BW Threshold %lld%% (%lld): "
					"%lld%% %s: FPIN Alarm: cg:%d "
					"Info:%u\n",
					bwpcent, bw, pcent, s, cg,
					phba->cmf_active_info);
		} else if (sigcnt) {
			wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6239 BW Threshold %lld%% (%lld): "
					"%lld%% %s: Signal Warning: "
					"Cnt %d Max %d: cg:%d Info:%u\n",
					bwpcent, bw, pcent, s, sigcnt,
					wsigmax, cg, phba->cmf_active_info);
		} else if (fpincnt) {
			wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6240 BW Threshold %lld%% (%lld): "
					"%lld%% %s: FPIN Warning: "
					"Cnt %d Max %d: cg:%d Info:%u\n",
					bwpcent, bw, pcent, s, fpincnt,
					wfpinmax, cg, phba->cmf_active_info);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
					"6241 BW Threshold %lld%% (%lld): "
					"CMF %lld%% %s: cg:%d Info:%u\n",
					bwpcent, bw, pcent, s, cg,
					phba->cmf_active_info);
		}
	} else if (info) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6246 Info Threshold %u\n", info);
	}

	/* Save BW change to be picked up during next timer interrupt */
	phba->cmf_last_sync_bw = bw;
out:
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
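
/**
 * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
 * @phba: Pointer to HBA context object.
 * @ms: ms to set in WQE interval, 0 means use init op
 * @total: Total rcv bytes for this interval
 *
 * This routine is called every CMF timer interrupt. Its purpose is
 * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
 * that may indicate we have congestion (FPINs or Signals). Upon
 * completion, the firmware will indicate any BW restrictions the
 * driver may need to take.
 **/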
int
lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
{
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *sync_buf;
	unsigned long iflags;
	u32 ret_val;
	u32 atot, wtot, max;

	/* First address any alarm / warning activity */
	atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
	wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);

	/* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
	if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
	    phba->link_state == LPFC_LINK_DOWN)
		return 0;

	spin_lock_irqsave(&phba->hbalock, iflags);
	sync_buf = __lpfc_sli_get_iocbq(phba);
	if (!sync_buf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
				"6244 No available WQEs for CMF_SYNC_WQE\n");
		ret_val = ENOMEM;
		goto out_unlock;
	}

	wqe = &sync_buf->wqe;

	/* WQEs are reused.  Clear stale data and set key fields to zero */
	memset(wqe, 0, sizeof(*wqe));

	/* If this is the very first CMF_SYNC_WQE, issue an init operation */
	if (!ms) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6441 CMF Init %d - CMF_SYNC_WQE\n",
				phba->fc_eventTag);
		bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
		bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
		goto initpath;
	}

	bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
	bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);

	/* Check for alarms / warnings */
	if (atot) {
		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
			/* We hit an Signal alarm condition */
			bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
		} else {
			/* We hit a FPIN alarm condition */
			bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
		}
	} else if (wtot) {
		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
		    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
			/* We hit an Signal warning condition */
			max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
				lpfc_acqe_cgn_frequency;
			bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
			bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
		} else {
			/* We hit a FPIN warning condition */
			bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
			bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
		}
	}

	/* Update total read blocks during previous timer interval */
	wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);

initpath:
	bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
	wqe->cmf_sync.event_tag = phba->fc_eventTag;
	bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);

	/* Setup reqtag to match the wqe completion. */
	bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);

	bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);

	bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
	bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
	bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);

	sync_buf->vport = phba->pport;
	sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
	sync_buf->cmd_dmabuf = NULL;
	sync_buf->rsp_dmabuf = NULL;
	sync_buf->bpl_dmabuf = NULL;
	sync_buf->sli4_xritag = NO_XRI;

	sync_buf->cmd_flag |= LPFC_IO_CMF;
	ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
	if (ret_val) {
		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
				"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
				ret_val);
		__lpfc_sli_release_iocbq(phba, sync_buf);
	}
out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret_val;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
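
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/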
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly unlikely case: another thread grew
				 * the array while the lock was dropped
				 */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which need to be
 *            posted to firmware.
 *
 * This function is called to post a new iocb to the firmware. This
 * function copies the new iocb to ring iocb slot and updates the
 * ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, cmd_cmpl MUST be NULL.
	 */
	if (nextiocb->cmd_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}
}
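
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is a free slot
 * available for the HBQ it will return pointer to the next available
 * HBQ entry else it will return NULL.
 **/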
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}

/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. It dispatches to the SLI revision
 * specific routine. The function will return zero if it
 * successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero
 * if it successfully posts the buffer, else it will return -ENOMEM.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}

/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
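
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/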
2514 static int
2515 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2516 {
2517 uint32_t i, posted = 0;
2518 unsigned long flags;
2519 struct hbq_dmabuf *hbq_buffer;
2520 LIST_HEAD(hbq_buf_list);
2521 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2522 return 0;
2523
2524 if ((phba->hbqs[hbqno].buffer_count + count) >
2525 lpfc_hbq_defs[hbqno]->entry_count)
2526 count = lpfc_hbq_defs[hbqno]->entry_count -
2527 phba->hbqs[hbqno].buffer_count;
2528 if (!count)
2529 return 0;
2530
2531 for (i = 0; i < count; i++) {
2532 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2533 if (!hbq_buffer)
2534 break;
2535 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2536 }
2537
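/* The buffers above were allocated without holding any lock; take the
 * hbalock only for the time needed to post them to the port.
 */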
2538 spin_lock_irqsave(&phba->hbalock, flags);
2539 if (!phba->hbq_in_use)
2540 goto err;
2541 while (!list_empty(&hbq_buf_list)) {
2542 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2543 dbuf.list);
2544 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2545 (hbqno << 16));
2546 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2547 phba->hbqs[hbqno].buffer_count++;
2548 posted++;
2549 } else
2550 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2551 }
2552 spin_unlock_irqrestore(&phba->hbalock, flags);
2553 return posted;
2554 err:
2555 spin_unlock_irqrestore(&phba->hbalock, flags);
2556 while (!list_empty(&hbq_buf_list)) {
2557 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2558 dbuf.list);
2559 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2560 }
2561 return 0;
2562 }
2563
2564 /**
2565  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2566  * @phba: Pointer to HBA context object.
2567  * @qno: HBQ number.
2568  *
2569  * This function posts more buffers to the HBQ. This function
2570  * is called with no lock held. The function returns the number
2571  * of HBQ entries successfully allocated.
2572  **/
2573 int
2574 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2575 {
2576 if (phba->sli_rev == LPFC_SLI_REV4)
2577 return 0;
2578 else
2579 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2580 lpfc_hbq_defs[qno]->add_count);
2581 }
2582
2583 /**
2584  * lpfc_sli_hbqbuf_init_hbqs - Post initial hbq buffers to the HBQ
2585  * @phba: Pointer to HBA context object.
2586  * @qno: HBQ queue number.
2587  *
2588  * This function is called from the SLI initialization code path with
2589  * no lock held to post initial HBQ buffers to firmware. The
2590  * function returns the number of HBQ entries successfully allocated.
2591  **/
2592 static int
2593 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2594 {
2595 if (phba->sli_rev == LPFC_SLI_REV4)
2596 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2597 lpfc_hbq_defs[qno]->entry_count);
2598 else
2599 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2600 lpfc_hbq_defs[qno]->init_count);
2601 }
2602
2603 /**
2604  * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
2605  * @rb_list: pointer to the driver's receive buffer list.
2606  *
2607  * Removes and returns the first hbq buffer on the list, or NULL if empty.
2608  **/
2609 static struct hbq_dmabuf *
2610 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2611 {
2612 struct lpfc_dmabuf *d_buf;
2613
2614 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2615 if (!d_buf)
2616 return NULL;
2617 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2618 }
2619
2620 /**
2621  * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2622  * @phba: Pointer to HBA context object.
2623  * @hrq: pointer to the header receive queue.
2624  *
2625  * This function removes the first RQ buffer on an RQ buffer list and
2626  * returns a pointer to that buffer, or NULL if the list is empty.
2627  **/
2628 static struct rqb_dmabuf *
2629 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2630 {
2631 struct lpfc_dmabuf *h_buf;
2632 struct lpfc_rqb *rqbp;
2633
2634 rqbp = hrq->rqbp;
2635 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2636 struct lpfc_dmabuf, list);
2637 if (!h_buf)
2638 return NULL;
2639 rqbp->buffer_count--;
2640 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2641 }
2642
2643 /**
2644  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2645  * @phba: Pointer to HBA context object.
2646  * @tag: Tag of the hbq buffer.
2647  *
2648  * This function searches for the hbq buffer associated with the given tag in
2649  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer,
2650  * otherwise it returns NULL.
2651  **/
2652 static struct hbq_dmabuf *
2653 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2654 {
2655 struct lpfc_dmabuf *d_buf;
2656 struct hbq_dmabuf *hbq_buf;
2657 uint32_t hbqno;
2658
2659 hbqno = tag >> 16;
2660 if (hbqno >= LPFC_MAX_HBQS)
2661 return NULL;
2662
2663 spin_lock_irq(&phba->hbalock);
2664 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2665 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2666 if (hbq_buf->tag == tag) {
2667 spin_unlock_irq(&phba->hbalock);
2668 return hbq_buf;
2669 }
2670 }
2671 spin_unlock_irq(&phba->hbalock);
2672 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2673 "1803 Bad hbq tag. Data: x%x x%x\n",
2674 tag, phba->hbqs[tag >> 16].buffer_count);
2675 return NULL;
2676 }
2677
2678 /**
2679  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2680  * @phba: Pointer to HBA context object.
2681  * @hbq_buffer: Pointer to HBQ buffer.
2682  *
2683  * This function is called with the hbalock held. The driver calls it to
2684  * repost an hbq buffer to the firmware. If the buffer cannot be reposted,
2685  * the buffer is freed.
2686  **/
2687 void
2688 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2689 {
2690 uint32_t hbqno;
2691
2692 if (hbq_buffer) {
2693 hbqno = hbq_buffer->tag >> 16;
2694 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2695 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2696 }
2697 }
2698
2699 /**
2700  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2701  * @mbxCommand: mailbox command code.
2702  *
2703  * This function is called by the mailbox event handler function to verify
2704  * that the completed mailbox command is a legitimate mailbox command. If the
2705  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2706  * and the mailbox event handler will take the HBA offline.
2707  **/
2708 static int
2709 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2710 {
2711 uint8_t ret;
2712
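/* Any mailbox command not listed below is unknown to the driver and is
 * mapped to MBX_SHUTDOWN, which causes the mailbox event handler to
 * take the HBA offline.
 */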
2713 switch (mbxCommand) {
2714 case MBX_LOAD_SM:
2715 case MBX_READ_NV:
2716 case MBX_WRITE_NV:
2717 case MBX_WRITE_VPARMS:
2718 case MBX_RUN_BIU_DIAG:
2719 case MBX_INIT_LINK:
2720 case MBX_DOWN_LINK:
2721 case MBX_CONFIG_LINK:
2722 case MBX_CONFIG_RING:
2723 case MBX_RESET_RING:
2724 case MBX_READ_CONFIG:
2725 case MBX_READ_RCONFIG:
2726 case MBX_READ_SPARM:
2727 case MBX_READ_STATUS:
2728 case MBX_READ_RPI:
2729 case MBX_READ_XRI:
2730 case MBX_READ_REV:
2731 case MBX_READ_LNK_STAT:
2732 case MBX_REG_LOGIN:
2733 case MBX_UNREG_LOGIN:
2734 case MBX_CLEAR_LA:
2735 case MBX_DUMP_MEMORY:
2736 case MBX_DUMP_CONTEXT:
2737 case MBX_RUN_DIAGS:
2738 case MBX_RESTART:
2739 case MBX_UPDATE_CFG:
2740 case MBX_DOWN_LOAD:
2741 case MBX_DEL_LD_ENTRY:
2742 case MBX_RUN_PROGRAM:
2743 case MBX_SET_MASK:
2744 case MBX_SET_VARIABLE:
2745 case MBX_UNREG_D_ID:
2746 case MBX_KILL_BOARD:
2747 case MBX_CONFIG_FARP:
2748 case MBX_BEACON:
2749 case MBX_LOAD_AREA:
2750 case MBX_RUN_BIU_DIAG64:
2751 case MBX_CONFIG_PORT:
2752 case MBX_READ_SPARM64:
2753 case MBX_READ_RPI64:
2754 case MBX_REG_LOGIN64:
2755 case MBX_READ_TOPOLOGY:
2756 case MBX_WRITE_WWN:
2757 case MBX_SET_DEBUG:
2758 case MBX_LOAD_EXP_ROM:
2759 case MBX_ASYNCEVT_ENABLE:
2760 case MBX_REG_VPI:
2761 case MBX_UNREG_VPI:
2762 case MBX_HEARTBEAT:
2763 case MBX_PORT_CAPABILITIES:
2764 case MBX_PORT_IOV_CONTROL:
2765 case MBX_SLI4_CONFIG:
2766 case MBX_SLI4_REQ_FTRS:
2767 case MBX_REG_FCFI:
2768 case MBX_UNREG_FCFI:
2769 case MBX_REG_VFI:
2770 case MBX_UNREG_VFI:
2771 case MBX_INIT_VPI:
2772 case MBX_INIT_VFI:
2773 case MBX_RESUME_RPI:
2774 case MBX_READ_EVENT_LOG_STATUS:
2775 case MBX_READ_EVENT_LOG:
2776 case MBX_SECURITY_MGMT:
2777 case MBX_AUTH_PORT:
2778 case MBX_ACCESS_VDATA:
2779 ret = mbxCommand;
2780 break;
2781 default:
2782 ret = MBX_SHUTDOWN;
2783 break;
2784 }
2785 return ret;
2786 }
2787
2788 /**
2789  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2790  * @phba: Pointer to HBA context object.
2791  * @pmboxq: Pointer to mailbox command.
2792  *
2793  * This is the completion handler function for mailbox commands issued from
2794  * the lpfc_sli_issue_mbox_wait function. This function is called by the
2795  * mailbox event handler function with no lock held. This function
2796  * will wake up the thread waiting on the completion pointed to by the
2797  * context3 field of the mailbox.
2798  **/
2799 void
2800 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2801 {
2802 unsigned long drvr_flag;
2803 struct completion *pmbox_done;
2804
2805 /*
2806  * If pmbox_done is empty, the driver thread gave up waiting and
2807  * continued running.
2808  */
2809 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2810 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2811 pmbox_done = (struct completion *)pmboxq->context3;
2812 if (pmbox_done)
2813 complete(pmbox_done);
2814 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2815 return;
2816 }
2817
2818 static void
2819 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2820 {
2821 unsigned long iflags;
2822
2823 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2824 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2825 spin_lock_irqsave(&ndlp->lock, iflags);
2826 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2827 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2828 spin_unlock_irqrestore(&ndlp->lock, iflags);
2829 }
2830 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2831 }
2832
2833 void
2834 lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2835 {
2836 __lpfc_sli_rpi_release(vport, ndlp);
2837 }
2838
2839 /**
2840  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2841  * @phba: Pointer to HBA context object.
2842  * @pmb: Pointer to mailbox object.
2843  *
2844  * This function is the default mailbox completion handler. It
2845  * frees the memory resources associated with the completed mailbox
2846  * command. If the completed command is a REG_LOGIN mailbox command,
2847  * this function will issue a UNREG_LOGIN to re-claim the RPI.
2848  **/
2849 void
2850 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2851 {
2852 struct lpfc_vport *vport = pmb->vport;
2853 struct lpfc_nodelist *ndlp;
2854 struct Scsi_Host *shost;
2855 uint16_t rpi, vpi;
2856 int rc;
2857
2858 /*
2859  * If a REG_LOGIN succeeded after the node was destroyed or the node
2860  * is in re-discovery, the driver needs to clean up the RPI.
2861  */
2862 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2863 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2864 !pmb->u.mb.mbxStatus) {
2865 rpi = pmb->u.mb.un.varWords[0];
2866 vpi = pmb->u.mb.un.varRegLogin.vpi;
2867 if (phba->sli_rev == LPFC_SLI_REV4)
2868 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2869 lpfc_unreg_login(phba, vpi, rpi, pmb);
2870 pmb->vport = vport;
2871 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2872 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2873 if (rc != MBX_NOT_FINISHED)
2874 return;
2875 }
2876
2877 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2878 !(phba->pport->load_flag & FC_UNLOADING) &&
2879 !pmb->u.mb.mbxStatus) {
2880 shost = lpfc_shost_from_vport(vport);
2881 spin_lock_irq(shost->host_lock);
2882 vport->vpi_state |= LPFC_VPI_REGISTERED;
2883 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2884 spin_unlock_irq(shost->host_lock);
2885 }
2886
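/* Drop the node reference held in ctx_ndlp for the REG_LOGIN64 mailbox */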
2887 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2888 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2889 lpfc_nlp_put(ndlp);
2890 }
2891
2892 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2893 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2894
2895 /* Check to see if there are any deferred events to process */
2896 if (ndlp) {
2897 lpfc_printf_vlog(
2898 vport,
2899 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2900 "1438 UNREG cmpl deferred mbox x%x "
2901 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2902 ndlp->nlp_rpi, ndlp->nlp_DID,
2903 ndlp->nlp_flag, ndlp->nlp_defer_did,
2904 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2905
2906 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2907 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2908 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2909 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2910 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2911 } else {
2912 __lpfc_sli_rpi_release(vport, ndlp);
2913 }
2914
2915 /* The unreg_login mailbox held its own reference to the
2916  * node in ctx_ndlp. The command is complete, so drop that
2917  * reference and clear the context pointer.
2918  */
2919 lpfc_nlp_put(ndlp);
2920 pmb->ctx_ndlp = NULL;
2921 }
2922 }
2923
2924 /* This nlp_put pairs with the node reference taken for MBX_RESUME_RPI */
2925 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2926 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2927 lpfc_nlp_put(ndlp);
2928 }
2929
2930 /* Check security permission status on INIT_LINK mailbox command */
2931 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2932 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2933 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2934 "2860 SLI authentication is required "
2935 "for INIT_LINK but has not done yet\n");
2936
2937 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2938 lpfc_sli4_mbox_cmd_free(phba, pmb);
2939 else
2940 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2941 }
2942
2943 /**
2944  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler when the
2945  *	mailbox command is cleared and the rpi is released
2946  * @phba: Pointer to HBA context object.
2947  * @pmb: Pointer to mailbox object.
2948  *
2949  * This function is the unreg rpi mailbox completion handler. On SLI4
2950  * interface type 2 and later ports it processes any deferred events on
2951  * the node, releases the RPI if NLP_RELEASE_RPI is set, and drops the
2952  * node reference held by the mailbox before freeing the mailbox memory
2953  * back to its pool.
2954  **/
2955 void
2956 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2957 {
2958 struct lpfc_vport *vport = pmb->vport;
2959 struct lpfc_nodelist *ndlp;
2960
2961 ndlp = pmb->ctx_ndlp;
2962 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2963 if (phba->sli_rev == LPFC_SLI_REV4 &&
2964 (bf_get(lpfc_sli_intf_if_type,
2965 &phba->sli4_hba.sli_intf) >=
2966 LPFC_SLI_INTF_IF_TYPE_2)) {
2967 if (ndlp) {
2968 lpfc_printf_vlog(
2969 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2970 "0010 UNREG_LOGIN vpi:%x "
2971 "rpi:%x DID:%x defer x%x flg x%x "
2972 "x%px\n",
2973 vport->vpi, ndlp->nlp_rpi,
2974 ndlp->nlp_DID, ndlp->nlp_defer_did,
2975 ndlp->nlp_flag,
2976 ndlp);
2977 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2978
2979 /* Check to see if there are any deferred
2980  * events to process
2981  */
2982 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2983 (ndlp->nlp_defer_did !=
2984 NLP_EVT_NOTHING_PENDING)) {
2985 lpfc_printf_vlog(
2986 vport, KERN_INFO, LOG_DISCOVERY,
2987 "4111 UNREG cmpl deferred "
2988 "clr x%x on "
2989 "NPort x%x Data: x%x x%px\n",
2990 ndlp->nlp_rpi, ndlp->nlp_DID,
2991 ndlp->nlp_defer_did, ndlp);
2992 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2993 ndlp->nlp_defer_did =
2994 NLP_EVT_NOTHING_PENDING;
2995 lpfc_issue_els_plogi(
2996 vport, ndlp->nlp_DID, 0);
2997 } else {
2998 __lpfc_sli_rpi_release(vport, ndlp);
2999 }
3000 lpfc_nlp_put(ndlp);
3001 }
3002 }
3003 }
3004
3005 mempool_free(pmb, phba->mbox_mem_pool);
3006 }
3007
3008 /**
3009  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3010  * @phba: Pointer to HBA context object.
3011  *
3012  * This function is called with no lock held. This function processes all
3013  * the completed mailbox commands and gives them to the upper layers. The
3014  * interrupt service routine processes mailbox completion interrupts and
3015  * adds completed mailbox commands to the mboxq_cmpl queue and signals the
3016  * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
3017  * will return the completed mailbox commands in the mboxq_cmpl queue to
3018  * the upper layers. The function returns the mailbox commands to the upper
3019  * layer by calling the completion handler function of each mailbox.
3020  **/
3021 int
3022 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3023 {
3024 MAILBOX_t *pmbox;
3025 LPFC_MBOXQ_t *pmb;
3026 int rc;
3027 LIST_HEAD(cmplq);
3028
3029 phba->sli.slistat.mbox_event++;
3030
3031 /* Get all completed mailbox buffers into the cmplq */
3032 spin_lock_irq(&phba->hbalock);
3033 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3034 spin_unlock_irq(&phba->hbalock);
3035
3036 /* Get a Mailbox buffer to setup mailbox commands for callback */
3037 do {
3038 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3039 if (pmb == NULL)
3040 break;
3041
3042 pmbox = &pmb->u.mb;
3043
3044 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3045 if (pmb->vport) {
3046 lpfc_debugfs_disc_trc(pmb->vport,
3047 LPFC_DISC_TRC_MBOX_VPORT,
3048 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3049 (uint32_t)pmbox->mbxCommand,
3050 pmbox->un.varWords[0],
3051 pmbox->un.varWords[1]);
3052 } else {
3054 lpfc_debugfs_disc_trc(phba->pport,
3055 LPFC_DISC_TRC_MBOX,
3056 "MBOX cmpl: cmd:x%x mb:x%x x%x",
3057 (uint32_t)pmbox->mbxCommand,
3058 pmbox->un.varWords[0],
3059 pmbox->un.varWords[1]);
3060 }
3061 }
3062
3063 /*
3064  * It is a fatal error if an unknown mailbox command completion is received.
3065  */
3066 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3067 MBX_SHUTDOWN) {
3068 /* Unknown mailbox command compl */
3069 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3070 "(%d):0323 Unknown Mailbox command "
3071 "x%x (x%x/x%x) Cmpl\n",
3072 pmb->vport ? pmb->vport->vpi :
3073 LPFC_VPORT_UNKNOWN,
3074 pmbox->mbxCommand,
3075 lpfc_sli_config_mbox_subsys_get(phba,
3076 pmb),
3077 lpfc_sli_config_mbox_opcode_get(phba,
3078 pmb));
3079 phba->link_state = LPFC_HBA_ERROR;
3080 phba->work_hs = HS_FFER3;
3081 lpfc_handle_eratt(phba);
3082 continue;
3083 }
3084
3085 if (pmbox->mbxStatus) {
3086 phba->sli.slistat.mbox_stat_err++;
3087 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3088 /* Mbox cmd cmpl error - RETRYing */
3089 lpfc_printf_log(phba, KERN_INFO,
3090 LOG_MBOX | LOG_SLI,
3091 "(%d):0305 Mbox cmd cmpl "
3092 "error - RETRYing Data: x%x "
3093 "(x%x/x%x) x%x x%x x%x\n",
3094 pmb->vport ? pmb->vport->vpi :
3095 LPFC_VPORT_UNKNOWN,
3096 pmbox->mbxCommand,
3097 lpfc_sli_config_mbox_subsys_get(phba,
3098 pmb),
3099 lpfc_sli_config_mbox_opcode_get(phba,
3100 pmb),
3101 pmbox->mbxStatus,
3102 pmbox->un.varWords[0],
3103 pmb->vport ? pmb->vport->port_state :
3104 LPFC_VPORT_UNKNOWN);
3105 pmbox->mbxStatus = 0;
3106 pmbox->mbxOwner = OWN_HOST;
3107 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3108 if (rc != MBX_NOT_FINISHED)
3109 continue;
3110 }
3111 }
3112
3113 /* Mailbox cmd <cmd> Cmpl <cmpl> */
3114 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3115 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3116 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3117 "x%x x%x x%x\n",
3118 pmb->vport ? pmb->vport->vpi : 0,
3119 pmbox->mbxCommand,
3120 lpfc_sli_config_mbox_subsys_get(phba, pmb),
3121 lpfc_sli_config_mbox_opcode_get(phba, pmb),
3122 pmb->mbox_cmpl,
3123 *((uint32_t *) pmbox),
3124 pmbox->un.varWords[0],
3125 pmbox->un.varWords[1],
3126 pmbox->un.varWords[2],
3127 pmbox->un.varWords[3],
3128 pmbox->un.varWords[4],
3129 pmbox->un.varWords[5],
3130 pmbox->un.varWords[6],
3131 pmbox->un.varWords[7],
3132 pmbox->un.varWords[8],
3133 pmbox->un.varWords[9],
3134 pmbox->un.varWords[10]);
3135
3136 if (pmb->mbox_cmpl)
3137 pmb->mbox_cmpl(phba, pmb);
3138 } while (1);
3139 return 0;
3140 }
3141
3142 /**
3143  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3144  * @phba: Pointer to HBA context object.
3145  * @pring: Pointer to driver SLI ring object.
3146  * @tag: buffer tag.
3147  *
3148  * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
3149  * is set in the tag, the buffer was posted for a particular exchange and
3150  * the function returns the buffer from the ring's posted-buffer list.
3151  * Otherwise the tag identifies an hbq buffer for unsolicited ELS or CT
3152  * traffic and the function looks it up on the hbq buffer list.
3153  **/
3154 static struct lpfc_dmabuf *
3155 lpfc_sli_get_buff(struct lpfc_hba *phba,
3156 struct lpfc_sli_ring *pring,
3157 uint32_t tag)
3158 {
3159 struct hbq_dmabuf *hbq_entry;
3160
3161 if (tag & QUE_BUFTAG_BIT)
3162 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3163 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3164 if (!hbq_entry)
3165 return NULL;
3166 return &hbq_entry->dbuf;
3167 }
3168
3169 /**
3170  * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3171  *                              containing a NVME LS request.
3172  * @phba: pointer to lpfc hba data structure.
3173  * @piocb: pointer to the iocbq struct representing the sequence starting
3174  *         frame.
3175  *
3176  * This routine initially validates the NVME LS, validates there is a login
3177  * with the port that sent the LS, and then calls the appropriate nvme host
3178  * or target LS request handler.
3179  **/
3180 static void
3181 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3182 {
3183 struct lpfc_nodelist *ndlp;
3184 struct lpfc_dmabuf *d_buf;
3185 struct hbq_dmabuf *nvmebuf;
3186 struct fc_frame_header *fc_hdr;
3187 struct lpfc_async_xchg_ctx *axchg = NULL;
3188 char *failwhy = NULL;
3189 uint32_t oxid, sid, did, fctl, size;
3190 int ret = 1;
3191
3192 d_buf = piocb->cmd_dmabuf;
3193
3194 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3195 fc_hdr = nvmebuf->hbuf.virt;
3196 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3197 sid = sli4_sid_from_fc_hdr(fc_hdr);
3198 did = sli4_did_from_fc_hdr(fc_hdr);
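/* Assemble the 24-bit F_CTL field from the three bytes of the FC header */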
3199 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3200 fc_hdr->fh_f_ctl[1] << 8 |
3201 fc_hdr->fh_f_ctl[2]);
3202 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3203
3204 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
3205 oxid, size, sid);
3206
3207 if (phba->pport->load_flag & FC_UNLOADING) {
3208 failwhy = "Driver Unloading";
3209 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3210 failwhy = "NVME FC4 Disabled";
3211 } else if (!phba->nvmet_support && !phba->pport->localport) {
3212 failwhy = "No Localport";
3213 } else if (phba->nvmet_support && !phba->targetport) {
3214 failwhy = "No Targetport";
3215 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3216 failwhy = "Bad NVME LS R_CTL";
3217 } else if (unlikely((fctl & 0x00FF0000) !=
3218 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3219 failwhy = "Bad NVME LS F_CTL";
3220 } else {
3221 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3222 if (!axchg)
3223 failwhy = "No CTX memory";
3224 }
3225
3226 if (unlikely(failwhy)) {
3227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3228 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3229 sid, oxid, failwhy);
3230 goto out_fail;
3231 }
3232
3233 /* validate the source of the LS is logged in */
3234 ndlp = lpfc_findnode_did(phba->pport, sid);
3235 if (!ndlp ||
3236 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3237 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3238 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3239 "6216 NVME Unsol rcv: No ndlp: "
3240 "NPort_ID x%x oxid x%x\n",
3241 sid, oxid);
3242 goto out_fail;
3243 }
3244
3245 axchg->phba = phba;
3246 axchg->ndlp = ndlp;
3247 axchg->size = size;
3248 axchg->oxid = oxid;
3249 axchg->sid = sid;
3250 axchg->wqeq = NULL;
3251 axchg->state = LPFC_NVME_STE_LS_RCV;
3252 axchg->entry_cnt = 1;
3253 axchg->rqb_buffer = (void *)nvmebuf;
3254 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3255 axchg->payload = nvmebuf->dbuf.virt;
3256 INIT_LIST_HEAD(&axchg->list);
3257
3258 if (phba->nvmet_support) {
3259 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3260 spin_lock_irq(&ndlp->lock);
3261 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3262 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3263 spin_unlock_irq(&ndlp->lock);
3264
3265 /* This reference is a single occurrence to hold the
3266  * node valid until the nvmet transport calls
3267  * host_release.
3268  */
3269 if (!lpfc_nlp_get(ndlp))
3270 goto out_fail;
3271
3272 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3273 "6206 NVMET unsol ls_req ndlp x%px "
3274 "DID x%x xflags x%x refcnt %d\n",
3275 ndlp, ndlp->nlp_DID,
3276 ndlp->fc4_xpt_flags,
3277 kref_read(&ndlp->kref));
3278 } else {
3279 spin_unlock_irq(&ndlp->lock);
3280 }
3281 } else {
3282 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3283 }
3284
3285 /* if zero, LS was successfully handled. If non-zero, LS not handled */
3286 if (!ret)
3287 return;
3288
3289 out_fail:
3290 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3291 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3292 "NVMe%s handler failed %d\n",
3293 did, sid, oxid,
3294 (phba->nvmet_support) ? "T" : "I", ret);
3295
3296 /* recycle receive buffer */
3297 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3298
3299 /* If start of new exchange, abort it */
3300 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3301 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3302
3303 if (ret)
3304 kfree(axchg);
3305 }
3306
3307 /**
3308  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3309  * @phba: Pointer to HBA context object.
3310  * @pring: Pointer to driver SLI ring object.
3311  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3312  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3313  * @fch_type: the type for the first frame of the sequence.
3314  *
3315  * This function is called with no lock held. This function uses the r_ctl and
3316  * type of the received sequence to find the correct callback function to call
3317  * to process the sequence.
3318  **/
3319 static int
3320 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3321 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3322 uint32_t fch_type)
3323 {
3324 int i;
3325
3326 switch (fch_type) {
3327 case FC_TYPE_NVME:
3328 lpfc_nvme_unsol_ls_handler(phba, saveq);
3329 return 1;
3330 default:
3331 break;
3332 }
3333
3334 /* Unsolicited responses */
3335 if (pring->prt[0].profile) {
3336 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3337 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3338 saveq);
3339 return 1;
3340 }
3341
3342 /* We must search, based on rctl / type, for the right routine */
3343 for (i = 0; i < pring->num_mask; i++) {
3344 if ((pring->prt[i].rctl == fch_r_ctl) &&
3345 (pring->prt[i].type == fch_type)) {
3346 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3347 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3348 (phba, pring, saveq);
3349 return 1;
3350 }
3351 }
3352 return 0;
3353 }
3354
3355 static void
3356 lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
3357 struct lpfc_iocbq *saveq)
3358 {
3359 IOCB_t *irsp;
3360 union lpfc_wqe128 *wqe;
3361 u16 i = 0;
3362
3363 irsp = &saveq->iocb;
3364 wqe = &saveq->wqe;
3365
3366 /* Fill wcqe with the IOCB status fields */
3367 bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
3368 saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
3369 saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
3370 saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
3371
3372 /* Source ID */
3373 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
3374
3375 /* rx-id of the response frame */
3376 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
3377
3378 /* ox-id of the frame */
3379 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
3380 irsp->unsli3.rcvsli3.ox_id);
3381
3382 /* DID */
3383 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
3384 irsp->un.rcvels.remoteID);
3385
3386 /* unsol data len */
3387 for (i = 0; i < irsp->ulpBdeCount; i++) {
3388 struct lpfc_hbq_entry *hbqe = NULL;
3389
3390 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3391 if (i == 0) {
3392 hbqe = (struct lpfc_hbq_entry *)
3393 &irsp->un.ulpWord[0];
3394 saveq->wqe.gen_req.bde.tus.f.bdeSize =
3395 hbqe->bde.tus.f.bdeSize;
3396 } else if (i == 1) {
3397 hbqe = (struct lpfc_hbq_entry *)
3398 &irsp->unsli3.sli3Words[4];
3399 saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
3400 }
3401 }
3402 }
3403 }
3404
3405
3406
3407 /**
3408  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3409  * @phba: Pointer to HBA context object.
3410  * @pring: Pointer to driver SLI ring object.
3411  * @saveq: Pointer to the unsolicited iocb.
3412  *
3413  * This function is called with no lock held by the ring event handler
3414  * when there is an unsolicited iocb posted to the response ring by the
3415  * firmware. This function gets the buffer associated with the iocbs
3416  * and calls the event handler for the ring. This function handles the
3417  * unsolicited iocbs.
3418  **/
3419 static int
3420 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3421 struct lpfc_iocbq *saveq)
3422 {
3423 IOCB_t *irsp;
3424 WORD5 *w5p;
3425 dma_addr_t paddr;
3426 uint32_t Rctl, Type;
3427 struct lpfc_iocbq *iocbq;
3428 struct lpfc_dmabuf *dmzbuf;
3429
3430 irsp = &saveq->iocb;
3431 saveq->vport = phba->pport;
3432
3433 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3434 if (pring->lpfc_sli_rcv_async_status)
3435 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3436 else
3437 lpfc_printf_log(phba,
3438 KERN_WARNING,
3439 LOG_SLI,
3440 "0316 Ring %d handler: unexpected "
3441 "ASYNC_STATUS iocb received evt_code "
3442 "0x%x\n",
3443 pring->ringno,
3444 irsp->un.asyncstat.evt_code);
3445 return 1;
3446 }
3447
3448 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3449 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3450 if (irsp->ulpBdeCount > 0) {
3451 dmzbuf = lpfc_sli_get_buff(phba, pring,
3452 irsp->un.ulpWord[3]);
3453 lpfc_in_buf_free(phba, dmzbuf);
3454 }
3455
3456 if (irsp->ulpBdeCount > 1) {
3457 dmzbuf = lpfc_sli_get_buff(phba, pring,
3458 irsp->unsli3.sli3Words[3]);
3459 lpfc_in_buf_free(phba, dmzbuf);
3460 }
3461
3462 if (irsp->ulpBdeCount > 2) {
3463 dmzbuf = lpfc_sli_get_buff(phba, pring,
3464 irsp->unsli3.sli3Words[7]);
3465 lpfc_in_buf_free(phba, dmzbuf);
3466 }
3467
3468 return 1;
3469 }
3470
3471 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3472 if (irsp->ulpBdeCount != 0) {
3473 saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
3474 irsp->un.ulpWord[3]);
3475 if (!saveq->cmd_dmabuf)
3476 lpfc_printf_log(phba,
3477 KERN_ERR,
3478 LOG_SLI,
3479 "0341 Ring %d Cannot find buffer for "
3480 "an unsolicited iocb. tag 0x%x\n",
3481 pring->ringno,
3482 irsp->un.ulpWord[3]);
3483 }
3484 if (irsp->ulpBdeCount == 2) {
3485 saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
3486 irsp->unsli3.sli3Words[7]);
3487 if (!saveq->bpl_dmabuf)
3488 lpfc_printf_log(phba,
3489 KERN_ERR,
3490 LOG_SLI,
3491 "0342 Ring %d Cannot find buffer for an"
3492 " unsolicited iocb. tag 0x%x\n",
3493 pring->ringno,
3494 irsp->unsli3.sli3Words[7]);
3495 }
3496 list_for_each_entry(iocbq, &saveq->list, list) {
3497 irsp = &iocbq->iocb;
3498 if (irsp->ulpBdeCount != 0) {
3499 iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
3500 pring,
3501 irsp->un.ulpWord[3]);
3502 if (!iocbq->cmd_dmabuf)
3503 lpfc_printf_log(phba,
3504 KERN_ERR,
3505 LOG_SLI,
3506 "0343 Ring %d Cannot find "
3507 "buffer for an unsolicited iocb"
3508 ". tag 0x%x\n", pring->ringno,
3509 irsp->un.ulpWord[3]);
3510 }
3511 if (irsp->ulpBdeCount == 2) {
3512 iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
3513 pring,
3514 irsp->unsli3.sli3Words[7]);
3515 if (!iocbq->bpl_dmabuf)
3516 lpfc_printf_log(phba,
3517 KERN_ERR,
3518 LOG_SLI,
3519 "0344 Ring %d Cannot find "
3520 "buffer for an unsolicited "
3521 "iocb. tag 0x%x\n",
3522 pring->ringno,
3523 irsp->unsli3.sli3Words[7]);
3524 }
3525 }
3526 } else {
3527 paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3528 irsp->un.cont64[0].addrLow);
3529 saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
3530 paddr);
3531 if (irsp->ulpBdeCount == 2) {
3532 paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3533 irsp->un.cont64[1].addrLow);
3534 saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
3535 pring,
3536 paddr);
3537 }
3538 }
3539
3540 if (irsp->ulpBdeCount != 0 &&
3541 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3542 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3543 int found = 0;
3544
3545 /* search continue save q for same XRI */
3546 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3547 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3548 saveq->iocb.unsli3.rcvsli3.ox_id) {
3549 list_add_tail(&saveq->list, &iocbq->list);
3550 found = 1;
3551 break;
3552 }
3553 }
3554 if (!found)
3555 list_add_tail(&saveq->clist,
3556 &pring->iocb_continue_saveq);
3557
3558 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3559 list_del_init(&iocbq->clist);
3560 saveq = iocbq;
3561 irsp = &saveq->iocb;
3562 } else {
3563 return 0;
3564 }
3565 }
3566 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3567 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3568 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3569 Rctl = FC_RCTL_ELS_REQ;
3570 Type = FC_TYPE_ELS;
3571 } else {
3572 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3573 Rctl = w5p->hcsw.Rctl;
3574 Type = w5p->hcsw.Type;
3575
3576 /* Firmware Workaround */
3577 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3578 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3579 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3580 Rctl = FC_RCTL_ELS_REQ;
3581 Type = FC_TYPE_ELS;
3582 w5p->hcsw.Rctl = Rctl;
3583 w5p->hcsw.Type = Type;
3584 }
3585 }
3586
3587 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3588 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3589 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3590 if (irsp->unsli3.rcvsli3.vpi == 0xffff)
3591 saveq->vport = phba->pport;
3592 else
3593 saveq->vport = lpfc_find_vport_by_vpid(phba,
3594 irsp->unsli3.rcvsli3.vpi);
3595 }
3596
3597 /* Prepare WQE with Unsol frame */
3598 lpfc_sli_prep_unsol_wqe(phba, saveq);
3599
3600 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3601 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3602 "0313 Ring %d handler: unexpected Rctl x%x "
3603 "Type x%x received\n",
3604 pring->ringno, Rctl, Type);
3605
3606 return 1;
3607 }
3608
3609 /**
3610  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3611  * @phba: Pointer to HBA context object.
3612  * @pring: Pointer to driver SLI ring object.
3613  * @prspiocb: Pointer to response iocb object.
3614  *
3615  * This function looks up the iocb_lookup table to get the command iocb
3616  * corresponding to the given response iocb using the iotag of the
3617  * response iocb. The driver calls this function with the hbalock held
3618  * for SLI3 ports or the ring lock held for SLI4 ports.
3619  * This function returns the command iocb object if it finds the command
3620  * iocb, else it returns NULL.
3621  **/
3622 static struct lpfc_iocbq *
3623 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3624 struct lpfc_sli_ring *pring,
3625 struct lpfc_iocbq *prspiocb)
3626 {
3627 struct lpfc_iocbq *cmd_iocb = NULL;
3628 u16 iotag;
3629
3630 if (phba->sli_rev == LPFC_SLI_REV4)
3631 iotag = get_wqe_reqtag(prspiocb);
3632 else
3633 iotag = prspiocb->iocb.ulpIoTag;
3634
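/* A valid iotag is non-zero and indexes directly into the driver's
 * iocbq_lookup array; anything beyond sli.last_iotag was never issued.
 */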
3635 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3636 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3637 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3638 /* remove from txcmpl queue list */
3639 list_del_init(&cmd_iocb->list);
3640 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3641 pring->txcmplq_cnt--;
3642 return cmd_iocb;
3643 }
3644 }
3645
3646 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3647 "0317 iotag x%x is out of "
3648 "range: max iotag x%x\n",
3649 iotag, phba->sli.last_iotag);
3650 return NULL;
3651 }
3652
3653 /**
3654  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3655  * @phba: Pointer to HBA context object.
3656  * @pring: Pointer to driver SLI ring object.
3657  * @iotag: IOCB tag.
3658  *
3659  * This function looks up the iocb_lookup table to get the command iocb
3660  * corresponding to the given iotag. The driver calls this function with
3661  * the ring lock held because this function is an SLI4 port only function.
3662  * This function returns the command iocb object if it finds the command
3663  * iocb, else it returns NULL.
3664  **/
3665 static struct lpfc_iocbq *
3666 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3667 struct lpfc_sli_ring *pring, uint16_t iotag)
3668 {
3669 struct lpfc_iocbq *cmd_iocb = NULL;
3670
3671 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3672 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3673 if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3674 /* remove from txcmpl queue list */
3675 list_del_init(&cmd_iocb->list);
3676 cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3677 pring->txcmplq_cnt--;
3678 return cmd_iocb;
3679 }
3680 }
3681
3682 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3683 "0372 iotag x%x lookup error: max iotag (x%x) "
3684 "cmd_flag x%x\n",
3685 iotag, phba->sli.last_iotag,
3686 cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3687 return NULL;
3688 }
3689
3690 /**
3691  * lpfc_sli_process_sol_iocb - process solicited iocb completion
3692  * @phba: Pointer to HBA context object.
3693  * @pring: Pointer to driver SLI ring object.
3694  * @saveq: Pointer to the response iocb to be processed.
3695  *
3696  * This function is called by the ring event handler for non-fcp
3697  * rings when there is a new response iocb in the response ring.
3698  * The caller is not required to hold any locks. This function
3699  * gets the command iocb associated with the response iocb and
3700  * calls the completion handler for the command iocb. If there
3701  * is no completion handler, the function will free the resources
3702  * associated with the command iocb. If the response iocb is for
3703  * an already aborted command iocb, the status of the completion
3704  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3705  * This function always returns 1.
3706  **/
3707 static int
3708 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3709 struct lpfc_iocbq *saveq)
3710 {
3711 struct lpfc_iocbq *cmdiocbp;
3712 unsigned long iflag;
3713 u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3714
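/* The iocbq lookup table is protected by the ring lock on SLI4 ports
 * and by the hbalock on earlier SLI revisions.
 */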
3715 if (phba->sli_rev == LPFC_SLI_REV4)
3716 spin_lock_irqsave(&pring->ring_lock, iflag);
3717 else
3718 spin_lock_irqsave(&phba->hbalock, iflag);
3719 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3720 if (phba->sli_rev == LPFC_SLI_REV4)
3721 spin_unlock_irqrestore(&pring->ring_lock, iflag);
3722 else
3723 spin_unlock_irqrestore(&phba->hbalock, iflag);
3724
3725 ulp_command = get_job_cmnd(phba, saveq);
3726 ulp_status = get_job_ulpstatus(phba, saveq);
3727 ulp_word4 = get_job_word4(phba, saveq);
3728 ulp_context = get_job_ulpcontext(phba, saveq);
3729 if (phba->sli_rev == LPFC_SLI_REV4)
3730 iotag = get_wqe_reqtag(saveq);
3731 else
3732 iotag = saveq->iocb.ulpIoTag;
3733
3734 if (cmdiocbp) {
3735 ulp_command = get_job_cmnd(phba, cmdiocbp);
3736 if (cmdiocbp->cmd_cmpl) {
3737 /*
3738  * If an ELS command failed, send an event to the
3739  * mgmt application.
3740  */
3741 if (ulp_status &&
3742 (pring->ringno == LPFC_ELS_RING) &&
3743 (ulp_command == CMD_ELS_REQUEST64_CR))
3744 lpfc_send_els_failure_event(phba,
3745 cmdiocbp, saveq);
3746
3747 /*
3748  * Post all ELS completions to the worker thread.
3749  * All others are passed to the completion callback.
3750  */
3751 if (pring->ringno == LPFC_ELS_RING) {
3752 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3753 (cmdiocbp->cmd_flag &
3754 LPFC_DRIVER_ABORTED)) {
3755 spin_lock_irqsave(&phba->hbalock,
3756 iflag);
3757 cmdiocbp->cmd_flag &=
3758 ~LPFC_DRIVER_ABORTED;
3759 spin_unlock_irqrestore(&phba->hbalock,
3760 iflag);
3761 saveq->iocb.ulpStatus =
3762 IOSTAT_LOCAL_REJECT;
3763 saveq->iocb.un.ulpWord[4] =
3764 IOERR_SLI_ABORTED;
3765
3766 /* Firmware could still be in progress
3767  * of DMAing payload, so don't free data
3768  * buffer till after a hbeat.
3769  */
3770 spin_lock_irqsave(&phba->hbalock,
3771 iflag);
3772 saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3773 spin_unlock_irqrestore(&phba->hbalock,
3774 iflag);
3775 }
3776 if (phba->sli_rev == LPFC_SLI_REV4) {
3777 if (saveq->cmd_flag &
3778 LPFC_EXCHANGE_BUSY) {
3779 /* Set cmdiocb flag for the
3780  * exchange busy so sgl (xri)
3781  * will not be released until
3782  * the abort xri is received
3783  * from hba.
3784  */
3785 spin_lock_irqsave(
3786 &phba->hbalock, iflag);
3787 cmdiocbp->cmd_flag |=
3788 LPFC_EXCHANGE_BUSY;
3789 spin_unlock_irqrestore(
3790 &phba->hbalock, iflag);
3791 }
3792 if (cmdiocbp->cmd_flag &
3793 LPFC_DRIVER_ABORTED) {
3794 /*
3795  * Clear LPFC_DRIVER_ABORTED
3796  * bit in case it was driver
3797  * initiated abort.
3798  */
3799 spin_lock_irqsave(
3800 &phba->hbalock, iflag);
3801 cmdiocbp->cmd_flag &=
3802 ~LPFC_DRIVER_ABORTED;
3803 spin_unlock_irqrestore(
3804 &phba->hbalock, iflag);
3805 set_job_ulpstatus(cmdiocbp,
3806 IOSTAT_LOCAL_REJECT);
3807 set_job_ulpword4(cmdiocbp,
3808 IOERR_ABORT_REQUESTED);
3809 /*
3810  * For SLI4, irspiocb contains
3811  * NO_XRI in sli_xritag, it
3812  * shall not affect releasing
3813  * sgl (xri) process.
3814  */
3815 set_job_ulpstatus(saveq,
3816 IOSTAT_LOCAL_REJECT);
3817 set_job_ulpword4(saveq,
3818 IOERR_SLI_ABORTED);
3819 spin_lock_irqsave(
3820 &phba->hbalock, iflag);
3821 saveq->cmd_flag |=
3822 LPFC_DELAY_MEM_FREE;
3823 spin_unlock_irqrestore(
3824 &phba->hbalock, iflag);
3825 }
3826 }
3827 }
3828 cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3829 } else
3830 lpfc_sli_release_iocbq(phba, cmdiocbp);
3831 } else {
3832 /*
3833  * Unknown initiating command based on the response iotag.
3834  * This could be the case on the ELS ring because of
3835  * lpfc_els_abort().
3836  */
3837 if (pring->ringno != LPFC_ELS_RING) {
3838 /*
3839  * Ring <ringno> handler: unexpected completion IoTag
3840  * <IoTag>
3841  */
3842 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3843 "0322 Ring %d handler: "
3844 "unexpected completion IoTag x%x "
3845 "Data: x%x x%x x%x x%x\n",
3846 pring->ringno, iotag, ulp_status,
3847 ulp_word4, ulp_command, ulp_context);
3848 }
3849 }
3850
3851 return 1;
3852 }
3853
3854 /**
3855  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3856  * @phba: Pointer to HBA context object.
3857  * @pring: Pointer to driver SLI ring object.
3858  *
3859  * This function is called from the iocb ring event handlers when the
3860  * put pointer is ahead of the get pointer for a ring. This function signals
3861  * an error attention condition to the worker thread and the worker
3862  * thread will transition the HBA to offline state.
3863  **/
3864 static void
3865 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3866 {
3867 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3868
3869 /* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3870  * rsp ring <portRspMax>
3871  */
3872 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3873 "0312 Ring %d handler: portRspPut %d "
3874 "is bigger than rsp ring %d\n",
3875 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3876 pring->sli.sli3.numRiocb);
3877
3878 phba->link_state = LPFC_HBA_ERROR;
3879
3880 /*
3881  * All error attention handlers are posted to the
3882  * worker thread
3883  */
3884 phba->work_ha |= HA_ERATT;
3885 phba->work_hs = HS_FFER3;
3886
3887 lpfc_worker_wake_up(phba);
3888
3889 return;
3890 }
3891
3892 /**
3893  * lpfc_poll_eratt - Error attention polling timer timeout handler
3894  * @t: Context to fetch pointer to address of HBA context object from.
3895  *
3896  * This function is invoked by the Error Attention polling timer when the
3897  * timer times out. It will check the SLI Error Attention register for
3898  * possible attention events. If so, it will post an Error Attention event
3899  * and wake up the worker thread to process it. Otherwise, it will set up
3900  * the Error Attention polling timer for the next poll.
3901  **/
3902 void lpfc_poll_eratt(struct timer_list *t)
3903 {
3904 struct lpfc_hba *phba;
3905 uint32_t eratt = 0;
3906 uint64_t sli_intr, cnt;
3907
3908 phba = from_timer(phba, t, eratt_poll);
3909
3910 /* Here we will also keep track of interrupts per sec of the hba */
3911 sli_intr = phba->sli.slistat.sli_intr;
3912
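/* Compute the interrupts that occurred since the last poll, allowing
 * for a single wraparound of the 64-bit counter.
 */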
3913 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3914 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3915 sli_intr);
3916 else
3917 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3918
3919 /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3920 do_div(cnt, phba->eratt_poll_interval);
3921 phba->sli.slistat.sli_ips = cnt;
3922
3923 phba->sli.slistat.sli_prev_intr = sli_intr;
3924
3925 /* Check chip HA register for error event */
3926 eratt = lpfc_sli_check_eratt(phba);
3927
3928 if (eratt)
3929 /* Tell the worker thread there is work to do */
3930 lpfc_worker_wake_up(phba);
3931 else
3932 /* Restart the timer for the next error attention poll */
3933 mod_timer(&phba->eratt_poll,
3934 jiffies +
3935 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3936 return;
3937 }
3938
3939
3940 /**
3941  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3942  * @phba: Pointer to HBA context object.
3943  * @pring: Pointer to driver SLI ring object.
3944  * @mask: Host attention register mask for this ring.
3945  *
3946  * This function is called from the interrupt context when there is a ring
3947  * event for the fcp ring. The caller does not hold any lock.
3948  * The function processes each response iocb in the response ring until it
3949  * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3950  * LE bit set. The function will call the completion handler of the command
3951  * iocb if the response iocb indicates a completion for a command iocb or it
3952  * is an abort completion. The function will call lpfc_sli_process_unsol_iocb
3953  * function if this is an unsolicited iocb.
3954  * This routine presumes LPFC_FCP_RING handling and doesn't bother
3955  * to check it explicitly. This function always returns 1.
3956  **/
3957 int
3958 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3959 struct lpfc_sli_ring *pring, uint32_t mask)
3960 {
3961 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3962 IOCB_t *irsp = NULL;
3963 IOCB_t *entry = NULL;
3964 struct lpfc_iocbq *cmdiocbq = NULL;
3965 struct lpfc_iocbq rspiocbq;
3966 uint32_t status;
3967 uint32_t portRspPut, portRspMax;
3968 int rc = 1;
3969 lpfc_iocb_type type;
3970 unsigned long iflag;
3971 uint32_t rsp_cmpl = 0;
3972
3973 spin_lock_irqsave(&phba->hbalock, iflag);
3974 pring->stats.iocb_event++;
3975
3976 /*
3977  * The next available response entry should never exceed the maximum
3978  * entries.  If it does, treat it as an adapter hardware error.
3979  */
3980 portRspMax = pring->sli.sli3.numRiocb;
3981 portRspPut = le32_to_cpu(pgp->rspPutInx);
3982 if (unlikely(portRspPut >= portRspMax)) {
3983 lpfc_sli_rsp_pointers_error(phba, pring);
3984 spin_unlock_irqrestore(&phba->hbalock, iflag);
3985 return 1;
3986 }
3987 if (phba->fcp_ring_in_use) {
3988 spin_unlock_irqrestore(&phba->hbalock, iflag);
3989 return 1;
3990 }
3991 phba->fcp_ring_in_use = 1;
3992
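/* Order the reads of the response entries after the read of portRspPut */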
3993 rmb();
3994 while (pring->sli.sli3.rspidx != portRspPut) {
3995 /*
3996  * Fetch an entry of the ring and copy it into a local data
3997  * structure.  The copy involves a byte-swap since the
3998  * network byte order and pci byte orders are different.
3999  */
4000 entry = lpfc_resp_iocb(phba, pring);
4001 phba->last_completion_time = jiffies;
4002
4003 if (++pring->sli.sli3.rspidx >= portRspMax)
4004 pring->sli.sli3.rspidx = 0;
4005
4006 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
4007 (uint32_t *) &rspiocbq.iocb,
4008 phba->iocb_rsp_size);
4009 INIT_LIST_HEAD(&(rspiocbq.list));
4010 irsp = &rspiocbq.iocb;
4011
4012 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
4013 pring->stats.iocb_rsp++;
4014 rsp_cmpl++;
4015
4016 if (unlikely(irsp->ulpStatus)) {
4017 /*
4018  * If resource errors reported from HBA, reduce
4019  * queuedepths of the SCSI device.
4020  */
4021 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4022 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4023 IOERR_NO_RESOURCES)) {
4024 spin_unlock_irqrestore(&phba->hbalock, iflag);
4025 phba->lpfc_rampdown_queue_depth(phba);
4026 spin_lock_irqsave(&phba->hbalock, iflag);
4027 }
4028
4029 /* Rsp ring <ringno> error: IOCB */
4030 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4031 "0336 Rsp Ring %d error: IOCB Data: "
4032 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
4033 pring->ringno,
4034 irsp->un.ulpWord[0],
4035 irsp->un.ulpWord[1],
4036 irsp->un.ulpWord[2],
4037 irsp->un.ulpWord[3],
4038 irsp->un.ulpWord[4],
4039 irsp->un.ulpWord[5],
4040 *(uint32_t *)&irsp->un1,
4041 *((uint32_t *)&irsp->un1 + 1));
4042 }
4043
4044 switch (type) {
4045 case LPFC_ABORT_IOCB:
4046 case LPFC_SOL_IOCB:
4047 /*
4048  * Idle exchange closed via ABTS from port.  No iocb
4049  * resources need to be recovered.
4050  */
4051 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
4052 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4053 "0333 IOCB cmd 0x%x"
4054 " processed. Skipping"
4055 " completion\n",
4056 irsp->ulpCommand);
4057 break;
4058 }
4059
4060 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
4061 &rspiocbq);
4062 if (unlikely(!cmdiocbq))
4063 break;
4064 if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
4065 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
4066 if (cmdiocbq->cmd_cmpl) {
4067 spin_unlock_irqrestore(&phba->hbalock, iflag);
4068 cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
4069 spin_lock_irqsave(&phba->hbalock, iflag);
4070 }
4071 break;
4072 case LPFC_UNSOL_IOCB:
4073 spin_unlock_irqrestore(&phba->hbalock, iflag);
4074 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4075 spin_lock_irqsave(&phba->hbalock, iflag);
4076 break;
4077 default:
4078 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4079 char adaptermsg[LPFC_MAX_ADPTMSG];
4080 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4081 memcpy(&adaptermsg[0], (uint8_t *) irsp,
4082 MAX_MSG_DATA);
4083 dev_warn(&((phba->pcidev)->dev),
4084 "lpfc%d: %s\n",
4085 phba->brd_no, adaptermsg);
4086 } else {
4087 /* Unknown IOCB command */
4088 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4089 "0334 Unknown IOCB command "
4090 "Data: x%x, x%x x%x x%x x%x\n",
4091 type, irsp->ulpCommand,
4092 irsp->ulpStatus,
4093 irsp->ulpIoTag,
4094 irsp->ulpContext);
4095 }
4096 break;
4097 }
4098
4099 /*
4100  * The response IOCB has been processed.  Update the ring
4101  * pointer in SLIM.  If the port response put pointer has not
4102  * been updated, sync the pgp->rspPutInx and fetch the new port
4103  * response put pointer.
4104  */
4105 writel(pring->sli.sli3.rspidx,
4106 &phba->host_gp[pring->ringno].rspGetInx);
4107
4108 if (pring->sli.sli3.rspidx == portRspPut)
4109 portRspPut = le32_to_cpu(pgp->rspPutInx);
4110 }
4111
4112 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4113 pring->stats.iocb_rsp_full++;
4114 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4115 writel(status, phba->CAregaddr);
4116 readl(phba->CAregaddr);
4117 }
4118 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4119 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4120 pring->stats.iocb_cmd_empty++;
4121
4122 /* Force update of the local copy of cmdGetInx */
4123 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4124 lpfc_sli_resume_iocb(phba, pring);
4125
4126 if (pring->lpfc_sli_cmd_available)
4127 pring->lpfc_sli_cmd_available(phba, pring);
4128
4129 }
4130
4131 phba->fcp_ring_in_use = 0;
4132 spin_unlock_irqrestore(&phba->hbalock, iflag);
4133 return rc;
4134 }
4135
4136
4137
4138 /**
4139  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4140  * @phba: Pointer to HBA context object.
4141  * @pring: Pointer to driver SLI ring object.
4142  * @rspiocbp: Pointer to driver response IOCB object.
4143  *
4144  * This function is called from the worker thread when there is a slow-path
4145  * response IOCB to process. This function chains all the response iocbs until
4146  * seeing the iocb with the LE bit set. The function will call
4147  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4148  * completion of a command iocb. The function will call the
4149  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. This
4150  * function frees the resources or calls the completion handler if this iocb
4151  * is an abort completion. The function returns NULL when the response iocbs
4152  * are used up.
4153  **/
4154 static struct lpfc_iocbq *
4155 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4156 struct lpfc_iocbq *rspiocbp)
4157 {
4158 struct lpfc_iocbq *saveq;
4159 struct lpfc_iocbq *cmdiocb;
4160 struct lpfc_iocbq *next_iocb;
4161 IOCB_t *irsp;
4162 uint32_t free_saveq;
4163 u8 cmd_type;
4164 lpfc_iocb_type type;
4165 unsigned long iflag;
4166 u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4167 u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4168 u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4169 int rc;
4170
4171 spin_lock_irqsave(&phba->hbalock, iflag);
4172 /* First add the response iocb to the continueq list */
4173 list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4174 pring->iocb_continueq_cnt++;
4175
4176 /*
4177  * By default, the driver expects to free all resources
4178  * associated with this iocb completion.
4179  */
4180 free_saveq = 1;
4181 saveq = list_get_first(&pring->iocb_continueq,
4182 struct lpfc_iocbq, list);
4183 list_del_init(&pring->iocb_continueq);
4184 pring->iocb_continueq_cnt = 0;
4185
4186 pring->stats.iocb_rsp++;
4187
4188 /*
4189  * If resource errors reported from HBA, reduce
4190  * queuedepths of the SCSI device.
4191  */
4192 if (ulp_status == IOSTAT_LOCAL_REJECT &&
4193 ((ulp_word4 & IOERR_PARAM_MASK) ==
4194 IOERR_NO_RESOURCES)) {
4195 spin_unlock_irqrestore(&phba->hbalock, iflag);
4196 phba->lpfc_rampdown_queue_depth(phba);
4197 spin_lock_irqsave(&phba->hbalock, iflag);
4198 }
4199
4200 if (ulp_status) {
4201 /* Rsp ring <ringno> error: IOCB */
4202 if (phba->sli_rev < LPFC_SLI_REV4) {
4203 irsp = &rspiocbp->iocb;
4204 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4205 "0328 Rsp Ring %d error: ulp_status x%x "
4206 "IOCB Data: "
4207 "x%08x x%08x x%08x x%08x "
4208 "x%08x x%08x x%08x x%08x "
4209 "x%08x x%08x x%08x x%08x "
4210 "x%08x x%08x x%08x x%08x\n",
4211 pring->ringno, ulp_status,
4212 get_job_ulpword(rspiocbp, 0),
4213 get_job_ulpword(rspiocbp, 1),
4214 get_job_ulpword(rspiocbp, 2),
4215 get_job_ulpword(rspiocbp, 3),
4216 get_job_ulpword(rspiocbp, 4),
4217 get_job_ulpword(rspiocbp, 5),
4218 *(((uint32_t *)irsp) + 6),
4219 *(((uint32_t *)irsp) + 7),
4220 *(((uint32_t *)irsp) + 8),
4221 *(((uint32_t *)irsp) + 9),
4222 *(((uint32_t *)irsp) + 10),
4223 *(((uint32_t *)irsp) + 11),
4224 *(((uint32_t *)irsp) + 12),
4225 *(((uint32_t *)irsp) + 13),
4226 *(((uint32_t *)irsp) + 14),
4227 *(((uint32_t *)irsp) + 15));
4228 } else {
4229 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4230 "0321 Rsp Ring %d error: "
4231 "IOCB Data: "
4232 "x%x x%x x%x x%x\n",
4233 pring->ringno,
4234 rspiocbp->wcqe_cmpl.word0,
4235 rspiocbp->wcqe_cmpl.total_data_placed,
4236 rspiocbp->wcqe_cmpl.parameter,
4237 rspiocbp->wcqe_cmpl.word3);
4238 }
4239 }
4240
4241
4242 /*
4243  * Fetch the iocb command type and call the correct completion
4244  * routine. Solicited and Unsolicited IOCBs on the ELS ring
4245  * get freed back to the lpfc_iocb_list by the discovery
4246  * kernel thread.
4247  */
4248 cmd_type = ulp_command & CMD_IOCB_MASK;
4249 type = lpfc_sli_iocb_cmd_type(cmd_type);
4250 switch (type) {
4251 case LPFC_SOL_IOCB:
4252 spin_unlock_irqrestore(&phba->hbalock, iflag);
4253 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4254 spin_lock_irqsave(&phba->hbalock, iflag);
4255 break;
4256 case LPFC_UNSOL_IOCB:
4257 spin_unlock_irqrestore(&phba->hbalock, iflag);
4258 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4259 spin_lock_irqsave(&phba->hbalock, iflag);
4260 if (!rc)
4261 free_saveq = 0;
4262 break;
4263 case LPFC_ABORT_IOCB:
4264 cmdiocb = NULL;
4265 if (ulp_command != CMD_XRI_ABORTED_CX)
4266 cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4267 saveq);
4268 if (cmdiocb) {
4269 /* Call the specified completion routine */
4270 if (cmdiocb->cmd_cmpl) {
4271 spin_unlock_irqrestore(&phba->hbalock, iflag);
4272 cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4273 spin_lock_irqsave(&phba->hbalock, iflag);
4274 } else {
4275 __lpfc_sli_release_iocbq(phba, cmdiocb);
4276 }
4277 }
4278 break;
4279 case LPFC_UNKNOWN_IOCB:
4280 if (ulp_command == CMD_ADAPTER_MSG) {
4281 char adaptermsg[LPFC_MAX_ADPTMSG];
4282
4283 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4284 memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4285 MAX_MSG_DATA);
4286 dev_warn(&((phba->pcidev)->dev),
4287 "lpfc%d: %s\n",
4288 phba->brd_no, adaptermsg);
4289 } else {
4290 /* Unknown command */
4291 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4292 "0335 Unknown IOCB "
4293 "command Data: x%x "
4294 "x%x x%x x%x\n",
4295 ulp_command,
4296 ulp_status,
4297 get_wqe_reqtag(rspiocbp),
4298 get_job_ulpcontext(phba, rspiocbp));
4299 }
4300 break;
4301 }
4302
4303 if (free_saveq) {
4304 list_for_each_entry_safe(rspiocbp, next_iocb,
4305 &saveq->list, list) {
4306 list_del_init(&rspiocbp->list);
4307 __lpfc_sli_release_iocbq(phba, rspiocbp);
4308 }
4309 __lpfc_sli_release_iocbq(phba, saveq);
4310 }
4311 rspiocbp = NULL;
4312 spin_unlock_irqrestore(&phba->hbalock, iflag);
4313 return rspiocbp;
4314 }
4315
4316 /**
4317  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4318  * @phba: Pointer to HBA context object.
4319  * @pring: Pointer to driver SLI ring object.
4320  * @mask: Host attention register mask for this ring.
4321  *
4322  * This routine wraps the actual slow_ring event process routine from the
4323  * API jump table function pointer from the lpfc_hba struct.
4324  **/
4325 void
4326 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4327 struct lpfc_sli_ring *pring, uint32_t mask)
4328 {
4329 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4330 }
4331
4332 /**
4333  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4334  * @phba: Pointer to HBA context object.
4335  * @pring: Pointer to driver SLI ring object.
4336  * @mask: Host attention register mask for this ring.
4337  *
4338  * This function is called from the worker thread when there is a ring event
4339  * for non-fcp rings. The caller does not hold any lock. The function will
4340  * remove each response iocb in the response ring and call the handle
4341  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4342  **/
4343 static void
4344 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4345 struct lpfc_sli_ring *pring, uint32_t mask)
4346 {
4347 struct lpfc_pgp *pgp;
4348 IOCB_t *entry;
4349 IOCB_t *irsp = NULL;
4350 struct lpfc_iocbq *rspiocbp = NULL;
4351 uint32_t portRspPut, portRspMax;
4352 unsigned long iflag;
4353 uint32_t status;
4354
4355 pgp = &phba->port_gp[pring->ringno];
4356 spin_lock_irqsave(&phba->hbalock, iflag);
4357 pring->stats.iocb_event++;
4358
4359 /*
4360  * The next available response entry should never exceed the maximum
4361  * entries.  If it does, treat it as an adapter hardware error.
4362  */
4363 portRspMax = pring->sli.sli3.numRiocb;
4364 portRspPut = le32_to_cpu(pgp->rspPutInx);
4365 if (portRspPut >= portRspMax) {
4366 /*
4367  * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4368  * rsp ring <portRspMax>
4369  */
4370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4371 "0303 Ring %d handler: portRspPut %d "
4372 "is bigger than rsp ring %d\n",
4373 pring->ringno, portRspPut, portRspMax);
4374
4375 phba->link_state = LPFC_HBA_ERROR;
4376 spin_unlock_irqrestore(&phba->hbalock, iflag);
4377
4378 phba->work_hs = HS_FFER3;
4379 lpfc_handle_eratt(phba);
4380
4381 return;
4382 }
4383
4384 rmb();
4385 while (pring->sli.sli3.rspidx != portRspPut) {
4386 /*
4387  * Build a completion list and call the appropriate handler.
4388  * The process is to get the next available response iocb, get
4389  * a free iocb from the list, copy the response data into the
4390  * free iocb, insert to the continuation list, and update the
4391  * next response index to slim.  This process makes response
4392  * iocb's in the ring available to DMA as fast as possible but
4393  * pays a penalty for a copy operation.  Since the iocb is
4394  * only 32 bytes, this penalty is considered small relative to
4395  * the PCI reads for register values and a slim write.  When
4396  * the ulpLe field is set, the entire Command has been
4397  * received.
4398  */
4399 entry = lpfc_resp_iocb(phba, pring);
4400
4401 phba->last_completion_time = jiffies;
4402 rspiocbp = __lpfc_sli_get_iocbq(phba);
4403 if (rspiocbp == NULL) {
4404 printk(KERN_ERR "%s: out of buffers! Failing "
4405 "completion.\n", __func__);
4406 break;
4407 }
4408
4409 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4410 phba->iocb_rsp_size);
4411 irsp = &rspiocbp->iocb;
4412
4413 if (++pring->sli.sli3.rspidx >= portRspMax)
4414 pring->sli.sli3.rspidx = 0;
4415
4416 if (pring->ringno == LPFC_ELS_RING) {
4417 lpfc_debugfs_slow_ring_trc(phba,
4418 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4419 *(((uint32_t *) irsp) + 4),
4420 *(((uint32_t *) irsp) + 6),
4421 *(((uint32_t *) irsp) + 7));
4422 }
4423
4424 writel(pring->sli.sli3.rspidx,
4425 &phba->host_gp[pring->ringno].rspGetInx);
4426
4427 spin_unlock_irqrestore(&phba->hbalock, iflag);
4428
4429 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4430 spin_lock_irqsave(&phba->hbalock, iflag);
4431
4432 /*
4433  * If the port response put pointer has not been updated, sync
4434  * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4435  * response put pointer.
4436  */
4437 if (pring->sli.sli3.rspidx == portRspPut) {
4438 portRspPut = le32_to_cpu(pgp->rspPutInx);
4439 }
4440 }
4441
4442 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4443 /* At least one response entry has been freed */
4444 pring->stats.iocb_rsp_full++;
4445 /* SET RxRE_RSP in Chip Att register */
4446 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4447 writel(status, phba->CAregaddr);
4448 readl(phba->CAregaddr);
4449 }
4450 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4451 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4452 pring->stats.iocb_cmd_empty++;
4453
4454 /* Force update of the local copy of cmdGetInx */
4455 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4456 lpfc_sli_resume_iocb(phba, pring);
4457
4458 if (pring->lpfc_sli_cmd_available)
4459 pring->lpfc_sli_cmd_available(phba, pring);
4460
4461 }
4462
4463 spin_unlock_irqrestore(&phba->hbalock, iflag);
4464 return;
4465 }
4466
4467 /**
4468  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4469  * @phba: Pointer to HBA context object.
4470  * @pring: Pointer to driver SLI ring object.
4471  * @mask: Host attention register mask for this ring.
4472  *
4473  * This function is called from the worker thread when there is a pending
4474  * ELS response iocb on the driver internal slow-path response iocb worker
4475  * queue. The caller does not hold any lock. The function will remove each
4476  * response iocb from the response worker queue and call the handle
4477  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4478  **/
4479 static void
4480 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4481 struct lpfc_sli_ring *pring, uint32_t mask)
4482 {
4483 struct lpfc_iocbq *irspiocbq;
4484 struct hbq_dmabuf *dmabuf;
4485 struct lpfc_cq_event *cq_event;
4486 unsigned long iflag;
4487 int count = 0;
4488
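/* Clear the pending-event flag before draining the queue so that events
 * queued while this runs will re-signal the worker thread.
 */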
4489 spin_lock_irqsave(&phba->hbalock, iflag);
4490 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4491 spin_unlock_irqrestore(&phba->hbalock, iflag);
4492 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4493 /* Get the response iocb from the head of work queue */
4494 spin_lock_irqsave(&phba->hbalock, iflag);
4495 list_remove_head(&phba->sli4_hba.sp_queue_event,
4496 cq_event, struct lpfc_cq_event, list);
4497 spin_unlock_irqrestore(&phba->hbalock, iflag);
4498
4499 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4500 case CQE_CODE_COMPL_WQE:
4501 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4502 cq_event);
4503 /* Translate ELS WCQE to response IOCBQ */
4504 irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4505 irspiocbq);
4506 if (irspiocbq)
4507 lpfc_sli_sp_handle_rspiocb(phba, pring,
4508 irspiocbq);
4509 count++;
4510 break;
4511 case CQE_CODE_RECEIVE:
4512 case CQE_CODE_RECEIVE_V1:
4513 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4514 cq_event);
4515 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4516 count++;
4517 break;
4518 default:
4519 break;
4520 }
4521
4522 /* Limit the number of events to 64 to avoid soft lockups */
4523 if (count == 64)
4524 break;
4525 }
4526 }
4527
4528 /**
4529  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4530  * @phba: Pointer to HBA context object.
4531  * @pring: Pointer to driver SLI ring object.
4532  *
4533  * This function aborts all iocbs in the given ring and frees all the iocb
4534  * objects in txq. This function issues an abort iocb for all the iocb commands
4535  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4536  * the return of this function. The caller is not required to hold any locks.
4537  **/
4538 void
4539 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4540 {
4541 LIST_HEAD(tx_completions);
4542 LIST_HEAD(txcmplq_completions);
4543 struct lpfc_iocbq *iocb, *next_iocb;
4544 int offline;
4545
4546 if (pring->ringno == LPFC_ELS_RING) {
4547 lpfc_fabric_abort_hba(phba);
4548 }
4549 offline = pci_channel_offline(phba->pcidev);
4550
4551 /* Error everything on txq and txcmplq.
4552  * First do the txq.
4553  */
4554 if (phba->sli_rev >= LPFC_SLI_REV4) {
4555 spin_lock_irq(&pring->ring_lock);
4556 list_splice_init(&pring->txq, &tx_completions);
4557 pring->txq_cnt = 0;
4558
4559 if (offline) {
4560 list_splice_init(&pring->txcmplq,
4561 &txcmplq_completions);
4562 } else {
4563 /* Next issue ABTS for everything on the txcmplq */
4564 list_for_each_entry_safe(iocb, next_iocb,
4565 &pring->txcmplq, list)
4566 lpfc_sli_issue_abort_iotag(phba, pring,
4567 iocb, NULL);
4568 }
4569 spin_unlock_irq(&pring->ring_lock);
4570 } else {
4571 spin_lock_irq(&phba->hbalock);
4572 list_splice_init(&pring->txq, &tx_completions);
4573 pring->txq_cnt = 0;
4574
4575 if (offline) {
4576 list_splice_init(&pring->txcmplq, &txcmplq_completions);
4577 } else {
4578 /* Next issue ABTS for everything on the txcmplq */
4579 list_for_each_entry_safe(iocb, next_iocb,
4580 &pring->txcmplq, list)
4581 lpfc_sli_issue_abort_iotag(phba, pring,
4582 iocb, NULL);
4583 }
4584 spin_unlock_irq(&phba->hbalock);
4585 }
4586
4587 if (offline) {
4588 /* Cancel all the IOCBs from the completions list */
4589 lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
4590 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
4591 } else {
4592 /* Make sure HBA is alive */
4593 lpfc_issue_hb_tmo(phba);
4594 }
4595 /* Cancel everything that was pending on the txq */
4596 lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
4597 IOERR_SLI_ABORTED);
4598 }
4599
4600 /**
4601  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4602  * @phba: Pointer to HBA context object.
4603  *
4604  * This function aborts all iocbs in FCP rings and frees all the iocb
4605  * objects in txq. This function issues an abort iocb for all the iocb commands
4606  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4607  * the return of this function. The caller is not required to hold any locks.
4608  **/
4609 void
4610 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4611 {
4612 struct lpfc_sli *psli = &phba->sli;
4613 struct lpfc_sli_ring *pring;
4614 uint32_t i;
4615
4616 /* Look on all the FCP Rings for the iotag */
4617 if (phba->sli_rev >= LPFC_SLI_REV4) {
4618 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4619 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4620 lpfc_sli_abort_iocb_ring(phba, pring);
4621 }
4622 } else {
4623 pring = &psli->sli3_ring[LPFC_FCP_RING];
4624 lpfc_sli_abort_iocb_ring(phba, pring);
4625 }
4626 }
4627
4628 /**
4629  * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4630  * @phba: Pointer to HBA context object.
4631  *
4632  * This function flushes all iocbs in the IO ring and frees all the iocb
4633  * objects in txq and txcmplq. This function will not issue abort iocbs
4634  * for all the iocb commands in txcmplq; they will just be returned with
4635  * IOERR_SLI_DOWN. This function is invoked with EEH when a device's PCI
4636  * slot has been permanently disabled.
4637  **/
4638 void
4639 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4640 {
4641 LIST_HEAD(txq);
4642 LIST_HEAD(txcmplq);
4643 struct lpfc_sli *psli = &phba->sli;
4644 struct lpfc_sli_ring *pring;
4645 uint32_t i;
4646 struct lpfc_iocbq *piocb, *next_iocb;
4647
4648 spin_lock_irq(&phba->hbalock);
4649 /* Indicate the I/O queues are flushed */
4650 phba->hba_flag |= HBA_IOQ_FLUSH;
4651 spin_unlock_irq(&phba->hbalock);
4652
4653 /* Look on all the FCP Rings for the iotag */
4654 if (phba->sli_rev >= LPFC_SLI_REV4) {
4655 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4656 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4657
4658 spin_lock_irq(&pring->ring_lock);
4659 /* Retrieve everything on txq */
4660 list_splice_init(&pring->txq, &txq);
4661 list_for_each_entry_safe(piocb, next_iocb,
4662 &pring->txcmplq, list)
4663 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4664 /* Retrieve everything on the txcmplq */
4665 list_splice_init(&pring->txcmplq, &txcmplq);
4666 pring->txq_cnt = 0;
4667 pring->txcmplq_cnt = 0;
4668 spin_unlock_irq(&pring->ring_lock);
4669
4670 /* Flush the txq */
4671 lpfc_sli_cancel_iocbs(phba, &txq,
4672 IOSTAT_LOCAL_REJECT,
4673 IOERR_SLI_DOWN);
4674 /* Flush the txcmplq */
4675 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4676 IOSTAT_LOCAL_REJECT,
4677 IOERR_SLI_DOWN);
4678 if (unlikely(pci_channel_offline(phba->pcidev)))
4679 lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4680 }
4681 } else {
4682 pring = &psli->sli3_ring[LPFC_FCP_RING];
4683
4684 spin_lock_irq(&phba->hbalock);
4685 /* Retrieve everything on txq */
4686 list_splice_init(&pring->txq, &txq);
4687 list_for_each_entry_safe(piocb, next_iocb,
4688 &pring->txcmplq, list)
4689 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4690 /* Retrieve everything on the txcmplq */
4691 list_splice_init(&pring->txcmplq, &txcmplq);
4692 pring->txq_cnt = 0;
4693 pring->txcmplq_cnt = 0;
4694 spin_unlock_irq(&phba->hbalock);
4695
4696 /* Flush the txq */
4697 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4698 IOERR_SLI_DOWN);
4699 /* Flush the txcmplq */
4700 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4701 IOERR_SLI_DOWN);
4702 }
4703 }
4704
4705 /**
4706  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4707  * @phba: Pointer to HBA context object.
4708  * @mask: Bit mask to be checked.
4709  *
4710  * This function reads the host status register and compares
4711  * with the provided bit mask to check if HBA completed
4712  * the restart. This function will wait in a loop for the
4713  * HBA to complete restart. If the HBA does not restart within
4714  * 15 iterations, the function will reset the HBA again. The
4715  * function returns 1 when the HBA fails to restart, otherwise
4716  * it returns zero.
4717  **/
4718 static int
4719 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4720 {
4721 uint32_t status;
4722 int i = 0;
4723 int retval = 0;
4724
4725 /* Read the HBA Host Status Register */
4726 if (lpfc_readl(phba->HSregaddr, &status))
4727 return 1;
4728
4729 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4730
4731 /*
4732  * Check the status register: every 10ms for the first 5 tries,
4733  * every 500ms for the next 5, then every 2.5 sec thereafter,
4734  * up to 20 tries in total. The board is restarted once, on
4735  * the 15th try, if it is still not ready.
4736  */
4737 while (((status & mask) != mask) &&
4738 !(status & HS_FFERM) &&
4739 i++ < 20) {
4740
4741 if (i <= 5)
4742 msleep(10);
4743 else if (i <= 10)
4744 msleep(500);
4745 else
4746 msleep(2500);
4747
4748 if (i == 15) {
4749 /* Reset the HBA and try again */
4750 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4751 lpfc_sli_brdrestart(phba);
4752 }
4753 /* Read the HBA Host Status Register */
4754 if (lpfc_readl(phba->HSregaddr, &status)) {
4755 retval = 1;
4756 break;
4757 }
4758 }
4759
4760 /* Check to see if any errors occurred during init */
4761 if ((status & HS_FFERM) || (i >= 20)) {
4762 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4763 "2751 Adapter failed to restart, "
4764 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4765 status,
4766 readl(phba->MBslimaddr + 0xa8),
4767 readl(phba->MBslimaddr + 0xac));
4768 phba->link_state = LPFC_HBA_ERROR;
4769 retval = 1;
4770 }
4771
4772 return retval;
4773 }
4774
4775 /**
4776  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4777  * @phba: Pointer to HBA context object.
4778  * @mask: Bit mask to be checked.
4779  *
4780  * This function checks the host status register to check if the HBA is
4781  * ready. This function will wait in a loop for the HBA to be ready. If
4782  * the HBA is not ready, the function will reset the HBA PCI function
4783  * again. The function returns 1 when the HBA fails to be ready,
4784  * otherwise it returns zero.
4785  **/
4786 static int
4787 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4788 {
4789 uint32_t status;
4790 int retval = 0;
4791
4792 /* Check the current status of the SLI4 device */
4793 status = lpfc_sli4_post_status_check(phba);
4794
4795 if (status) {
4796 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4797 lpfc_sli_brdrestart(phba);
4798 status = lpfc_sli4_post_status_check(phba);
4799 }
4800
4801 /* Check to see if any errors occurred during init */
4802 if (status) {
4803 phba->link_state = LPFC_HBA_ERROR;
4804 retval = 1;
4805 } else {
4806 phba->sli4_hba.intr_enable = 0;
4807 }
4808 phba->hba_flag &= ~HBA_SETUP;
4809 return retval;
4810 }
4811
4812 /**
4813  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4814  * @phba: Pointer to HBA context object.
4815  * @mask: Bit mask to be checked.
4816  *
4817  * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4818  * from the API jump table function pointer from the lpfc_hba struct.
4819  **/
4820 int
4821 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4822 {
4823 return phba->lpfc_sli_brdready(phba, mask);
4824 }
4825
4826 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4827
4828 /**
4829  * lpfc_reset_barrier - Make HBA ready for HBA reset
4830  * @phba: Pointer to HBA context object.
4831  *
4832  * This function is called before resetting an HBA. This function is called
4833  * with hbalock held and requests HBA to quiesce DMAs before a reset.
4834  **/
4835 void lpfc_reset_barrier(struct lpfc_hba *phba)
4836 {
4837 uint32_t __iomem *resp_buf;
4838 uint32_t __iomem *mbox_buf;
4839 volatile struct MAILBOX_word0 mbox;
4840 uint32_t hc_copy, ha_copy, resp_data;
4841 int i;
4842 uint8_t hdrtype;
4843
4844 lockdep_assert_held(&phba->hbalock);
4845
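/* This barrier workaround is only needed on older multi-function
 * (PCI header type 0x80) Helios and Thor adapters; skip it otherwise.
 */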
4846 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4847 if (hdrtype != 0x80 ||
4848 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4849 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4850 return;
4851
4852 /*
4853  * Tell the other part of the chip to suspend temporarily all
4854  * its DMA activity.
4855  */
4856 resp_buf = phba->MBslimaddr;
4857
4858 /* Disable the error attention */
4859 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4860 return;
4861 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4862 readl(phba->HCregaddr);
4863 phba->link_flag |= LS_IGNORE_ERATT;
4864
4865 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4866 return;
4867 if (ha_copy & HA_ERATT) {
4868 /* Clear Chip error bit */
4869 writel(HA_ERATT, phba->HAregaddr);
4870 phba->pport->stopped = 1;
4871 }
4872
4873 mbox.word0 = 0;
4874 mbox.mbxCommand = MBX_KILL_BOARD;
4875 mbox.mbxOwner = OWN_CHIP;
4876
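/* Seed the second SLIM response word with a known pattern; the chip
 * acknowledges the KILL_BOARD barrier by writing back its complement.
 */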
4877 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4878 mbox_buf = phba->MBslimaddr;
4879 writel(mbox.word0, mbox_buf);
4880
4881 for (i = 0; i < 50; i++) {
4882 if (lpfc_readl((resp_buf + 1), &resp_data))
4883 return;
4884 if (resp_data != ~(BARRIER_TEST_PATTERN))
4885 mdelay(1);
4886 else
4887 break;
4888 }
4889 resp_data = 0;
4890 if (lpfc_readl((resp_buf + 1), &resp_data))
4891 return;
4892 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4893 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4894 phba->pport->stopped)
4895 goto restore_hc;
4896 else
4897 goto clear_errat;
4898 }
4899
4900 mbox.mbxOwner = OWN_HOST;
4901 resp_data = 0;
4902 for (i = 0; i < 500; i++) {
4903 if (lpfc_readl(resp_buf, &resp_data))
4904 return;
4905 if (resp_data != mbox.word0)
4906 mdelay(1);
4907 else
4908 break;
4909 }
4910
4911 clear_errat:
4912
4913 while (++i < 500) {
4914 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4915 return;
4916 if (!(ha_copy & HA_ERATT))
4917 mdelay(1);
4918 else
4919 break;
4920 }
4921
4922 if (readl(phba->HAregaddr) & HA_ERATT) {
4923 writel(HA_ERATT, phba->HAregaddr);
4924 phba->pport->stopped = 1;
4925 }
4926
4927 restore_hc:
4928 phba->link_flag &= ~LS_IGNORE_ERATT;
4929 writel(hc_copy, phba->HCregaddr);
4930 readl(phba->HCregaddr);
4931 }
4932
4933 /**
4934  * lpfc_sli_brdkill - Issue a kill_board mailbox command
4935  * @phba: Pointer to HBA context object.
4936  *
4937  * This function issues a kill_board mailbox command and waits for
4938  * the error attention interrupt. This function is called for stopping
4939  * the firmware processing. The caller is not required to hold any
4940  * locks. This function calls lpfc_hba_down_post function to free
4941  * any pending commands after the kill. The function will return 1 when it
4942  * fails to kill the board, else it will return 0.
4943  **/
4944 int
4945 lpfc_sli_brdkill(struct lpfc_hba *phba)
4946 {
4947 struct lpfc_sli *psli;
4948 LPFC_MBOXQ_t *pmb;
4949 uint32_t status;
4950 uint32_t ha_copy;
4951 int retval;
4952 int i = 0;
4953
4954 psli = &phba->sli;
4955
4956 /* Kill HBA */
4957 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4958 "0329 Kill HBA Data: x%x x%x\n",
4959 phba->pport->port_state, psli->sli_flag);
4960
4961 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4962 if (!pmb)
4963 return 1;
4964
4965 /* Disable the error attention */
4966 spin_lock_irq(&phba->hbalock);
4967 if (lpfc_readl(phba->HCregaddr, &status)) {
4968 spin_unlock_irq(&phba->hbalock);
4969 mempool_free(pmb, phba->mbox_mem_pool);
4970 return 1;
4971 }
4972 status &= ~HC_ERINT_ENA;
4973 writel(status, phba->HCregaddr);
4974 readl(phba->HCregaddr);
4975 phba->link_flag |= LS_IGNORE_ERATT;
4976 spin_unlock_irq(&phba->hbalock);
4977
4978 lpfc_kill_board(phba, pmb);
4979 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4980 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4981
4982 if (retval != MBX_SUCCESS) {
4983 if (retval != MBX_BUSY)
4984 mempool_free(pmb, phba->mbox_mem_pool);
4985 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4986 "2752 KILL_BOARD command failed retval %d\n",
4987 retval);
4988 spin_lock_irq(&phba->hbalock);
4989 phba->link_flag &= ~LS_IGNORE_ERATT;
4990 spin_unlock_irq(&phba->hbalock);
4991 return 1;
4992 }
4993
4994 spin_lock_irq(&phba->hbalock);
4995 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4996 spin_unlock_irq(&phba->hbalock);
4997
4998 mempool_free(pmb, phba->mbox_mem_pool);
4999
5000 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
5001  * attention every 100ms for 3 seconds. If we don't get ERATT after
5002  * 3 seconds we still set HBA_ERROR state because the status of the
5003  * board is now undefined.
5004  */
5005 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5006 return 1;
5007 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
5008 mdelay(100);
5009 if (lpfc_readl(phba->HAregaddr, &ha_copy))
5010 return 1;
5011 }
5012
5013 del_timer_sync(&psli->mbox_tmo);
5014 if (ha_copy & HA_ERATT) {
5015 writel(HA_ERATT, phba->HAregaddr);
5016 phba->pport->stopped = 1;
5017 }
5018 spin_lock_irq(&phba->hbalock);
5019 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5020 psli->mbox_active = NULL;
5021 phba->link_flag &= ~LS_IGNORE_ERATT;
5022 spin_unlock_irq(&phba->hbalock);
5023
5024 lpfc_hba_down_post(phba);
5025 phba->link_state = LPFC_HBA_ERROR;
5026
5027 return ha_copy & HA_ERATT ? 0 : 1;
5028 }
5029
5030 /**
5031  * lpfc_sli_brdreset - Reset a SLI-2/SLI-3 HBA
5032  * @phba: Pointer to HBA context object.
5033  *
5034  * Resets the HBA by toggling HC_INITFF in the host control register,
5035  * with PCI parity/SERR reporting masked for the duration, then
5036  * reinitializes the driver's per-ring indexes and marks the link
5037  * state LPFC_WARM_START.
5038  *
5039  * Returns 0 on success, -EIO if the PCI config space cannot be read.
5040  **/
5041 int
5042 lpfc_sli_brdreset(struct lpfc_hba *phba)
5043 {
5044 struct lpfc_sli *psli;
5045 struct lpfc_sli_ring *pring;
5046 uint16_t cfg_value;
5047 int i;
5048
5049 psli = &phba->sli;
5050
5051
5052 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5053 "0325 Reset HBA Data: x%x x%x\n",
5054 (phba->pport) ? phba->pport->port_state : 0,
5055 psli->sli_flag);
5056
5057
5058 phba->fc_eventTag = 0;
5059 phba->link_events = 0;
5060 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5061 if (phba->pport) {
5062 phba->pport->fc_myDID = 0;
5063 phba->pport->fc_prevDID = 0;
5064 }
5065
5066
5067 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5068 return -EIO;
5069
5070 pci_write_config_word(phba->pcidev, PCI_COMMAND,
5071 (cfg_value &
5072 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5073
5074 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5075
5076
5077 writel(HC_INITFF, phba->HCregaddr);
5078 mdelay(1);
5079 readl(phba->HCregaddr);
5080 writel(0, phba->HCregaddr);
5081 readl(phba->HCregaddr);
5082
5083
5084 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5085
5086
5087 for (i = 0; i < psli->num_rings; i++) {
5088 pring = &psli->sli3_ring[i];
5089 pring->flag = 0;
5090 pring->sli.sli3.rspidx = 0;
5091 pring->sli.sli3.next_cmdidx = 0;
5092 pring->sli.sli3.local_getidx = 0;
5093 pring->sli.sli3.cmdidx = 0;
5094 pring->missbufcnt = 0;
5095 }
5096
5097 phba->link_state = LPFC_WARM_START;
5098 return 0;
5099 }
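
/*
 * Editor's illustrative sketch (not part of the driver): both the SLI-3
 * and SLI-4 reset paths above bracket the hardware reset with the same
 * PCI idiom -- save PCI_COMMAND, mask parity/SERR reporting so the
 * reset's bus noise is not flagged as a fault, then restore the saved
 * value.  Reduced to its essentials (kernel context, <linux/pci.h>):
 */
static int example_quiet_reset(struct pci_dev *pdev, void (*do_reset)(void))
{
	u16 cmd;

	if (pci_read_config_word(pdev, PCI_COMMAND, &cmd))
		return -EIO;
	pci_write_config_word(pdev, PCI_COMMAND,
			      cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
	do_reset();				/* device-specific reset */
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return 0;
}
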
5100
5101 /**
5102  * lpfc_sli4_brdreset - Reset a SLI-4 HBA
5103  * @phba: Pointer to HBA context object.
5104  *
5105  * Resets a SLI-4 HBA by performing a PCI function reset, again with
5106  * PCI parity/SERR reporting masked while the reset is in flight.
5107  * Driver event counters and the port DIDs are cleared first.
5108  *
5109  * Returns 0 on success, a negative errno on failure.
5110  **/
5111 int
5112 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5113 {
5114 struct lpfc_sli *psli = &phba->sli;
5115 uint16_t cfg_value;
5116 int rc = 0;
5117
5118
5119 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5120 "0295 Reset HBA Data: x%x x%x x%x\n",
5121 phba->pport->port_state, psli->sli_flag,
5122 phba->hba_flag);
5123
5124
5125 phba->fc_eventTag = 0;
5126 phba->link_events = 0;
5127 phba->pport->fc_myDID = 0;
5128 phba->pport->fc_prevDID = 0;
5129 phba->hba_flag &= ~HBA_SETUP;
5130
5131 spin_lock_irq(&phba->hbalock);
5132 psli->sli_flag &= ~(LPFC_PROCESS_LA);
5133 phba->fcf.fcf_flag = 0;
5134 spin_unlock_irq(&phba->hbalock);
5135
5136
5137 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5138 "0389 Performing PCI function reset!\n");
5139
5140
5141 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5142 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5143 "3205 PCI read Config failed\n");
5144 return -EIO;
5145 }
5146
5147 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5148 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5149
5150
5151 rc = lpfc_pci_function_reset(phba);
5152
5153
5154 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5155
5156 return rc;
5157 }
5158
5159 /**
5160  * lpfc_sli_brdrestart_s3 - Restart a SLI-3 HBA
5161  * @phba: Pointer to HBA context object.
5162  *
5163  * Restarts an SLI-3 HBA under the hbalock.  A MBX_RESTART mailbox
5164  * word is written directly to SLIM (preceded by the reset barrier),
5165  * the second SLIM word records whether the port was previously
5166  * initialized, and lpfc_sli_brdreset() performs the actual reset.
5167  * Link statistics are cleared and down-HBA cleanup is posted.
5168  *
5169  * Returns 0 always.
5170  **/
5171
5172 static int
5173 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5174 {
5175 volatile struct MAILBOX_word0 mb;
5176 struct lpfc_sli *psli;
5177 void __iomem *to_slim;
5178 uint32_t hba_aer_enabled;
5179
5180 spin_lock_irq(&phba->hbalock);
5181
5182
5183 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5184
5185 psli = &phba->sli;
5186
5187
5188 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5189 "0337 Restart HBA Data: x%x x%x\n",
5190 (phba->pport) ? phba->pport->port_state : 0,
5191 psli->sli_flag);
5192
5193 mb.word0 = 0;
5194 mb.mbxCommand = MBX_RESTART;
5195 mb.mbxHc = 1;
5196
5197 lpfc_reset_barrier(phba);
5198
5199 to_slim = phba->MBslimaddr;
5200 writel(mb.word0, to_slim);
5201 readl(to_slim);
5202
5203
5204 if (phba->pport && phba->pport->port_state)
5205 mb.word0 = 1;
5206 else
5207 mb.word0 = 0;
5208 to_slim = phba->MBslimaddr + sizeof (uint32_t);
5209 writel(mb.word0, to_slim);
5210 readl(to_slim);
5211
5212 lpfc_sli_brdreset(phba);
5213 if (phba->pport)
5214 phba->pport->stopped = 0;
5215 phba->link_state = LPFC_INIT_START;
5216 phba->hba_flag = 0;
5217 spin_unlock_irq(&phba->hbalock);
5218
5219 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5220 psli->stats_start = ktime_get_seconds();
5221
5222
5223 mdelay(100);
5224
5225
5226 if (hba_aer_enabled)
5227 pci_disable_pcie_error_reporting(phba->pcidev);
5228
5229 lpfc_hba_down_post(phba);
5230
5231 return 0;
5232 }
5233
5234 /**
5235  * lpfc_sli_brdrestart_s4 - Restart a SLI-4 HBA
5236  * @phba: Pointer to HBA context object.
5237  *
5238  * Restarts an SLI-4 HBA via lpfc_sli4_brdreset(), clears link
5239  * statistics, and posts down-HBA cleanup and queue teardown.
5240  *
5241  * Returns 0 on success, a negative errno if the reset failed.
5242  **/
5243 static int
5244 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5245 {
5246 struct lpfc_sli *psli = &phba->sli;
5247 uint32_t hba_aer_enabled;
5248 int rc;
5249
5250
5251 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5252 "0296 Restart HBA Data: x%x x%x\n",
5253 phba->pport->port_state, psli->sli_flag);
5254
5255
5256 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
5257
5258 rc = lpfc_sli4_brdreset(phba);
5259 if (rc) {
5260 phba->link_state = LPFC_HBA_ERROR;
5261 goto hba_down_queue;
5262 }
5263
5264 spin_lock_irq(&phba->hbalock);
5265 phba->pport->stopped = 0;
5266 phba->link_state = LPFC_INIT_START;
5267 phba->hba_flag = 0;
5268
5269 phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
5270 spin_unlock_irq(&phba->hbalock);
5271
5272 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5273 psli->stats_start = ktime_get_seconds();
5274
5275
5276 if (hba_aer_enabled)
5277 pci_disable_pcie_error_reporting(phba->pcidev);
5278
5279 hba_down_queue:
5280 lpfc_hba_down_post(phba);
5281 lpfc_sli4_queue_destroy(phba);
5282
5283 return rc;
5284 }
5285
5286 /**
5287  * lpfc_sli_brdrestart - Wrapper function for restarting the HBA
5288  * @phba: Pointer to HBA context object.
5289  *
5290  * Calls the SLI-revision-specific restart routine installed in
5291  * phba->lpfc_sli_brdrestart at setup time.
5292  **/
5293 int
5294 lpfc_sli_brdrestart(struct lpfc_hba *phba)
5295 {
5296 return phba->lpfc_sli_brdrestart(phba);
5297 }
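
/*
 * Editor's illustrative sketch (not part of the driver): the wrapper
 * above dispatches through a function pointer that the init code binds
 * once per SLI revision (lpfc_sli_brdrestart_s3 vs _s4), keeping all
 * callers revision-agnostic.  The pattern, with hypothetical names:
 */
struct example_hba;
struct example_hba_ops {
	int (*brdrestart)(struct example_hba *hba);	/* bound at probe time */
};

struct example_hba {
	struct example_hba_ops ops;
};

static int example_brdrestart(struct example_hba *hba)
{
	return hba->ops.brdrestart(hba);	/* mirrors lpfc_sli_brdrestart() */
}
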
5298
5299 /**
5300  * lpfc_sli_chipset_init - Wait for the restart of the HBA
5301  * @phba: Pointer to HBA context object.
5302  *
5303  * Polls the host status register until both HS_FFRDY and HS_MBRDY are
5304  * set, backing off from 10 ms to 1 s between reads and restarting the
5305  * board once mid-way.  On success the interrupt enables and the host
5306  * attention register are cleared.
5307  * Returns 0 when ready, -EIO or -ETIMEDOUT on failure.
5308  **/
5309 int
5310 lpfc_sli_chipset_init(struct lpfc_hba *phba)
5311 {
5312 uint32_t status, i = 0;
5313
5314
5315 if (lpfc_readl(phba->HSregaddr, &status))
5316 return -EIO;
5317
5318
5319 i = 0;
5320 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5321
5322
5323 /*
5324  * Poll with a stepped backoff: 10 ms for the first 10 tries,
5325  * 100 ms through try 100, then 1 s per try.  The board is
5326  * restarted once at try 150 and the wait is abandoned after
5327  * 200 tries (roughly 109 seconds in total).
5328  */
5329
5330 if (i++ >= 200) {
5331
5332
5333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5334 "0436 Adapter failed to init, "
5335 "timeout, status reg x%x, "
5336 "FW Data: A8 x%x AC x%x\n", status,
5337 readl(phba->MBslimaddr + 0xa8),
5338 readl(phba->MBslimaddr + 0xac));
5339 phba->link_state = LPFC_HBA_ERROR;
5340 return -ETIMEDOUT;
5341 }
5342
5343
5344 if (status & HS_FFERM) {
5345
5346 /* HS_FFERM means a fatal chipset error occurred during init */
5347
5348 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5349 "0437 Adapter failed to init, "
5350 "chipset, status reg x%x, "
5351 "FW Data: A8 x%x AC x%x\n", status,
5352 readl(phba->MBslimaddr + 0xa8),
5353 readl(phba->MBslimaddr + 0xac));
5354 phba->link_state = LPFC_HBA_ERROR;
5355 return -EIO;
5356 }
5357
5358 if (i <= 10)
5359 msleep(10);
5360 else if (i <= 100)
5361 msleep(100);
5362 else
5363 msleep(1000);
5364
5365 if (i == 150) {
5366
5367 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5368 lpfc_sli_brdrestart(phba);
5369 }
5370
5371 if (lpfc_readl(phba->HSregaddr, &status))
5372 return -EIO;
5373 }
5374
5375
5376 if (status & HS_FFERM) {
5377
5378
5379 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5380 "0438 Adapter failed to init, chipset, "
5381 "status reg x%x, "
5382 "FW Data: A8 x%x AC x%x\n", status,
5383 readl(phba->MBslimaddr + 0xa8),
5384 readl(phba->MBslimaddr + 0xac));
5385 phba->link_state = LPFC_HBA_ERROR;
5386 return -EIO;
5387 }
5388
5389 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5390
5391
5392 writel(0, phba->HCregaddr);
5393 readl(phba->HCregaddr);
5394
5395
5396 writel(0xffffffff, phba->HAregaddr);
5397 readl(phba->HAregaddr);
5398 return 0;
5399 }
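
/*
 * Editor's illustrative sketch (not part of the driver): the wait loop
 * above uses a stepped backoff -- 10 ms for the first 10 polls, 100 ms
 * through poll 100, then 1 s -- for a worst case of roughly 109 s.
 * A user-space analogue (poll_ready() is a hypothetical status read):
 */
#include <stdbool.h>
#include <unistd.h>

extern bool poll_ready(void);

static int example_wait_ready(void)
{
	int i;

	for (i = 1; i <= 200; i++) {
		if (poll_ready())
			return 0;
		if (i <= 10)
			usleep(10 * 1000);
		else if (i <= 100)
			usleep(100 * 1000);
		else
			usleep(1000 * 1000);
	}
	return -1;	/* timed out */
}
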
5400
5401 /**
5402  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5403  *
5404  * Returns the size of the lpfc_hbq_defs table, i.e. the number of
5405  * host buffer queues the driver configures.
5406  **/
5407 int
5408 lpfc_sli_hbq_count(void)
5409 {
5410 return ARRAY_SIZE(lpfc_hbq_defs);
5411 }
5412
5413 /**
5414  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5415  *
5416  * Sums entry_count over every entry in the lpfc_hbq_defs table to
5417  * give the total number of HBQ entries that will be allocated for
5418  * the HBA.
5419  **/
5420 static int
5421 lpfc_sli_hbq_entry_count(void)
5422 {
5423 int hbq_count = lpfc_sli_hbq_count();
5424 int count = 0;
5425 int i;
5426
5427 for (i = 0; i < hbq_count; ++i)
5428 count += lpfc_hbq_defs[i]->entry_count;
5429 return count;
5430 }
5431
5432 /**
5433  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5434  *
5435  * Returns the total HBQ entry count multiplied by the size of one
5436  * struct lpfc_hbq_entry.
5437  **/
5438 int
5439 lpfc_sli_hbq_size(void)
5440 {
5441 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5442 }
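
/*
 * Editor's illustrative sketch (not part of the driver): worked example
 * of the two helpers above.  With two HBQs of, say, 256 and 16 entries
 * and a 16-byte entry (all values made up for the example), the region
 * is (256 + 16) * 16 = 4352 bytes.  As plain C:
 */
#include <stddef.h>

static size_t example_hbq_bytes(const int *entry_counts, int nqueues,
				size_t entry_size)
{
	size_t entries = 0;
	int i;

	for (i = 0; i < nqueues; i++)
		entries += entry_counts[i];	/* lpfc_sli_hbq_entry_count() */
	return entries * entry_size;		/* lpfc_sli_hbq_size() */
}
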
5443
5444 /**
5445  * lpfc_sli_hbq_setup - configure and initialize HBQs
5446  * @phba: Pointer to HBA context object.
5447  *
5448  * Issues a CONFIG_HBQ mailbox command (polled) for each defined HBQ,
5449  * then posts the initial buffers to every queue.
5450  *
5451  * Returns 0 on success, -ENOMEM or -ENXIO on failure.
5452  **/
5453 static int
5454 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5455 {
5456 int hbq_count = lpfc_sli_hbq_count();
5457 LPFC_MBOXQ_t *pmb;
5458 MAILBOX_t *pmbox;
5459 uint32_t hbqno;
5460 uint32_t hbq_entry_index;
5461
5462
5463 /* Get a mailbox buffer for the HBQ configuration commands */
5464
5465 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5466
5467 if (!pmb)
5468 return -ENOMEM;
5469
5470 pmbox = &pmb->u.mb;
5471
5472
5473 phba->link_state = LPFC_INIT_MBX_CMDS;
5474 phba->hbq_in_use = 1;
5475
5476 hbq_entry_index = 0;
5477 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5478 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5479 phba->hbqs[hbqno].hbqPutIdx = 0;
5480 phba->hbqs[hbqno].local_hbqGetIdx = 0;
5481 phba->hbqs[hbqno].entry_count =
5482 lpfc_hbq_defs[hbqno]->entry_count;
5483 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5484 hbq_entry_index, pmb);
5485 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5486
5487 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5488
5489 /* Adapter failed to init the HBQ; log the failure and bail */
5490
5491 lpfc_printf_log(phba, KERN_ERR,
5492 LOG_SLI | LOG_VPORT,
5493 "1805 Adapter failed to init. "
5494 "Data: x%x x%x x%x\n",
5495 pmbox->mbxCommand,
5496 pmbox->mbxStatus, hbqno);
5497
5498 phba->link_state = LPFC_HBA_ERROR;
5499 mempool_free(pmb, phba->mbox_mem_pool);
5500 return -ENXIO;
5501 }
5502 }
5503 phba->hbq_count = hbq_count;
5504
5505 mempool_free(pmb, phba->mbox_mem_pool);
5506
5507
5508 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5509 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5510 return 0;
5511 }
5512
5513 /**
5514  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5515  * @phba: Pointer to HBA context object.
5516  *
5517  * Posts the initial receive buffers for the single SLI-4 ELS HBQ.
5518  * When MDS diagnostics are enabled the entry count is halved so the
5519  * remainder can serve MDS loopback.
5520  * Returns 0 always.
5521  **/
5522 static int
5523 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5524 {
5525 phba->hbq_in_use = 1;
5526
5527
5528 /* When MDS diagnostics are enabled and supported, post only half
5529  * of the default receive buffer count to the ELS HBQ.
5530  */
5531 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5532 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5533 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5534 else
5535 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5536 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5537 phba->hbq_count = 1;
5538 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5539
5540 return 0;
5541 }
5542
5543 /**
5544  * lpfc_sli_config_port - Issue config port mailbox command
5545  * @phba: Pointer to HBA context object.
5546  * @sli_mode: SLI mode (2 or 3) to configure the port for.
5547  *
5548  * Restarts and reinitializes the chipset (at most twice), then issues
5549  * a polled CONFIG_PORT mailbox command for the requested SLI mode.
5550  * On success in SLI-3 mode it records which optional features (NPIV,
5551  * HBQs, CRP, BlockGuard) the port granted and sets up the host group
5552  * pointers accordingly.
5553  *
5554  * Returns 0 on success, a negative errno otherwise.
5555  **/
5556 int
5557 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5558 {
5559 LPFC_MBOXQ_t *pmb;
5560 uint32_t resetcount = 0, rc = 0, done = 0;
5561
5562 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5563 if (!pmb) {
5564 phba->link_state = LPFC_HBA_ERROR;
5565 return -ENOMEM;
5566 }
5567
5568 phba->sli_rev = sli_mode;
5569 while (resetcount < 2 && !done) {
5570 spin_lock_irq(&phba->hbalock);
5571 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5572 spin_unlock_irq(&phba->hbalock);
5573 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5574 lpfc_sli_brdrestart(phba);
5575 rc = lpfc_sli_chipset_init(phba);
5576 if (rc)
5577 break;
5578
5579 spin_lock_irq(&phba->hbalock);
5580 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5581 spin_unlock_irq(&phba->hbalock);
5582 resetcount++;
5583
5584 /* Call pre CONFIG_PORT mailbox command initialization.  A
5585  * value of 0 means the call was successful.  Any other
5586  * nonzero value is a failure; if -ERESTART is returned the
5587  * driver resets the HBA and tries again.
5588  */
5589 rc = lpfc_config_port_prep(phba);
5590 if (rc == -ERESTART) {
5591 phba->link_state = LPFC_LINK_UNKNOWN;
5592 continue;
5593 } else if (rc)
5594 break;
5595
5596 phba->link_state = LPFC_INIT_MBX_CMDS;
5597 lpfc_config_port(phba, pmb);
5598 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5599 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5600 LPFC_SLI3_HBQ_ENABLED |
5601 LPFC_SLI3_CRP_ENABLED |
5602 LPFC_SLI3_DSS_ENABLED);
5603 if (rc != MBX_SUCCESS) {
5604 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5605 "0442 Adapter failed to init, mbxCmd x%x "
5606 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5607 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5608 spin_lock_irq(&phba->hbalock);
5609 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5610 spin_unlock_irq(&phba->hbalock);
5611 rc = -ENXIO;
5612 } else {
5613
5614 spin_lock_irq(&phba->hbalock);
5615 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5616 spin_unlock_irq(&phba->hbalock);
5617 done = 1;
5618
5619 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5620 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5621 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5622 "3110 Port did not grant ASABT\n");
5623 }
5624 }
5625 if (!done) {
5626 rc = -EINVAL;
5627 goto do_prep_failed;
5628 }
5629 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5630 if (!pmb->u.mb.un.varCfgPort.cMA) {
5631 rc = -ENXIO;
5632 goto do_prep_failed;
5633 }
5634 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5635 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5636 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5637 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5638 phba->max_vpi : phba->max_vports;
5639
5640 } else
5641 phba->max_vpi = 0;
5642 if (pmb->u.mb.un.varCfgPort.gerbm)
5643 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5644 if (pmb->u.mb.un.varCfgPort.gcrp)
5645 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5646
5647 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5648 phba->port_gp = phba->mbox->us.s3_pgp.port;
5649
5650 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5651 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5652 phba->cfg_enable_bg = 0;
5653 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5654 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5655 "0443 Adapter did not grant "
5656 "BlockGuard\n");
5657 }
5658 }
5659 } else {
5660 phba->hbq_get = NULL;
5661 phba->port_gp = phba->mbox->us.s2.port;
5662 phba->max_vpi = 0;
5663 }
5664 do_prep_failed:
5665 mempool_free(pmb, phba->mbox_mem_pool);
5666 return rc;
5667 }
5668
5669 /**
5670  * lpfc_sli_hba_setup - SLI initialization function
5671  * @phba: Pointer to HBA context object.
5672  *
5673  * Main SLI-2/SLI-3 setup path: configures the port (SLI-3 first),
5674  * optionally enables PCIe AER, sizes the iocb command/response
5675  * entries for the negotiated mode, maps the rings, allocates the
5676  * VPI bitmask and ids for NPIV, sets up HBQs when the port granted
5677  * them, and finishes with the post-CONFIG_PORT initialization.
5678  *
5679  * This function is invoked during HBA bring-up, after a reset.
5680  * Returns 0 on success; on failure the link state is set to
5681  * LPFC_HBA_ERROR and a negative errno is returned.
5682  **/
5683 int
5684 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5685 {
5686 int rc;
5687 int i;
5688 int longs;
5689
5690
5691 if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5692 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5693 if (rc)
5694 return -EIO;
5695 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5696 }
5697 phba->fcp_embed_io = 0;
5698
5699
5700 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5701 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5702 if (!rc) {
5703 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5704 "2709 This device supports "
5705 "Advanced Error Reporting (AER)\n");
5706 spin_lock_irq(&phba->hbalock);
5707 phba->hba_flag |= HBA_AER_ENABLED;
5708 spin_unlock_irq(&phba->hbalock);
5709 } else {
5710 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5711 "2708 This device does not support "
5712 "Advanced Error Reporting (AER): %d\n",
5713 rc);
5714 phba->cfg_aer_support = 0;
5715 }
5716 }
5717
5718 if (phba->sli_rev == 3) {
5719 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5720 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5721 } else {
5722 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5723 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5724 phba->sli3_options = 0;
5725 }
5726
5727 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5728 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5729 phba->sli_rev, phba->max_vpi);
5730 rc = lpfc_sli_ring_map(phba);
5731
5732 if (rc)
5733 goto lpfc_sli_hba_setup_error;
5734
5735
5736 if (phba->sli_rev == LPFC_SLI_REV3) {
5737 /*
5738  * The VPI bitmask and physical ID array are allocated and
5739  * initialized once only, at driver load.  A port reset does
5740  * not need to reinitialize this memory.
5741  */
5742 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5743 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5744 phba->vpi_bmask = kcalloc(longs,
5745 sizeof(unsigned long),
5746 GFP_KERNEL);
5747 if (!phba->vpi_bmask) {
5748 rc = -ENOMEM;
5749 goto lpfc_sli_hba_setup_error;
5750 }
5751
5752 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5753 sizeof(uint16_t),
5754 GFP_KERNEL);
5755 if (!phba->vpi_ids) {
5756 kfree(phba->vpi_bmask);
5757 rc = -ENOMEM;
5758 goto lpfc_sli_hba_setup_error;
5759 }
5760 for (i = 0; i < phba->max_vpi; i++)
5761 phba->vpi_ids[i] = i;
5762 }
5763 }
5764
5765
5766 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5767 rc = lpfc_sli_hbq_setup(phba);
5768 if (rc)
5769 goto lpfc_sli_hba_setup_error;
5770 }
5771 spin_lock_irq(&phba->hbalock);
5772 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5773 spin_unlock_irq(&phba->hbalock);
5774
5775 rc = lpfc_config_port_post(phba);
5776 if (rc)
5777 goto lpfc_sli_hba_setup_error;
5778
5779 return rc;
5780
5781 lpfc_sli_hba_setup_error:
5782 phba->link_state = LPFC_HBA_ERROR;
5783 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5784 "0445 Firmware initialization failed\n");
5785 return rc;
5786 }
5787
5788 /**
5789  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5790  * @phba: Pointer to HBA context object.
5791  *
5792  * Dumps config region 23 via a polled mailbox command and parses the
5793  * FCoE parameters (FC map, valid VLAN) from it; defaults are set up
5794  * front in case the region is absent or unparsable.
5795  **/
5796 static int
5797 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5798 {
5799 LPFC_MBOXQ_t *mboxq;
5800 struct lpfc_dmabuf *mp;
5801 struct lpfc_mqe *mqe;
5802 uint32_t data_length;
5803 int rc;
5804
5805
5806 phba->valid_vlan = 0;
5807 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5808 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5809 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5810
5811 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5812 if (!mboxq)
5813 return -ENOMEM;
5814
5815 mqe = &mboxq->u.mqe;
5816 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5817 rc = -ENOMEM;
5818 goto out_free_mboxq;
5819 }
5820
5821 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5822 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5823
5824 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5825 "(%d):2571 Mailbox cmd x%x Status x%x "
5826 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5827 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5828 "CQ: x%x x%x x%x x%x\n",
5829 mboxq->vport ? mboxq->vport->vpi : 0,
5830 bf_get(lpfc_mqe_command, mqe),
5831 bf_get(lpfc_mqe_status, mqe),
5832 mqe->un.mb_words[0], mqe->un.mb_words[1],
5833 mqe->un.mb_words[2], mqe->un.mb_words[3],
5834 mqe->un.mb_words[4], mqe->un.mb_words[5],
5835 mqe->un.mb_words[6], mqe->un.mb_words[7],
5836 mqe->un.mb_words[8], mqe->un.mb_words[9],
5837 mqe->un.mb_words[10], mqe->un.mb_words[11],
5838 mqe->un.mb_words[12], mqe->un.mb_words[13],
5839 mqe->un.mb_words[14], mqe->un.mb_words[15],
5840 mqe->un.mb_words[16], mqe->un.mb_words[50],
5841 mboxq->mcqe.word0,
5842 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5843 mboxq->mcqe.trailer);
5844
5845 if (rc) {
5846 rc = -EIO;
5847 goto out_free_mboxq;
5848 }
5849 data_length = mqe->un.mb_words[5];
5850 if (data_length > DMP_RGN23_SIZE) {
5851 rc = -EIO;
5852 goto out_free_mboxq;
5853 }
5854
5855 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5856 rc = 0;
5857
5858 out_free_mboxq:
5859 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
5860 return rc;
5861 }
5862
5863 /**
5864  * lpfc_sli4_read_rev - Issue a READ_REV mailbox command and collect VPD
5865  * @phba: Pointer to HBA context object.
5866  * @mboxq: Pointer to an allocated mailbox object.
5867  * @vpd: Buffer that receives the vital product data.
5868  * @vpd_size: On entry the size of @vpd; on return the bytes copied.
5869  *
5870  * Allocates a coherent DMA buffer, points the READ_REV command at it,
5871  * issues the command in polled mode, and copies the returned VPD into
5872  * @vpd, shrinking *@vpd_size if the port returned less data than the
5873  * caller allowed.
5874  *
5875  * Returns 0 on success, -ENOMEM on allocation failure, -EIO if the
5876  * mailbox command fails.
5877  **/
5878 static int
5879 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5880 uint8_t *vpd, uint32_t *vpd_size)
5881 {
5882 int rc = 0;
5883 uint32_t dma_size;
5884 struct lpfc_dmabuf *dmabuf;
5885 struct lpfc_mqe *mqe;
5886
5887 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5888 if (!dmabuf)
5889 return -ENOMEM;
5890
5891 /*
5892  * Get a DMA buffer for the vital product data (VPD) the port
5893  * returns; it must be coherent since the port DMAs into it.
5894  */
5895 dma_size = *vpd_size;
5896 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5897 &dmabuf->phys, GFP_KERNEL);
5898 if (!dmabuf->virt) {
5899 kfree(dmabuf);
5900 return -ENOMEM;
5901 }
5902
5903
5904 /*
5905  * The SLI4 READ_REV reuses word1 bits 31:16 and adds VPD fields
5906  * not present in SLI3; clear them before setting the VPD request.
5907  */
5908 lpfc_read_rev(phba, mboxq);
5909 mqe = &mboxq->u.mqe;
5910 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5911 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5912 mqe->un.read_rev.word1 &= 0x0000FFFF;
5913 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5914 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5915
5916 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5917 if (rc) {
5918 dma_free_coherent(&phba->pcidev->dev, dma_size,
5919 dmabuf->virt, dmabuf->phys);
5920 kfree(dmabuf);
5921 return -EIO;
5922 }
5923
5924
5925 /*
5926  * The port may return less VPD than requested; shrink the
5927  * caller's size so only valid bytes are copied out.
5928  */
5929 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5930 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5931
5932 memcpy(vpd, dmabuf->virt, *vpd_size);
5933
5934 dma_free_coherent(&phba->pcidev->dev, dma_size,
5935 dmabuf->virt, dmabuf->phys);
5936 kfree(dmabuf);
5937 return 0;
5938 }
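
/*
 * Editor's illustrative sketch (not part of the driver): the READ_REV
 * VPD transfer above follows the standard coherent-DMA lifecycle.
 * Skeleton of the pattern (kernel context, <linux/dma-mapping.h>;
 * error handling trimmed):
 */
static int example_dma_payload(struct device *dev, size_t len)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(dev, len, &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	/* 1. hand @phys to the hardware (here: vpd_paddr_high/low)  */
	/* 2. issue the command and wait for it to complete          */
	/* 3. copy results out of @virt before freeing               */

	dma_free_coherent(dev, len, virt, phys);
	return 0;
}
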
5939
5940 /**
5941  * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5942  * @phba: Pointer to HBA context object.
5943  *
5944  * Issues a non-embedded GET_CNTL_ATTRIBUTES mailbox command and, when
5945  * both the mailbox and the cfg_shdr status are clean, caches the link
5946  * type/number, flash id, ASIC revision, and BIOS version string.
5947  *
5948  * Returns 0 on success, -ENOMEM if the DMA allocation came up short,
5949  * -ENXIO if the port rejected the command.
5950  **/
5951 static int
5952 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5953 {
5954 LPFC_MBOXQ_t *mboxq;
5955 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5956 struct lpfc_controller_attribute *cntl_attr;
5957 void *virtaddr = NULL;
5958 uint32_t alloclen, reqlen;
5959 uint32_t shdr_status, shdr_add_status;
5960 union lpfc_sli4_cfg_shdr *shdr;
5961 int rc;
5962
5963 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5964 if (!mboxq)
5965 return -ENOMEM;
5966
5967
5968 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5969 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5970 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5971 LPFC_SLI4_MBX_NEMBED);
5972
5973 if (alloclen < reqlen) {
5974 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5975 "3084 Allocated DMA memory size (%d) is "
5976 "less than the requested DMA memory size "
5977 "(%d)\n", alloclen, reqlen);
5978 rc = -ENOMEM;
5979 goto out_free_mboxq;
5980 }
5981 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5982 virtaddr = mboxq->sge_array->addr[0];
5983 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5984 shdr = &mbx_cntl_attr->cfg_shdr;
5985 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5986 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5987 if (shdr_status || shdr_add_status || rc) {
5988 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5989 "3085 Mailbox x%x (x%x/x%x) failed, "
5990 "rc:x%x, status:x%x, add_status:x%x\n",
5991 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5992 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5993 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5994 rc, shdr_status, shdr_add_status);
5995 rc = -ENXIO;
5996 goto out_free_mboxq;
5997 }
5998
5999 cntl_attr = &mbx_cntl_attr->cntl_attr;
6000 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6001 phba->sli4_hba.lnk_info.lnk_tp =
6002 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
6003 phba->sli4_hba.lnk_info.lnk_no =
6004 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
6005 phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
6006 phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
6007
6008 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
6009 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
6010 sizeof(phba->BIOSVersion));
6011
6012 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6013 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
6014 "flash_id: x%02x, asic_rev: x%02x\n",
6015 phba->sli4_hba.lnk_info.lnk_tp,
6016 phba->sli4_hba.lnk_info.lnk_no,
6017 phba->BIOSVersion, phba->sli4_hba.flash_id,
6018 phba->sli4_hba.asic_rev);
6019 out_free_mboxq:
6020 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6021 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6022 else
6023 mempool_free(mboxq, phba->mbox_mem_pool);
6024 return rc;
6025 }
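
/*
 * Editor's illustrative sketch (not part of the driver): SLI4_CONFIG
 * responses carry two layers of status -- the mailbox return code (did
 * the command get through?) and the cfg_shdr status/add_status words
 * (did the port accept it?).  The checks above collapse to a helper
 * along these lines (errno choices illustrative):
 */
static int example_cfg_status(int mbox_rc, unsigned int shdr_status,
			      unsigned int shdr_add_status)
{
	if (mbox_rc)
		return -EIO;	/* transport-level failure */
	if (shdr_status || shdr_add_status)
		return -ENXIO;	/* port rejected the command */
	return 0;
}
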
6026
6027 /**
6028  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
6029  * @phba: Pointer to HBA context object.
6030  *
6031  * Reads the port configuration (and controller attributes if the link
6032  * data is not yet valid), then issues GET_PORT_NAME and stores the
6033  * single-character name matching this link number in phba->Port.
6034  *
6035  * Returns 0 on success, -ENOMEM or -ENXIO on failure; on any failure
6036  * the port name state is left as LPFC_SLI4_PPNAME_NON.
6037  **/
6038 static int
6039 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
6040 {
6041 LPFC_MBOXQ_t *mboxq;
6042 struct lpfc_mbx_get_port_name *get_port_name;
6043 uint32_t shdr_status, shdr_add_status;
6044 union lpfc_sli4_cfg_shdr *shdr;
6045 char cport_name = 0;
6046 int rc;
6047
6048
6049 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6050 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
6051
6052 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6053 if (!mboxq)
6054 return -ENOMEM;
6055
6056
6057 lpfc_sli4_read_config(phba);
6058
6059 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
6060 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
6061
6062 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
6063 goto retrieve_ppname;
6064
6065
6066 rc = lpfc_sli4_get_ctl_attr(phba);
6067 if (rc)
6068 goto out_free_mboxq;
6069
6070 retrieve_ppname:
6071 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6072 LPFC_MBOX_OPCODE_GET_PORT_NAME,
6073 sizeof(struct lpfc_mbx_get_port_name) -
6074 sizeof(struct lpfc_sli4_cfg_mhdr),
6075 LPFC_SLI4_MBX_EMBED);
6076 get_port_name = &mboxq->u.mqe.un.get_port_name;
6077 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
6078 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
6079 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
6080 phba->sli4_hba.lnk_info.lnk_tp);
6081 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6082 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6083 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6084 if (shdr_status || shdr_add_status || rc) {
6085 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6086 "3087 Mailbox x%x (x%x/x%x) failed: "
6087 "rc:x%x, status:x%x, add_status:x%x\n",
6088 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6089 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6090 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6091 rc, shdr_status, shdr_add_status);
6092 rc = -ENXIO;
6093 goto out_free_mboxq;
6094 }
6095 switch (phba->sli4_hba.lnk_info.lnk_no) {
6096 case LPFC_LINK_NUMBER_0:
6097 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6098 &get_port_name->u.response);
6099 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6100 break;
6101 case LPFC_LINK_NUMBER_1:
6102 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6103 &get_port_name->u.response);
6104 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6105 break;
6106 case LPFC_LINK_NUMBER_2:
6107 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6108 &get_port_name->u.response);
6109 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6110 break;
6111 case LPFC_LINK_NUMBER_3:
6112 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6113 &get_port_name->u.response);
6114 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6115 break;
6116 default:
6117 break;
6118 }
6119
6120 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6121 phba->Port[0] = cport_name;
6122 phba->Port[1] = '\0';
6123 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6124 "3091 SLI get port name: %s\n", phba->Port);
6125 }
6126
6127 out_free_mboxq:
6128 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6129 lpfc_sli4_mbox_cmd_free(phba, mboxq);
6130 else
6131 mempool_free(mboxq, phba->mbox_mem_pool);
6132 return rc;
6133 }
6134
6135 /**
6136  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6137  * @phba: Pointer to HBA context object.
6138  *
6139  * Rearms the mailbox, ELS, NVME-LS, per-hardware-queue I/O, and (when
6140  * NVMET is enabled) NVMET completion queues, plus every event queue.
6141  **/
6142 static void
6143 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6144 {
6145 int qidx;
6146 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6147 struct lpfc_sli4_hdw_queue *qp;
6148 struct lpfc_queue *eq;
6149
6150 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6151 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6152 if (sli4_hba->nvmels_cq)
6153 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6154 LPFC_QUEUE_REARM);
6155
6156 if (sli4_hba->hdwq) {
6157
6158 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6159 qp = &sli4_hba->hdwq[qidx];
6160
6161 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6162 LPFC_QUEUE_REARM);
6163 }
6164
6165
6166 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6167 eq = sli4_hba->hba_eq_hdl[qidx].eq;
6168
6169 sli4_hba->sli4_write_eq_db(phba, eq,
6170 0, LPFC_QUEUE_REARM);
6171 }
6172 }
6173
6174 if (phba->nvmet_support) {
6175 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6176 sli4_hba->sli4_write_cq_db(phba,
6177 sli4_hba->nvmet_cqset[qidx], 0,
6178 LPFC_QUEUE_REARM);
6179 }
6180 }
6181 }
6182
6183 /**
6184  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count
6185  * @phba: Pointer to HBA context object.
6186  * @type: The resource extent type (rpi/vpi/xri/vfi).
6187  * @extnt_count: Returned number of available extents.
6188  * @extnt_size: Returned number of ids per extent.
6189  *
6190  * Issues a GET_RSRC_EXTENT_INFO mailbox command and returns the
6191  * port's available extent count and extent size for @type.
6192  *
6193  * Returns 0 on success, -ENOMEM or -EIO on failure.
6194  **/
6195 int
6196 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6197 uint16_t *extnt_count, uint16_t *extnt_size)
6198 {
6199 int rc = 0;
6200 uint32_t length;
6201 uint32_t mbox_tmo;
6202 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6203 LPFC_MBOXQ_t *mbox;
6204
6205 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6206 if (!mbox)
6207 return -ENOMEM;
6208
6209
6210 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6211 sizeof(struct lpfc_sli4_cfg_mhdr));
6212 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6213 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6214 length, LPFC_SLI4_MBX_EMBED);
6215
6216
6217 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6218 LPFC_SLI4_MBX_EMBED);
6219 if (unlikely(rc)) {
6220 rc = -EIO;
6221 goto err_exit;
6222 }
6223
6224 if (!phba->sli4_hba.intr_enable)
6225 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6226 else {
6227 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6228 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6229 }
6230 if (unlikely(rc)) {
6231 rc = -EIO;
6232 goto err_exit;
6233 }
6234
6235 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6236 if (bf_get(lpfc_mbox_hdr_status,
6237 &rsrc_info->header.cfg_shdr.response)) {
6238 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6239 "2930 Failed to get resource extents "
6240 "Status 0x%x Add'l Status 0x%x\n",
6241 bf_get(lpfc_mbox_hdr_status,
6242 &rsrc_info->header.cfg_shdr.response),
6243 bf_get(lpfc_mbox_hdr_add_status,
6244 &rsrc_info->header.cfg_shdr.response));
6245 rc = -EIO;
6246 goto err_exit;
6247 }
6248
6249 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6250 &rsrc_info->u.rsp);
6251 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6252 &rsrc_info->u.rsp);
6253
6254 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6255 "3162 Retrieved extents type-%d from port: count:%d, "
6256 "size:%d\n", type, *extnt_count, *extnt_size);
6257
6258 err_exit:
6259 mempool_free(mbox, phba->mbox_mem_pool);
6260 return rc;
6261 }
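
/*
 * Editor's illustrative usage sketch (not part of the driver): callers
 * query the extent geometry before deciding what to post, e.g. for
 * XRIs (hypothetical helper):
 */
static int example_total_avail_xris(struct lpfc_hba *phba)
{
	uint16_t cnt = 0, size = 0;
	int rc;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
					    &cnt, &size);
	if (rc)
		return rc;
	return cnt * size;	/* total individual XRI ids available */
}
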
6262
6263 /**
6264  * lpfc_sli4_chk_avail_extnt_rsrc - Check for extent resource changes
6265  * @phba: Pointer to HBA context object.
6266  * @type: The extent type to check.
6267  *
6268  * Compares the extent geometry currently reported by the port against
6269  * the extent blocks the driver already holds for @type: a mismatch in
6270  * block count or in any block's extent size means the port
6271  * reprovisioned its resources and the driver must reallocate.
6272  *
6273  * Returns:
6274  *   0    - no change detected
6275  *   1    - the available extents have changed
6276  *   -EIO - the port could not be queried
6277  **/
6278 static int
6279 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6280 {
6281 uint16_t curr_ext_cnt, rsrc_ext_cnt;
6282 uint16_t size_diff, rsrc_ext_size;
6283 int rc = 0;
6284 struct lpfc_rsrc_blks *rsrc_entry;
6285 struct list_head *rsrc_blk_list = NULL;
6286
6287 size_diff = 0;
6288 curr_ext_cnt = 0;
6289 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6290 &rsrc_ext_cnt,
6291 &rsrc_ext_size);
6292 if (unlikely(rc))
6293 return -EIO;
6294
6295 switch (type) {
6296 case LPFC_RSC_TYPE_FCOE_RPI:
6297 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6298 break;
6299 case LPFC_RSC_TYPE_FCOE_VPI:
6300 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6301 break;
6302 case LPFC_RSC_TYPE_FCOE_XRI:
6303 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6304 break;
6305 case LPFC_RSC_TYPE_FCOE_VFI:
6306 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6307 break;
6308 default:
6309 return -EIO;	/* unknown type; rsrc_blk_list would stay NULL */
6310 }
6311
6312 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6313 curr_ext_cnt++;
6314 if (rsrc_entry->rsrc_size != rsrc_ext_size)
6315 size_diff++;
6316 }
6317
6318 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6319 rc = 1;
6320
6321 return rc;
6322 }
6323
6324 /**
6325  * lpfc_sli4_cfg_post_extnts - Post resource extents to the port
6326  * @phba: Pointer to HBA context object.
6327  * @extnt_cnt: Number of extents to allocate.
6328  * @type: The extent type to allocate.
6329  * @emb: Returned mailbox form, embedded or non-embedded (SGE).
6330  * @mbox: Pointer to an allocated mailbox object.
6331  *
6332  * Builds and issues an ALLOC_RSRC_EXTENT mailbox command.  The request
6333  * length is @extnt_cnt 16-bit ids; if that fits inside the mailbox
6334  * payload the embedded form is used, otherwise the request is sent
6335  * non-embedded and must additionally carry the config shdr and word
6336  * count.
6337  *
6338  * Returns 0 on success, -ENOMEM if the SGE allocation came up short,
6339  * -EIO if the command could not be built or issued.
6340  **/
6341 static int
6342 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6343 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6344 {
6345 int rc = 0;
6346 uint32_t req_len;
6347 uint32_t emb_len;
6348 uint32_t alloc_len, mbox_tmo;
6349
6350
6351 req_len = extnt_cnt * sizeof(uint16_t);
6352
6353 /*
6354  * Calculate the size of an embedded mailbox.  The uint32_t
6355  * accounts for the extents-specific count word.
6356  */
6357 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6358 sizeof(uint32_t);
6359
6360 /*
6361  * Presume the allocation and response fit into an embedded
6362  * mailbox.  If not, switch to the non-embedded form.
6363  */
6364 *emb = LPFC_SLI4_MBX_EMBED;
6365 if (req_len > emb_len) {
6366 req_len = extnt_cnt * sizeof(uint16_t) +
6367 sizeof(union lpfc_sli4_cfg_shdr) +
6368 sizeof(uint32_t);
6369 *emb = LPFC_SLI4_MBX_NEMBED;
6370 }
6371
6372 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6373 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6374 req_len, *emb);
6375 if (alloc_len < req_len) {
6376 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6377 "2982 Allocated DMA memory size (x%x) is "
6378 "less than the requested DMA memory "
6379 "size (x%x)\n", alloc_len, req_len);
6380 return -ENOMEM;
6381 }
6382 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6383 if (unlikely(rc))
6384 return -EIO;
6385
6386 if (!phba->sli4_hba.intr_enable)
6387 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6388 else {
6389 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6390 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6391 }
6392
6393 if (unlikely(rc))
6394 rc = -EIO;
6395 return rc;
6396 }
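
/*
 * Editor's illustrative sketch (not part of the driver): the embedded /
 * non-embedded decision above in numbers.  Assuming a 256-byte
 * MAILBOX_t and a 24-byte struct mbox_header (sizes illustrative):
 * emb_len = 256 - 24 - 4 = 228 bytes, so a request for 100 extents
 * (200 bytes of u16 ids) embeds, while 200 extents (400 bytes) must go
 * non-embedded and also carry the cfg_shdr plus the count word.
 */
static int example_needs_nembed(unsigned int extnt_cnt,
				unsigned int mailbox_size,
				unsigned int header_size)
{
	unsigned int emb_len = mailbox_size - header_size -
			       sizeof(unsigned int);

	return (extnt_cnt * sizeof(unsigned short)) > emb_len;
}
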
6397
6398 /**
6399  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent
6400  * @phba: Pointer to HBA context object.
6401  * @type: The resource extent type to allocate.
6402  *
6403  * Posts all available extents of @type, then unpacks the returned
6404  * base ids into a bitmask, an id array, and a list of extent blocks.
6405  **/
6406 static int
6407 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6408 {
6409 bool emb = false;
6410 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6411 uint16_t rsrc_id, rsrc_start, j, k;
6412 uint16_t *ids;
6413 int i, rc;
6414 unsigned long longs;
6415 unsigned long *bmask;
6416 struct lpfc_rsrc_blks *rsrc_blks;
6417 LPFC_MBOXQ_t *mbox;
6418 uint32_t length;
6419 struct lpfc_id_range *id_array = NULL;
6420 void *virtaddr = NULL;
6421 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6422 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6423 struct list_head *ext_blk_list;
6424
6425 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6426 &rsrc_cnt,
6427 &rsrc_size);
6428 if (unlikely(rc))
6429 return -EIO;
6430
6431 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6432 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6433 "3009 No available Resource Extents "
6434 "for resource type 0x%x: Count: 0x%x, "
6435 "Size 0x%x\n", type, rsrc_cnt,
6436 rsrc_size);
6437 return -ENOMEM;
6438 }
6439
6440 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6441 "2903 Post resource extents type-0x%x: "
6442 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6443
6444 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6445 if (!mbox)
6446 return -ENOMEM;
6447
6448 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6449 if (unlikely(rc)) {
6450 rc = -EIO;
6451 goto err_exit;
6452 }
6453
6454
6455 /*
6456  * Figure out where the response is located and take local
6457  * pointers to it.  The port need not honor the full requested
6458  * count, so refresh rsrc_cnt with the count actually allocated.
6459  */
6460 if (emb == LPFC_SLI4_MBX_EMBED) {
6461 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6462 id_array = &rsrc_ext->u.rsp.id[0];
6463 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6464 } else {
6465 virtaddr = mbox->sge_array->addr[0];
6466 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6467 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6468 id_array = &n_rsrc->id;
6469 }
6470
6471 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6472 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6473
6474 /*
6475  * Set up the driver-side bookkeeping (bitmask, id array, block
6476  * list) for this resource type.
6477  */
6478 length = sizeof(struct lpfc_rsrc_blks);
6479 switch (type) {
6480 case LPFC_RSC_TYPE_FCOE_RPI:
6481 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6482 sizeof(unsigned long),
6483 GFP_KERNEL);
6484 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6485 rc = -ENOMEM;
6486 goto err_exit;
6487 }
6488 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6489 sizeof(uint16_t),
6490 GFP_KERNEL);
6491 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6492 kfree(phba->sli4_hba.rpi_bmask);
6493 rc = -ENOMEM;
6494 goto err_exit;
6495 }
6496
6497
6498
6499
6500
6501
6502 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6503
6504
6505 bmask = phba->sli4_hba.rpi_bmask;
6506 ids = phba->sli4_hba.rpi_ids;
6507 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6508 break;
6509 case LPFC_RSC_TYPE_FCOE_VPI:
6510 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6511 GFP_KERNEL);
6512 if (unlikely(!phba->vpi_bmask)) {
6513 rc = -ENOMEM;
6514 goto err_exit;
6515 }
6516 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6517 GFP_KERNEL);
6518 if (unlikely(!phba->vpi_ids)) {
6519 kfree(phba->vpi_bmask);
6520 rc = -ENOMEM;
6521 goto err_exit;
6522 }
6523
6524
6525 bmask = phba->vpi_bmask;
6526 ids = phba->vpi_ids;
6527 ext_blk_list = &phba->lpfc_vpi_blk_list;
6528 break;
6529 case LPFC_RSC_TYPE_FCOE_XRI:
6530 phba->sli4_hba.xri_bmask = kcalloc(longs,
6531 sizeof(unsigned long),
6532 GFP_KERNEL);
6533 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6534 rc = -ENOMEM;
6535 goto err_exit;
6536 }
6537 phba->sli4_hba.max_cfg_param.xri_used = 0;
6538 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6539 sizeof(uint16_t),
6540 GFP_KERNEL);
6541 if (unlikely(!phba->sli4_hba.xri_ids)) {
6542 kfree(phba->sli4_hba.xri_bmask);
6543 rc = -ENOMEM;
6544 goto err_exit;
6545 }
6546
6547
6548 bmask = phba->sli4_hba.xri_bmask;
6549 ids = phba->sli4_hba.xri_ids;
6550 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6551 break;
6552 case LPFC_RSC_TYPE_FCOE_VFI:
6553 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6554 sizeof(unsigned long),
6555 GFP_KERNEL);
6556 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6557 rc = -ENOMEM;
6558 goto err_exit;
6559 }
6560 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6561 sizeof(uint16_t),
6562 GFP_KERNEL);
6563 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6564 kfree(phba->sli4_hba.vfi_bmask);
6565 rc = -ENOMEM;
6566 goto err_exit;
6567 }
6568
6569
6570 bmask = phba->sli4_hba.vfi_bmask;
6571 ids = phba->sli4_hba.vfi_ids;
6572 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6573 break;
6574 default:
6575
6576 id_array = NULL;
6577 bmask = NULL;
6578 ids = NULL;
6579 ext_blk_list = NULL;
6580 goto err_exit;
6581 }
6582
6583
6584 /*
6585  * Complete the extent setup with the ids the port assigned.  The
6586  * bitmask tracks which ids are in use; the id array stores the
6587  * values themselves, two packed per response word.
6588  */
6589 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6590 if ((i % 2) == 0)
6591 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6592 &id_array[k]);
6593 else
6594 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6595 &id_array[k]);
6596
6597 rsrc_blks = kzalloc(length, GFP_KERNEL);
6598 if (unlikely(!rsrc_blks)) {
6599 rc = -ENOMEM;
6600 kfree(bmask);
6601 kfree(ids);
6602 goto err_exit;
6603 }
6604 rsrc_blks->rsrc_start = rsrc_id;
6605 rsrc_blks->rsrc_size = rsrc_size;
6606 list_add_tail(&rsrc_blks->list, ext_blk_list);
6607 rsrc_start = rsrc_id;
6608 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6609 phba->sli4_hba.io_xri_start = rsrc_start +
6610 lpfc_sli4_get_iocb_cnt(phba);
6611 }
6612
6613 while (rsrc_id < (rsrc_start + rsrc_size)) {
6614 ids[j] = rsrc_id;
6615 rsrc_id++;
6616 j++;
6617 }
6618
6619 if ((i % 2) == 1)
6620 k++;
6621 }
6622 err_exit:
6623 lpfc_sli4_mbox_cmd_free(phba, mbox);
6624 return rc;
6625 }
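
/*
 * Editor's illustrative sketch (not part of the driver): the response
 * packs two 16-bit extent base ids per 32-bit id_array word, which is
 * why the unpack loop above alternates word4_0/word4_1 and advances k
 * every second extent.  Equivalent indexing over raw words (assuming,
 * per the field names, that the even-index id sits in the low half):
 */
static unsigned short example_extent_base_id(const unsigned int *words, int i)
{
	unsigned int w = words[i / 2];

	return (i & 1) ? (unsigned short)(w >> 16) : (unsigned short)w;
}
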
6626
6627 /**
6628  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent
6629  * @phba: Pointer to HBA context object.
6630  * @type: The resource extent type to deallocate.
6631  *
6632  * Issues a DEALLOC_RSRC_EXTENT mailbox command for @type and, on
6633  * success, frees the matching driver-side bitmask, id array, and
6634  * extent block list (RPI bitmasks are released elsewhere).
6635  *
6636  * Returns 0 on success, -ENOMEM or -EIO on failure.
6637  **/
6638 static int
6639 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6640 {
6641 int rc;
6642 uint32_t length, mbox_tmo = 0;
6643 LPFC_MBOXQ_t *mbox;
6644 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6645 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6646
6647 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6648 if (!mbox)
6649 return -ENOMEM;
6650
6651
6652 /*
6653  * An embedded mailbox suffices here: only the resource type is
6654  * sent, and the port releases all extents of that type.
6655  */
6656 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6657 sizeof(struct lpfc_sli4_cfg_mhdr));
6658 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6659 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6660 length, LPFC_SLI4_MBX_EMBED);
6661
6662
6663 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6664 LPFC_SLI4_MBX_EMBED);
6665 if (unlikely(rc)) {
6666 rc = -EIO;
6667 goto out_free_mbox;
6668 }
6669 if (!phba->sli4_hba.intr_enable)
6670 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6671 else {
6672 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6673 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6674 }
6675 if (unlikely(rc)) {
6676 rc = -EIO;
6677 goto out_free_mbox;
6678 }
6679
6680 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6681 if (bf_get(lpfc_mbox_hdr_status,
6682 &dealloc_rsrc->header.cfg_shdr.response)) {
6683 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6684 "2919 Failed to release resource extents "
6685 "for type %d - Status 0x%x Add'l Status 0x%x. "
6686 "Resource memory not released.\n",
6687 type,
6688 bf_get(lpfc_mbox_hdr_status,
6689 &dealloc_rsrc->header.cfg_shdr.response),
6690 bf_get(lpfc_mbox_hdr_add_status,
6691 &dealloc_rsrc->header.cfg_shdr.response));
6692 rc = -EIO;
6693 goto out_free_mbox;
6694 }
6695
6696
6697 switch (type) {
6698 case LPFC_RSC_TYPE_FCOE_VPI:
6699 kfree(phba->vpi_bmask);
6700 kfree(phba->vpi_ids);
6701 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6702 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6703 &phba->lpfc_vpi_blk_list, list) {
6704 list_del_init(&rsrc_blk->list);
6705 kfree(rsrc_blk);
6706 }
6707 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6708 break;
6709 case LPFC_RSC_TYPE_FCOE_XRI:
6710 kfree(phba->sli4_hba.xri_bmask);
6711 kfree(phba->sli4_hba.xri_ids);
6712 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6713 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6714 list_del_init(&rsrc_blk->list);
6715 kfree(rsrc_blk);
6716 }
6717 break;
6718 case LPFC_RSC_TYPE_FCOE_VFI:
6719 kfree(phba->sli4_hba.vfi_bmask);
6720 kfree(phba->sli4_hba.vfi_ids);
6721 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6722 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6723 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6724 list_del_init(&rsrc_blk->list);
6725 kfree(rsrc_blk);
6726 }
6727 break;
6728 case LPFC_RSC_TYPE_FCOE_RPI:
6729 /* RPI bitmask and id array are released elsewhere */
6730 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6731 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6732 list_del_init(&rsrc_blk->list);
6733 kfree(rsrc_blk);
6734 }
6735 break;
6736 default:
6737 break;
6738 }
6739
6740 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6741
6742 out_free_mbox:
6743 mempool_free(mbox, phba->mbox_mem_pool);
6744 return rc;
6745 }
6746
6747 static void
6748 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6749 uint32_t feature)
6750 {
6751 uint32_t len;
6752 u32 sig_freq = 0;
6753
6754 len = sizeof(struct lpfc_mbx_set_feature) -
6755 sizeof(struct lpfc_sli4_cfg_mhdr);
6756 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6757 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6758 LPFC_SLI4_MBX_EMBED);
6759
6760 switch (feature) {
6761 case LPFC_SET_UE_RECOVERY:
6762 bf_set(lpfc_mbx_set_feature_UER,
6763 &mbox->u.mqe.un.set_feature, 1);
6764 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6765 mbox->u.mqe.un.set_feature.param_len = 8;
6766 break;
6767 case LPFC_SET_MDS_DIAGS:
6768 bf_set(lpfc_mbx_set_feature_mds,
6769 &mbox->u.mqe.un.set_feature, 1);
6770 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6771 &mbox->u.mqe.un.set_feature, 1);
6772 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6773 mbox->u.mqe.un.set_feature.param_len = 8;
6774 break;
6775 case LPFC_SET_CGN_SIGNAL:
6776 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6777 sig_freq = 0;
6778 else
6779 sig_freq = phba->cgn_sig_freq;
6780
6781 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6782 bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6783 &mbox->u.mqe.un.set_feature, sig_freq);
6784 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6785 &mbox->u.mqe.un.set_feature, sig_freq);
6786 }
6787
6788 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6789 bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6790 &mbox->u.mqe.un.set_feature, sig_freq);
6791
6792 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6793 phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6794 sig_freq = 0;
6795 else
6796 sig_freq = lpfc_acqe_cgn_frequency;
6797
6798 bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6799 &mbox->u.mqe.un.set_feature, sig_freq);
6800
6801 mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6802 mbox->u.mqe.un.set_feature.param_len = 12;
6803 break;
6804 case LPFC_SET_DUAL_DUMP:
6805 bf_set(lpfc_mbx_set_feature_dd,
6806 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6807 bf_set(lpfc_mbx_set_feature_ddquery,
6808 &mbox->u.mqe.un.set_feature, 0);
6809 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6810 mbox->u.mqe.un.set_feature.param_len = 4;
6811 break;
6812 case LPFC_SET_ENABLE_MI:
6813 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6814 mbox->u.mqe.un.set_feature.param_len = 4;
6815 bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6816 phba->pport->cfg_lun_queue_depth);
6817 bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6818 phba->sli4_hba.pc_sli4_params.mi_ver);
6819 break;
6820 case LPFC_SET_ENABLE_CMF:
6821 bf_set(lpfc_mbx_set_feature_dd, &mbox->u.mqe.un.set_feature, 1);
6822 mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6823 mbox->u.mqe.un.set_feature.param_len = 4;
6824 bf_set(lpfc_mbx_set_feature_cmf,
6825 &mbox->u.mqe.un.set_feature, 1);
6826 break;
6827 }
6828 return;
6829 }
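
/*
 * Editor's illustrative usage sketch (not part of the driver): a caller
 * allocates a mailbox, lets lpfc_set_features() format it for the
 * desired feature, then issues it.  Polled flavor, with error handling
 * and shdr status checks trimmed:
 */
static int example_enable_dual_dump(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	lpfc_set_features(phba, mbox, LPFC_SET_DUAL_DUMP);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mempool_free(mbox, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
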
6830
6831 /**
6832  * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
6833  * @phba: Pointer to HBA context object.
6834  *
6835  * Marks the RAS state INACTIVE and pokes the PDEV control register so
6836  * the firmware stops DMAing log entries; sleeps 10-20 ms to drain.
6837  **/
6838 void
6839 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6840 {
6841 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6842
6843 spin_lock_irq(&phba->hbalock);
6844 ras_fwlog->state = INACTIVE;
6845 spin_unlock_irq(&phba->hbalock);
6846
6847
6848 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6849 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6850
6851
6852 usleep_range(10 * 1000, 20 * 1000);
6853 }
6854
6855 /**
6856  * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging
6857  * @phba: Pointer to HBA context object.
6858  *
6859  * Releases every firmware-log DMA buffer on the fwlog_buff_list and
6860  * the LWPD buffer, then marks the RAS state INACTIVE.
6861  **/
6862 void
6863 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6864 {
6865 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6866 struct lpfc_dmabuf *dmabuf, *next;
6867
6868 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6869 list_for_each_entry_safe(dmabuf, next,
6870 &ras_fwlog->fwlog_buff_list,
6871 list) {
6872 list_del(&dmabuf->list);
6873 dma_free_coherent(&phba->pcidev->dev,
6874 LPFC_RAS_MAX_ENTRY_SIZE,
6875 dmabuf->virt, dmabuf->phys);
6876 kfree(dmabuf);
6877 }
6878 }
6879
6880 if (ras_fwlog->lwpd.virt) {
6881 dma_free_coherent(&phba->pcidev->dev,
6882 sizeof(uint32_t) * 2,
6883 ras_fwlog->lwpd.virt,
6884 ras_fwlog->lwpd.phys);
6885 ras_fwlog->lwpd.virt = NULL;
6886 }
6887
6888 spin_lock_irq(&phba->hbalock);
6889 ras_fwlog->state = INACTIVE;
6890 spin_unlock_irq(&phba->hbalock);
6891 }
6892
6893 /**
6894  * lpfc_sli4_ras_dma_alloc - Allocate memory for FW support
6895  * @phba: Pointer to HBA context object.
6896  * @fwlog_buff_count: Number of log buffers to allocate.
6897  *
6898  * Allocates the two-word LWPD (log write position data) buffer and
6899  * @fwlog_buff_count DMA buffers of LPFC_RAS_MAX_ENTRY_SIZE bytes for
6900  * the firmware to log into, tagging each buffer with its index.
6901  *
6902  * Returns 0 on success, -ENOMEM on failure (partial allocations freed).
6903  **/
6904 static int
6905 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6906 uint32_t fwlog_buff_count)
6907 {
6908 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6909 struct lpfc_dmabuf *dmabuf;
6910 int rc = 0, i = 0;
6911
6912
6913 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6914
6915
6916 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6917 sizeof(uint32_t) * 2,
6918 &ras_fwlog->lwpd.phys,
6919 GFP_KERNEL);
6920 if (!ras_fwlog->lwpd.virt) {
6921 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6922 "6185 LWPD Memory Alloc Failed\n");
6923
6924 return -ENOMEM;
6925 }
6926
6927 ras_fwlog->fw_buffcount = fwlog_buff_count;
6928 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6929 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6930 GFP_KERNEL);
6931 if (!dmabuf) {
6932 rc = -ENOMEM;
6933 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6934 "6186 Memory Alloc failed FW logging");
6935 goto free_mem;
6936 }
6937
6938 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6939 LPFC_RAS_MAX_ENTRY_SIZE,
6940 &dmabuf->phys, GFP_KERNEL);
6941 if (!dmabuf->virt) {
6942 kfree(dmabuf);
6943 rc = -ENOMEM;
6944 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6945 "6187 DMA Alloc Failed FW logging");
6946 goto free_mem;
6947 }
6948 dmabuf->buffer_tag = i;
6949 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6950 }
6951
6952 free_mem:
6953 if (rc)
6954 lpfc_sli4_ras_dma_free(phba);
6955
6956 return rc;
6957 }
6958
6959 /**
6960  * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS mailbox command
6961  * @phba: Pointer to HBA context object.
6962  * @pmb: Pointer to the completed mailbox object.
6963  * On success marks the RAS state ACTIVE; on failure disables RAS
6964  * support and frees the firmware-log DMA memory.
6965  **/
6966 static void
6967 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6968 {
6969 MAILBOX_t *mb;
6970 union lpfc_sli4_cfg_shdr *shdr;
6971 uint32_t shdr_status, shdr_add_status;
6972 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6973
6974 mb = &pmb->u.mb;
6975
6976 shdr = (union lpfc_sli4_cfg_shdr *)
6977 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6978 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6979 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6980
6981 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6982 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6983 "6188 FW LOG mailbox "
6984 "completed with status x%x add_status x%x,"
6985 " mbx status x%x\n",
6986 shdr_status, shdr_add_status, mb->mbxStatus);
6987
6988 ras_fwlog->ras_hwsupport = false;
6989 goto disable_ras;
6990 }
6991
6992 spin_lock_irq(&phba->hbalock);
6993 ras_fwlog->state = ACTIVE;
6994 spin_unlock_irq(&phba->hbalock);
6995 mempool_free(pmb, phba->mbox_mem_pool);
6996
6997 return;
6998
6999 disable_ras:
7000
7001 lpfc_sli4_ras_dma_free(phba);
7002 mempool_free(pmb, phba->mbox_mem_pool);
7003 }
7004
7005 /**
7006  * lpfc_sli4_ras_fwlog_init - Initialize memory and post RAS MBX command
7007  * @phba: Pointer to HBA context object.
7008  * @fwlog_level: Firmware logging verbosity level.
7009  * @fwlog_enable: Enable/disable firmware logging.
7010  *
7011  * Sizes (and, when needed, allocates) the log buffers, then posts an
7012  * asynchronous SET_DIAG_LOG_OPTION mailbox command describing them.
7013  **/
7014 int
7015 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
7016 uint32_t fwlog_level,
7017 uint32_t fwlog_enable)
7018 {
7019 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
7020 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
7021 struct lpfc_dmabuf *dmabuf;
7022 LPFC_MBOXQ_t *mbox;
7023 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
7024 int rc = 0;
7025
7026 spin_lock_irq(&phba->hbalock);
7027 ras_fwlog->state = INACTIVE;
7028 spin_unlock_irq(&phba->hbalock);
7029
7030 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
7031 phba->cfg_ras_fwlog_buffsize);
7032 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
7033
7034 /*
7035  * When re-enabling FW logging, reuse the DMA buffers allocated
7036  * earlier; only allocate when the LWPD is absent.
7037  */
7038 if (!ras_fwlog->lwpd.virt) {
7039 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
7040 if (rc) {
7041 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7042 "6189 FW Log Memory Allocation Failed");
7043 return rc;
7044 }
7045 }
7046
7047
7048 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7049 if (!mbox) {
7050 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7051 "6190 RAS MBX Alloc Failed");
7052 rc = -ENOMEM;
7053 goto mem_free;
7054 }
7055
7056 ras_fwlog->fw_loglevel = fwlog_level;
7057 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
7058 sizeof(struct lpfc_sli4_cfg_mhdr));
7059
7060 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
7061 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
7062 len, LPFC_SLI4_MBX_EMBED);
7063
7064 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
7065 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
7066 fwlog_enable);
7067 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
7068 ras_fwlog->fw_loglevel);
7069 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
7070 ras_fwlog->fw_buffcount);
7071 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
7072 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
7073
7074
7075 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
7076 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
7077
7078 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
7079 putPaddrLow(dmabuf->phys);
7080
7081 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7082 putPaddrHigh(dmabuf->phys);
7083 }
7084
7085
7086 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7087 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
7088
7089 spin_lock_irq(&phba->hbalock);
7090 ras_fwlog->state = REG_INPROGRESS;
7091 spin_unlock_irq(&phba->hbalock);
7092 mbox->vport = phba->pport;
7093 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7094
7095 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7096
7097 if (rc == MBX_NOT_FINISHED) {
7098 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7099 "6191 FW-Log Mailbox failed. "
7100 "status %d mbxStatus : x%x", rc,
7101 bf_get(lpfc_mqe_status, &mbox->u.mqe));
7102 mempool_free(mbox, phba->mbox_mem_pool);
7103 rc = -EIO;
7104 goto mem_free;
7105 } else
7106 rc = 0;
7107 mem_free:
7108 if (rc)
7109 lpfc_sli4_ras_dma_free(phba);
7110
7111 return rc;
7112 }
7113
7114 /**
7115  * lpfc_sli4_ras_setup - Check if RAS is supported and initialize it
7116  * @phba: Pointer to HBA context object.
7117  *
7118  * Enables firmware logging at the configured level when supported.
7119  **/
7120 void
7121 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7122 {
7123
7124 if (lpfc_check_fwlog_support(phba))
7125 return;
7126
7127 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7128 LPFC_RAS_ENABLE_LOGGING);
7129 }
7130
7131 /**
7132  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resources
7133  * @phba: Pointer to HBA context object.
7134  * Allocates RPI/VPI/XRI/VFI ids either from port extents (reallocating
7135  * if the provisioning changed) or from contiguous READ_CONFIG ranges.
7136  **/
7137 int
7138 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7139 {
7140 int i, rc, error = 0;
7141 uint16_t count, base;
7142 unsigned long longs;
7143
7144 if (!phba->sli4_hba.rpi_hdrs_in_use)
7145 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7146 if (phba->sli4_hba.extents_in_use) {
7147 /*
7148  * The port supports resource extents.  The XRI, VPI, VFI and
7149  * RPI extent counts must be read and allocated before the
7150  * resource id arrays can be provisioned.
7151  */
7152 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7153 LPFC_IDX_RSRC_RDY) {
7154
7155 /*
7156  * Extent-based resources were already set up - likely a port
7157  * reset.  Check whether the port reprovisioned anything.
7158  */
7159 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7160 LPFC_RSC_TYPE_FCOE_VFI);
7161 if (rc != 0)
7162 error++;
7163 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7164 LPFC_RSC_TYPE_FCOE_VPI);
7165 if (rc != 0)
7166 error++;
7167 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7168 LPFC_RSC_TYPE_FCOE_XRI);
7169 if (rc != 0)
7170 error++;
7171 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7172 LPFC_RSC_TYPE_FCOE_RPI);
7173 if (rc != 0)
7174 error++;
7175
7176 /*
7177  * The resources provided to this port instance may have
7178  * changed between resets.  If so, release everything and
7179  * reallocate; otherwise nothing needs to be done.
7180  */
7181
7182 if (error) {
7183 lpfc_printf_log(phba, KERN_INFO,
7184 LOG_MBOX | LOG_INIT,
7185 "2931 Detected extent resource "
7186 "change. Reallocating all "
7187 "extents.\n");
7188 rc = lpfc_sli4_dealloc_extent(phba,
7189 LPFC_RSC_TYPE_FCOE_VFI);
7190 rc = lpfc_sli4_dealloc_extent(phba,
7191 LPFC_RSC_TYPE_FCOE_VPI);
7192 rc = lpfc_sli4_dealloc_extent(phba,
7193 LPFC_RSC_TYPE_FCOE_XRI);
7194 rc = lpfc_sli4_dealloc_extent(phba,
7195 LPFC_RSC_TYPE_FCOE_RPI);
7196 } else
7197 return 0;
7198 }
7199
7200 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7201 if (unlikely(rc))
7202 goto err_exit;
7203
7204 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7205 if (unlikely(rc))
7206 goto err_exit;
7207
7208 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7209 if (unlikely(rc))
7210 goto err_exit;
7211
7212 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7213 if (unlikely(rc))
7214 goto err_exit;
7215 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7216 LPFC_IDX_RSRC_RDY);
7217 return rc;
7218 } else {
7219
7220 /*
7221  * The port does not support resource extents.  The XRI, VPI,
7222  * VFI and RPI ranges come from READ_CONFIG, so just allocate
7223  * the bitmasks and provision the id arrays.  If a port reset
7224  * already provisioned them, release first and redo.
7225  */
7226 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7227 LPFC_IDX_RSRC_RDY) {
7228 lpfc_sli4_dealloc_resource_identifiers(phba);
7229 lpfc_sli4_remove_rpis(phba);
7230 }
7231
7232 count = phba->sli4_hba.max_cfg_param.max_rpi;
7233 if (count <= 0) {
7234 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7235 "3279 Invalid provisioning of "
7236 "rpi:%d\n", count);
7237 rc = -EINVAL;
7238 goto err_exit;
7239 }
7240 base = phba->sli4_hba.max_cfg_param.rpi_base;
7241 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7242 phba->sli4_hba.rpi_bmask = kcalloc(longs,
7243 sizeof(unsigned long),
7244 GFP_KERNEL);
7245 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7246 rc = -ENOMEM;
7247 goto err_exit;
7248 }
7249 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7250 GFP_KERNEL);
7251 if (unlikely(!phba->sli4_hba.rpi_ids)) {
7252 rc = -ENOMEM;
7253 goto free_rpi_bmask;
7254 }
7255
7256 for (i = 0; i < count; i++)
7257 phba->sli4_hba.rpi_ids[i] = base + i;
7258
7259
7260 count = phba->sli4_hba.max_cfg_param.max_vpi;
7261 if (count <= 0) {
7262 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7263 "3280 Invalid provisioning of "
7264 "vpi:%d\n", count);
7265 rc = -EINVAL;
7266 goto free_rpi_ids;
7267 }
7268 base = phba->sli4_hba.max_cfg_param.vpi_base;
7269 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7270 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7271 GFP_KERNEL);
7272 if (unlikely(!phba->vpi_bmask)) {
7273 rc = -ENOMEM;
7274 goto free_rpi_ids;
7275 }
7276 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7277 GFP_KERNEL);
7278 if (unlikely(!phba->vpi_ids)) {
7279 rc = -ENOMEM;
7280 goto free_vpi_bmask;
7281 }
7282
7283 for (i = 0; i < count; i++)
7284 phba->vpi_ids[i] = base + i;
7285
7286
7287 count = phba->sli4_hba.max_cfg_param.max_xri;
7288 if (count <= 0) {
7289 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7290 "3281 Invalid provisioning of "
7291 "xri:%d\n", count);
7292 rc = -EINVAL;
7293 goto free_vpi_ids;
7294 }
7295 base = phba->sli4_hba.max_cfg_param.xri_base;
7296 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7297 phba->sli4_hba.xri_bmask = kcalloc(longs,
7298 sizeof(unsigned long),
7299 GFP_KERNEL);
7300 if (unlikely(!phba->sli4_hba.xri_bmask)) {
7301 rc = -ENOMEM;
7302 goto free_vpi_ids;
7303 }
7304 phba->sli4_hba.max_cfg_param.xri_used = 0;
7305 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7306 GFP_KERNEL);
7307 if (unlikely(!phba->sli4_hba.xri_ids)) {
7308 rc = -ENOMEM;
7309 goto free_xri_bmask;
7310 }
7311
7312 for (i = 0; i < count; i++)
7313 phba->sli4_hba.xri_ids[i] = base + i;
7314
7315
7316 count = phba->sli4_hba.max_cfg_param.max_vfi;
7317 if (count <= 0) {
7318 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7319 "3282 Invalid provisioning of "
7320 "vfi:%d\n", count);
7321 rc = -EINVAL;
7322 goto free_xri_ids;
7323 }
7324 base = phba->sli4_hba.max_cfg_param.vfi_base;
7325 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7326 phba->sli4_hba.vfi_bmask = kcalloc(longs,
7327 sizeof(unsigned long),
7328 GFP_KERNEL);
7329 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7330 rc = -ENOMEM;
7331 goto free_xri_ids;
7332 }
7333 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7334 GFP_KERNEL);
7335 if (unlikely(!phba->sli4_hba.vfi_ids)) {
7336 rc = -ENOMEM;
7337 goto free_vfi_bmask;
7338 }
7339
7340 for (i = 0; i < count; i++)
7341 phba->sli4_hba.vfi_ids[i] = base + i;
7342
7343
7344
7345
7346
7347 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7348 LPFC_IDX_RSRC_RDY);
7349 return 0;
7350 }
7351
7352 free_vfi_bmask:
7353 kfree(phba->sli4_hba.vfi_bmask);
7354 phba->sli4_hba.vfi_bmask = NULL;
7355 free_xri_ids:
7356 kfree(phba->sli4_hba.xri_ids);
7357 phba->sli4_hba.xri_ids = NULL;
7358 free_xri_bmask:
7359 kfree(phba->sli4_hba.xri_bmask);
7360 phba->sli4_hba.xri_bmask = NULL;
7361 free_vpi_ids:
7362 kfree(phba->vpi_ids);
7363 phba->vpi_ids = NULL;
7364 free_vpi_bmask:
7365 kfree(phba->vpi_bmask);
7366 phba->vpi_bmask = NULL;
7367 free_rpi_ids:
7368 kfree(phba->sli4_hba.rpi_ids);
7369 phba->sli4_hba.rpi_ids = NULL;
7370 free_rpi_bmask:
7371 kfree(phba->sli4_hba.rpi_bmask);
7372 phba->sli4_hba.rpi_bmask = NULL;
7373 err_exit:
7374 return rc;
7375 }
7376
7377 /**
7378  * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource ids
7379  * @phba: Pointer to HBA context object.
7380  *
7381  * Releases extent-based resources when extents are in use; otherwise
7382  * frees the driver-managed bitmasks and id arrays.  Always returns 0.
7383  **/
7384 int
7385 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7386 {
7387 if (phba->sli4_hba.extents_in_use) {
7388 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7389 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7390 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7391 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7392 } else {
7393 kfree(phba->vpi_bmask);
7394 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7395 kfree(phba->vpi_ids);
7396 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7397 kfree(phba->sli4_hba.xri_bmask);
7398 kfree(phba->sli4_hba.xri_ids);
7399 kfree(phba->sli4_hba.vfi_bmask);
7400 kfree(phba->sli4_hba.vfi_ids);
7401 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7402 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7403 }
7404
7405 return 0;
7406 }
7407
7408 /**
7409  * lpfc_sli4_get_allocated_extnts - Read the port's allocated extents
7410  * @phba: Pointer to HBA context object.
7411  * @type: The resource extent type.
7412  * @extnt_cnt: buffer to hold the port's extent count response.
7413  * @extnt_size: buffer to hold the port's extent size response.
7414  *
7415  * Issues a GET_ALLOC_RSRC_EXTENT mailbox command to read the extents
7416  * the port has allocated for @type.  Returns 0 on success.
7417  **/
7418 int
7419 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7420 uint16_t *extnt_cnt, uint16_t *extnt_size)
7421 {
7422 bool emb;
7423 int rc = 0;
7424 uint16_t curr_blks = 0;
7425 uint32_t req_len, emb_len;
7426 uint32_t alloc_len, mbox_tmo;
7427 struct list_head *blk_list_head;
7428 struct lpfc_rsrc_blks *rsrc_blk;
7429 LPFC_MBOXQ_t *mbox;
7430 void *virtaddr = NULL;
7431 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7432 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7433 union lpfc_sli4_cfg_shdr *shdr;
7434
7435 switch (type) {
7436 case LPFC_RSC_TYPE_FCOE_VPI:
7437 blk_list_head = &phba->lpfc_vpi_blk_list;
7438 break;
7439 case LPFC_RSC_TYPE_FCOE_XRI:
7440 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7441 break;
7442 case LPFC_RSC_TYPE_FCOE_VFI:
7443 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7444 break;
7445 case LPFC_RSC_TYPE_FCOE_RPI:
7446 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7447 break;
7448 default:
7449 return -EIO;
7450 }
7451
7452 /* Count the number of extents currently allocated for this type. */
7453 list_for_each_entry(rsrc_blk, blk_list_head, list) {
7454 if (curr_blks == 0) {
7455 /*
7456  * The GET_ALLOCATED mailbox does not return the size,
7457  * just the count.  The size should be just the size
7458  * stored in the current allocated block and all sizes
7459  * for an extent type are the same, so set the return
7460  * value now.
7461  */
7462 *extnt_size = rsrc_blk->rsrc_size;
7463 }
7464 curr_blks++;
7465 }
7466
7467 /*
7468  * Calculate the size of an embedded mailbox.  The uint32_t
7469  * accounts for the extents-specific word.
7470  */
7471 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7472 sizeof(uint32_t);
7473
7474 /*
7475  * Presume the allocation and response will fit into an embedded
7476  * mailbox.  If not true, reconfigure to a non-embedded mailbox.
7477  */
7478 emb = LPFC_SLI4_MBX_EMBED;
7479 req_len = curr_blks * sizeof(uint16_t);
7480 if (req_len > emb_len) {
7481 req_len = curr_blks * sizeof(uint16_t) +
7482 sizeof(union lpfc_sli4_cfg_shdr) +
7483 sizeof(uint32_t);
7484 emb = LPFC_SLI4_MBX_NEMBED;
7485 }
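/* An embedded mailbox carries its payload inside the mailbox command
 * itself, while a non-embedded mailbox places it in external sges.
 * The non-embedded form is only needed once the extent-count response
 * would outgrow emb_len.
 */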
7486
7487 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7488 if (!mbox)
7489 return -ENOMEM;
7490 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7491
7492 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7493 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7494 req_len, emb);
7495 if (alloc_len < req_len) {
7496 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7497 "2983 Allocated DMA memory size (x%x) is "
7498 "less than the requested DMA memory "
7499 "size (x%x)\n", alloc_len, req_len);
7500 rc = -ENOMEM;
7501 goto err_exit;
7502 }
7503 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7504 if (unlikely(rc)) {
7505 rc = -EIO;
7506 goto err_exit;
7507 }
7508
7509 if (!phba->sli4_hba.intr_enable)
7510 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7511 else {
7512 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7513 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7514 }
7515
7516 if (unlikely(rc)) {
7517 rc = -EIO;
7518 goto err_exit;
7519 }
7520
7521 /*
7522  * Figure out where the response is located.  Then get local
7523  * pointers to the response data.  The port does not guarantee
7524  * to respond to all extent-count requests, so update the local
7525  * variable with the allocated count from the port.
7526  */
7527 if (emb == LPFC_SLI4_MBX_EMBED) {
7528 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7529 shdr = &rsrc_ext->header.cfg_shdr;
7530 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7531 } else {
7532 virtaddr = mbox->sge_array->addr[0];
7533 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7534 shdr = &n_rsrc->cfg_shdr;
7535 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7536 }
7537
7538 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7539 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7540 "2984 Failed to read allocated resources "
7541 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7542 type,
7543 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7544 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7545 rc = -EIO;
7546 goto err_exit;
7547 }
7548 err_exit:
7549 lpfc_sli4_mbox_cmd_free(phba, mbox);
7550 return rc;
7551 }
7552
7553 /**
7554  * lpfc_sli4_repost_sgl_list - Repost a list of sgl buffers to the port
7555  * @phba: pointer to lpfc hba data structure.
7556  * @sgl_list: linked list of sgl buffers to post.
7557  * @cnt: number of buffers on the list.
7558  *
7559  * This routine walks the list of allocated buffers and reposts their
7560  * sgls to the port.  It is needed after a pci function reset or warm
7561  * start.  Buffers whose XRIs are contiguous are gathered into blocks
7562  * and posted with the non-embedded SGL block-post mailbox command; a
7563  * lone buffer left with a non-contiguous XRI is posted individually
7564  * with the embedded SGL post.  Buffers that fail to post are freed.
7565  *
7566  * Returns the number of buffers successfully posted, or -EIO if none
7567  * could be posted.
7568  **/
7569 static int
7570 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7571 struct list_head *sgl_list, int cnt)
7572 {
7573 struct lpfc_sglq *sglq_entry = NULL;
7574 struct lpfc_sglq *sglq_entry_next = NULL;
7575 struct lpfc_sglq *sglq_entry_first = NULL;
7576 int status, total_cnt;
7577 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7578 int last_xritag = NO_XRI;
7579 LIST_HEAD(prep_sgl_list);
7580 LIST_HEAD(blck_sgl_list);
7581 LIST_HEAD(allc_sgl_list);
7582 LIST_HEAD(post_sgl_list);
7583 LIST_HEAD(free_sgl_list);
7584
7585 spin_lock_irq(&phba->hbalock);
7586 spin_lock(&phba->sli4_hba.sgl_list_lock);
7587 list_splice_init(sgl_list, &allc_sgl_list);
7588 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7589 spin_unlock_irq(&phba->hbalock);
7590
7591 total_cnt = cnt;
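/* Batching strategy: sgls whose XRIs are consecutive accumulate on
 * prep_sgl_list; a gap in the XRI sequence, or hitting the
 * non-embedded mailbox limit (LPFC_NEMBED_MBOX_SGL_CNT), seals the
 * batch onto blck_sgl_list for a single block post.
 */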
7592 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7593 &allc_sgl_list, list) {
7594 list_del_init(&sglq_entry->list);
7595 block_cnt++;
7596 if ((last_xritag != NO_XRI) &&
7597 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7598 /* a hole in the xri sequence - seal off a posting block */
7599 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7600 post_cnt = block_cnt - 1;
7601
7602 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7603 block_cnt = 1;
7604 } else {
7605
7606 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7607 /* enough sgls for a non-embedded sgl mbox command */
7608 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7609 list_splice_init(&prep_sgl_list,
7610 &blck_sgl_list);
7611 post_cnt = block_cnt;
7612 block_cnt = 0;
7613 }
7614 }
7615 num_posted++;
7616
7617 /* keep track of the last sgl's xritag */
7618 last_xritag = sglq_entry->sli4_xritag;
7619
7620 /* end of repost sgl list condition for buffers */
7621 if (num_posted == total_cnt) {
7622 if (post_cnt == 0) {
7623 list_splice_init(&prep_sgl_list,
7624 &blck_sgl_list);
7625 post_cnt = block_cnt;
7626 } else if (block_cnt == 1) {
7627 status = lpfc_sli4_post_sgl(phba,
7628 sglq_entry->phys, 0,
7629 sglq_entry->sli4_xritag);
7630 if (!status) {
7631
7632 list_add_tail(&sglq_entry->list,
7633 &post_sgl_list);
7634 } else {
7635
7636 lpfc_printf_log(phba, KERN_WARNING,
7637 LOG_SLI,
7638 "3159 Failed to post "
7639 "sgl, xritag:x%x\n",
7640 sglq_entry->sli4_xritag);
7641 list_add_tail(&sglq_entry->list,
7642 &free_sgl_list);
7643 total_cnt--;
7644 }
7645 }
7646 }
7647
7648 /* continue until a full posting block has been sealed off */
7649 if (post_cnt == 0)
7650 continue;
7651
7652 /* post the sealed-off buffer list sgls as a block */
7653 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7654 post_cnt);
7655
7656 if (!status) {
7657
7658 list_splice_init(&blck_sgl_list, &post_sgl_list);
7659 } else {
7660
7661 sglq_entry_first = list_first_entry(&blck_sgl_list,
7662 struct lpfc_sglq,
7663 list);
7664 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7665 "3160 Failed to post sgl-list, "
7666 "xritag:x%x-x%x\n",
7667 sglq_entry_first->sli4_xritag,
7668 (sglq_entry_first->sli4_xritag +
7669 post_cnt - 1));
7670 list_splice_init(&blck_sgl_list, &free_sgl_list);
7671 total_cnt -= post_cnt;
7672 }
7673
7674 /* reset xritag tracking only when no partial block is pending */
7675 if (block_cnt == 0)
7676 last_xritag = NO_XRI;
7677
7678 /* reset the sgl post count for the next round of posting */
7679 post_cnt = 0;
7680 }
7681
7682 /* free the sgls that failed to post */
7683 lpfc_free_sgl_list(phba, &free_sgl_list);
7684
7685 /* push the successfully posted sgls back onto the driver list */
7686 if (!list_empty(&post_sgl_list)) {
7687 spin_lock_irq(&phba->hbalock);
7688 spin_lock(&phba->sli4_hba.sgl_list_lock);
7689 list_splice_init(&post_sgl_list, sgl_list);
7690 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7691 spin_unlock_irq(&phba->hbalock);
7692 } else {
7693 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7694 "3161 Failure to post sgl to port.\n");
7695 return -EIO;
7696 }
7697
7698 /* return the number of XRIs actually posted */
7699 return total_cnt;
7700 }
7701
7702 /**
7703  * lpfc_sli4_repost_io_sgl_list - Repost all allocated IO buffer sgls
7704  * @phba: pointer to lpfc hba data structure.
7705  *
7706  * This routine flushes every allocated IO buffer onto a local list
7707  * and reposts the buffers' sgls to the port using SGL block post.
7708  * This is required after a pci function reset or warm start so that
7709  * the port regains its mapping of XRIs to buffers.
7710  *
7711  * Return codes:
7712  *   0 - success, -EIO - no buffers could be posted.
7713  **/
7714 static int
7715 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7716 {
7717 LIST_HEAD(post_nblist);
7718 int num_posted, rc = 0;
7719
7720 /* gather all IO buffers that need reposting onto a local list */
7721 lpfc_io_buf_flush(phba, &post_nblist);
7722
7723 /* post the list of buffer sgls to the port, if any */
7724 if (!list_empty(&post_nblist)) {
7725 num_posted = lpfc_sli4_post_io_sgl_list(
7726 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7727
7728 if (num_posted == 0)
7729 rc = -EIO;
7730 }
7731 return rc;
7732 }
7733
7734 static void
7735 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7736 {
7737 uint32_t len;
7738
7739 len = sizeof(struct lpfc_mbx_set_host_data) -
7740 sizeof(struct lpfc_sli4_cfg_mhdr);
7741 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7742 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7743 LPFC_SLI4_MBX_EMBED);
7744
7745 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7746 mbox->u.mqe.un.set_host_data.param_len =
7747 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7748 snprintf(mbox->u.mqe.un.set_host_data.un.data,
7749 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7750 "Linux %s v"LPFC_DRIVER_VERSION,
7751 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7752 }
7753
7754 int
7755 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7756 struct lpfc_queue *drq, int count, int idx)
7757 {
7758 int rc, i;
7759 struct lpfc_rqe hrqe;
7760 struct lpfc_rqe drqe;
7761 struct lpfc_rqb *rqbp;
7762 unsigned long flags;
7763 struct rqb_dmabuf *rqb_buffer;
7764 LIST_HEAD(rqb_buf_list);
7765
7766 rqbp = hrq->rqbp;
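/* This runs in two phases: first allocate up to @count buffers,
 * checking under hbalock that the receive queue still has room;
 * then post all prepared RQEs to the header/data queue pair in one
 * locked pass below.
 */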
7767 for (i = 0; i < count; i++) {
7768 spin_lock_irqsave(&phba->hbalock, flags);
7769 /* If the RQ is already full, don't bother allocating more */
7770 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7771 spin_unlock_irqrestore(&phba->hbalock, flags);
7772 break;
7773 }
7774 spin_unlock_irqrestore(&phba->hbalock, flags);
7775
7776 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7777 if (!rqb_buffer)
7778 break;
7779 rqb_buffer->hrq = hrq;
7780 rqb_buffer->drq = drq;
7781 rqb_buffer->idx = idx;
7782 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7783 }
7784
7785 spin_lock_irqsave(&phba->hbalock, flags);
7786 while (!list_empty(&rqb_buf_list)) {
7787 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7788 hbuf.list);
7789
7790 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7791 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7792 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7793 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7794 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7795 if (rc < 0) {
7796 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7797 "6421 Cannot post to HRQ %d: %x %x %x "
7798 "DRQ %x %x\n",
7799 hrq->queue_id,
7800 hrq->host_index,
7801 hrq->hba_index,
7802 hrq->entry_count,
7803 drq->host_index,
7804 drq->hba_index);
7805 rqbp->rqb_free_buffer(phba, rqb_buffer);
7806 } else {
7807 list_add_tail(&rqb_buffer->hbuf.list,
7808 &rqbp->rqb_buffer_list);
7809 rqbp->buffer_count++;
7810 }
7811 }
7812 spin_unlock_irqrestore(&phba->hbalock, flags);
7813 return 1;
7814 }
7815
7816 static void
7817 lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7818 {
7819 struct lpfc_vport *vport = pmb->vport;
7820 union lpfc_sli4_cfg_shdr *shdr;
7821 u32 shdr_status, shdr_add_status;
7822 u32 sig, acqe;
7823
7824
7825
7826
7827 shdr = (union lpfc_sli4_cfg_shdr *)
7828 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7829 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7830 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7831 if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7833 "2516 CGN SET_FEATURE mbox failed with "
7834 "status x%x add_status x%x, mbx status x%x "
7835 "Reset Congestion to FPINs only\n",
7836 shdr_status, shdr_add_status,
7837 pmb->u.mb.mbxStatus);
7838
7839 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7840 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7841 goto out;
7842 }
7843
7844 /* Zero out the Congestion Signal ACQE counter */
7845 phba->cgn_acqe_cnt = 0;
7846
7847 acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7848 &pmb->u.mqe.un.set_feature);
7849 sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7850 &pmb->u.mqe.un.set_feature);
7851 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7852 "4620 SET_FEATURES Success: Freq: %ds %dms "
7853 " Reg: x%x x%x\n", acqe, sig,
7854 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7855 out:
7856 mempool_free(pmb, phba->mbox_mem_pool);
7857
7858 /* Register for FPIN events from the fabric now that the
7859  * congestion SET_FEATURES exchange has completed.
7860  */
7861 lpfc_issue_els_rdf(vport, 0);
7862 }
7863
7864 int
7865 lpfc_config_cgn_signal(struct lpfc_hba *phba)
7866 {
7867 LPFC_MBOXQ_t *mboxq;
7868 u32 rc;
7869
7870 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7871 if (!mboxq)
7872 goto out_rdf;
7873
7874 lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7875 mboxq->vport = phba->pport;
7876 mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7877
7878 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7879 "4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7880 "Reg: x%x x%x\n",
7881 phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7882 phba->cgn_reg_signal, phba->cgn_reg_fpin);
7883
7884 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7885 if (rc == MBX_NOT_FINISHED)
7886 goto out;
7887 return 0;
7888
7889 out:
7890 mempool_free(mboxq, phba->mbox_mem_pool);
7891 out_rdf:
7892 /* Fall back to FPIN-only congestion reporting */
7893 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7894 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7895 lpfc_issue_els_rdf(phba->pport, 0);
7896 return -EIO;
7897 }
7898
7899 /**
7900  * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7901  * @phba: pointer to lpfc hba data structure.
7902  *
7903  * This routine seeds the per-cpu idle-time snapshots used to choose
7904  * between workqueue and irq-poll completion handling for each
7905  * hardware queue's IO completion queue, then schedules the delayed
7906  * work that keeps the statistics current.
7907  *
7908  **/
7909 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7910 {
7911 int i;
7912 struct lpfc_sli4_hdw_queue *hdwq;
7913 struct lpfc_queue *cq;
7914 struct lpfc_idle_stat *idle_stat;
7915 u64 wall;
7916
7917 for_each_present_cpu(i) {
7918 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7919 cq = hdwq->io_cq;
7920
7921 /* only seed each cq once, on its assigned channel cpu */
7922 if (cq->chann != i)
7923 continue;
7924
7925 idle_stat = &phba->sli4_hba.idle_stat[i];
7926
7927 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7928 idle_stat->prev_wall = wall;
7929
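/* nvmet and active congestion management keep completions on the
 * workqueue path; otherwise default to irq polling and let the
 * idle-stat delayed work switch modes as cpu idleness changes.
 */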
7930 if (phba->nvmet_support ||
7931 phba->cmf_active_mode != LPFC_CFG_OFF)
7932 cq->poll_mode = LPFC_QUEUE_WORK;
7933 else
7934 cq->poll_mode = LPFC_IRQ_POLL;
7935 }
7936
7937 if (!phba->nvmet_support)
7938 schedule_delayed_work(&phba->idle_stat_delay_work,
7939 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7940 }
7941
7942 static void lpfc_sli4_dip(struct lpfc_hba *phba)
7943 {
7944 uint32_t if_type;
7945
7946 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7947 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7948 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7949 struct lpfc_register reg_data;
7950
7951 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7952 &reg_data.word0))
7953 return;
7954
7955 if (bf_get(lpfc_sliport_status_dip, &reg_data))
7956 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7957 "2904 Firmware Dump Image Present"
7958 " on Adapter");
7959 }
7960 }
7961
7962 /**
7963  * lpfc_cmf_setup - Initialize congestion management and MI support
7964  * @phba: pointer to lpfc hba data structure.
7965  *
7966  * Negotiates the SLI4 MI and CMF features with the firmware, allocates
7967  * the congestion info and RX monitor buffers, and resets the counters.
7968  **/
7969 static int
7970 lpfc_cmf_setup(struct lpfc_hba *phba)
7971 {
7972 LPFC_MBOXQ_t *mboxq;
7973 struct lpfc_dmabuf *mp;
7974 struct lpfc_pc_sli4_params *sli4_params;
7975 int rc, cmf, mi_ver;
7976
7977 rc = lpfc_sli4_refresh_params(phba);
7978 if (unlikely(rc))
7979 return rc;
7980
7981 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7982 if (!mboxq)
7983 return -ENOMEM;
7984
7985 sli4_params = &phba->sli4_hba.pc_sli4_params;
7986
7987
7988 if (sli4_params->mi_ver) {
7989 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
7990 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7991 mi_ver = bf_get(lpfc_mbx_set_feature_mi,
7992 &mboxq->u.mqe.un.set_feature);
7993
7994 if (rc == MBX_SUCCESS) {
7995 if (mi_ver) {
7996 lpfc_printf_log(phba,
7997 KERN_WARNING, LOG_CGN_MGMT,
7998 "6215 MI is enabled\n");
7999 sli4_params->mi_ver = mi_ver;
8000 } else {
8001 lpfc_printf_log(phba,
8002 KERN_WARNING, LOG_CGN_MGMT,
8003 "6338 MI is disabled\n");
8004 sli4_params->mi_ver = 0;
8005 }
8006 } else {
8007
8008 lpfc_printf_log(phba, KERN_INFO,
8009 LOG_CGN_MGMT | LOG_INIT,
8010 "6245 Enable MI Mailbox x%x (x%x/x%x) "
8011 "failed, rc:x%x mi:x%x\n",
8012 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8013 lpfc_sli_config_mbox_subsys_get
8014 (phba, mboxq),
8015 lpfc_sli_config_mbox_opcode_get
8016 (phba, mboxq),
8017 rc, sli4_params->mi_ver);
8018 }
8019 } else {
8020 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8021 "6217 MI is disabled\n");
8022 }
8023
8024
8025 if (sli4_params->mi_ver)
8026 phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
8027
8028 /* Always try to enable the CMF feature if we can */
8029 if (sli4_params->cmf) {
8030 lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
8031 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8032 cmf = bf_get(lpfc_mbx_set_feature_cmf,
8033 &mboxq->u.mqe.un.set_feature);
8034 if (rc == MBX_SUCCESS && cmf) {
8035 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8036 "6218 CMF is enabled: mode %d\n",
8037 phba->cmf_active_mode);
8038 } else {
8039 lpfc_printf_log(phba, KERN_WARNING,
8040 LOG_CGN_MGMT | LOG_INIT,
8041 "6219 Enable CMF Mailbox x%x (x%x/x%x) "
8042 "failed, rc:x%x dd:x%x\n",
8043 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8044 lpfc_sli_config_mbox_subsys_get
8045 (phba, mboxq),
8046 lpfc_sli_config_mbox_opcode_get
8047 (phba, mboxq),
8048 rc, cmf);
8049 sli4_params->cmf = 0;
8050 phba->cmf_active_mode = LPFC_CFG_OFF;
8051 goto no_cmf;
8052 }
8053
8054 /* Allocate the Congestion Information Buffer on first setup */
8055 if (!phba->cgn_i) {
8056 mp = kmalloc(sizeof(*mp), GFP_KERNEL);
8057 if (mp)
8058 mp->virt = dma_alloc_coherent
8059 (&phba->pcidev->dev,
8060 sizeof(struct lpfc_cgn_info),
8061 &mp->phys, GFP_KERNEL);
8062 if (!mp || !mp->virt) {
8063 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8064 "2640 Failed to alloc memory "
8065 "for Congestion Info\n");
8066 kfree(mp);
8067 sli4_params->cmf = 0;
8068 phba->cmf_active_mode = LPFC_CFG_OFF;
8069 goto no_cmf;
8070 }
8071 phba->cgn_i = mp;
8072
8073
8074 lpfc_init_congestion_buf(phba);
8075 lpfc_init_congestion_stat(phba);
8076
8077
8078 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
8079 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
8080 }
8081
8082 rc = lpfc_sli4_cgn_params_read(phba);
8083 if (rc < 0) {
8084 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8085 "6242 Error reading Cgn Params (%d)\n",
8086 rc);
8087 /* Ensure congestion management mode is off */
8088 sli4_params->cmf = 0;
8089 } else if (!rc) {
8090 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8091 "6243 CGN Event empty object.\n");
8092 /* Ensure congestion management mode is off */
8093 sli4_params->cmf = 0;
8094 }
8095 } else {
8096 no_cmf:
8097 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8098 "6220 CMF is disabled\n");
8099 }
8100
8101 /* Only register the congestion buffer with the firmware when
8102  * both CMF and MI are enabled.
8103  */
8104 if (sli4_params->cmf && sli4_params->mi_ver) {
8105 rc = lpfc_reg_congestion_buf(phba);
8106 if (rc) {
8107 dma_free_coherent(&phba->pcidev->dev,
8108 sizeof(struct lpfc_cgn_info),
8109 phba->cgn_i->virt, phba->cgn_i->phys);
8110 kfree(phba->cgn_i);
8111 phba->cgn_i = NULL;
8112
8113 phba->cmf_active_mode = LPFC_CFG_OFF;
8114 return 0;
8115 }
8116 }
8117 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8118 "6470 Setup MI version %d CMF %d mode %d\n",
8119 sli4_params->mi_ver, sli4_params->cmf,
8120 phba->cmf_active_mode);
8121
8122 mempool_free(mboxq, phba->mbox_mem_pool);
8123
8124
8125 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8126 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8127 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8128 atomic_set(&phba->cgn_sync_warn_cnt, 0);
8129 atomic_set(&phba->cgn_driver_evt_cnt, 0);
8130 atomic_set(&phba->cgn_latency_evt_cnt, 0);
8131 atomic64_set(&phba->cgn_latency_evt, 0);
8132
8133 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8134
8135
8136 if (!phba->rxtable) {
8137 phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
8138 sizeof(struct rxtable_entry),
8139 GFP_KERNEL);
8140 if (!phba->rxtable) {
8141 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8142 "2644 Failed to alloc memory "
8143 "for RX Monitor Buffer\n");
8144 return -ENOMEM;
8145 }
8146 }
8147 atomic_set(&phba->rxtable_idx_head, 0);
8148 atomic_set(&phba->rxtable_idx_tail, 0);
8149 return 0;
8150 }
8151
8152 static int
8153 lpfc_set_host_tm(struct lpfc_hba *phba)
8154 {
8155 LPFC_MBOXQ_t *mboxq;
8156 uint32_t len, rc;
8157 struct timespec64 cur_time;
8158 struct tm broken;
8159 uint32_t month, day, year;
8160 uint32_t hour, minute, second;
8161 struct lpfc_mbx_set_host_date_time *tm;
8162
8163 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8164 if (!mboxq)
8165 return -ENOMEM;
8166
8167 len = sizeof(struct lpfc_mbx_set_host_data) -
8168 sizeof(struct lpfc_sli4_cfg_mhdr);
8169 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8170 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8171 LPFC_SLI4_MBX_EMBED);
8172
8173 mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8174 mboxq->u.mqe.un.set_host_data.param_len =
8175 sizeof(struct lpfc_mbx_set_host_date_time);
8176 tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8177 ktime_get_real_ts64(&cur_time);
8178 time64_to_tm(cur_time.tv_sec, 0, &broken);
8179 month = broken.tm_mon + 1;
8180 day = broken.tm_mday;
8181 year = broken.tm_year - 100;
8182 hour = broken.tm_hour;
8183 minute = broken.tm_min;
8184 second = broken.tm_sec;
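/* struct tm counts months from 0 and years from 1900, while the
 * firmware apparently expects a 1-based month and a two-digit year
 * relative to 2000 - hence the +1 and -100 adjustments above.
 */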
8185 bf_set(lpfc_mbx_set_host_month, tm, month);
8186 bf_set(lpfc_mbx_set_host_day, tm, day);
8187 bf_set(lpfc_mbx_set_host_year, tm, year);
8188 bf_set(lpfc_mbx_set_host_hour, tm, hour);
8189 bf_set(lpfc_mbx_set_host_min, tm, minute);
8190 bf_set(lpfc_mbx_set_host_sec, tm, second);
8191
8192 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8193 mempool_free(mboxq, phba->mbox_mem_pool);
8194 return rc;
8195 }
8196
8197 /**
8198  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8199  * @phba: Pointer to HBA context object.
8200  *
8201  * This function is the main SLI4 device initialization PCI function.
8202  * It is called by the HBA initialization code, the HBA reset code and
8203  * the HBA error attention handler code.
8204  * Returns 0 on success, negative error code otherwise.
8205  **/
8206 int
8207 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8208 {
8209 int rc, i, cnt, len, dd;
8210 LPFC_MBOXQ_t *mboxq;
8211 struct lpfc_mqe *mqe;
8212 uint8_t *vpd;
8213 uint32_t vpd_size;
8214 uint32_t ftr_rsp = 0;
8215 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8216 struct lpfc_vport *vport = phba->pport;
8217 struct lpfc_dmabuf *mp;
8218 struct lpfc_rqb *rqbp;
8219 u32 flg;
8220
8221
8222 rc = lpfc_pci_function_reset(phba);
8223 if (unlikely(rc))
8224 return -ENODEV;
8225
8226
8227 rc = lpfc_sli4_post_status_check(phba);
8228 if (unlikely(rc))
8229 return -ENODEV;
8230 else {
8231 spin_lock_irq(&phba->hbalock);
8232 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8233 flg = phba->sli.sli_flag;
8234 spin_unlock_irq(&phba->hbalock);
8235
8236 /* Allow a little time after setting SLI_ACTIVE for any polled
8237  * mailbox commands to complete via BSG before moving on. */
8238 for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8239 msleep(20);
8240 spin_lock_irq(&phba->hbalock);
8241 flg = phba->sli.sli_flag;
8242 spin_unlock_irq(&phba->hbalock);
8243 }
8244 }
8245
8246 lpfc_sli4_dip(phba);
8247
8248 /*
8249  * Allocate a single mailbox container for initializing the
8250  * port.
8251  */
8252 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8253 if (!mboxq)
8254 return -ENOMEM;
8255
8256 /* Issue READ_REV to collect vpd and FW information */
8257 vpd_size = SLI4_PAGE_SIZE;
8258 vpd = kzalloc(vpd_size, GFP_KERNEL);
8259 if (!vpd) {
8260 rc = -ENOMEM;
8261 goto out_free_mbox;
8262 }
8263
8264 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8265 if (unlikely(rc)) {
8266 kfree(vpd);
8267 goto out_free_mbox;
8268 }
8269
8270 mqe = &mboxq->u.mqe;
8271 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8272 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8273 phba->hba_flag |= HBA_FCOE_MODE;
8274 phba->fcp_embed_io = 0;
8275 } else {
8276 phba->hba_flag &= ~HBA_FCOE_MODE;
8277 }
8278
8279 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8280 LPFC_DCBX_CEE_MODE)
8281 phba->hba_flag |= HBA_FIP_SUPPORT;
8282 else
8283 phba->hba_flag &= ~HBA_FIP_SUPPORT;
8284
8285 phba->hba_flag &= ~HBA_IOQ_FLUSH;
8286
8287 if (phba->sli_rev != LPFC_SLI_REV4) {
8288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8289 "0376 READ_REV Error. SLI Level %d "
8290 "FCoE enabled %d\n",
8291 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
8292 rc = -EIO;
8293 kfree(vpd);
8294 goto out_free_mbox;
8295 }
8296
8297 rc = lpfc_set_host_tm(phba);
8298 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8299 "6468 Set host date / time: Status x%x:\n", rc);
8300
8301 /*
8302  * Continue initialization with default values even if the driver
8303  * fails to read the FCoE param config regions; only read the
8304  * parameters when the board is FCoE.
8305  */
8306 if (phba->hba_flag & HBA_FCOE_MODE &&
8307 lpfc_sli4_read_fcoe_params(phba))
8308 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8309 "2570 Failed to read FCoE parameters\n");
8310
8311 /*
8312  * Retrieve the sli4 device physical port name; failure to
8313  * do so is considered non-fatal.
8314  */
8315 rc = lpfc_sli4_retrieve_pport_name(phba);
8316 if (!rc)
8317 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8318 "3080 Successful retrieving SLI4 device "
8319 "physical port name: %s.\n", phba->Port);
8320
8321 rc = lpfc_sli4_get_ctl_attr(phba);
8322 if (!rc)
8323 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8324 "8351 Successful retrieving SLI4 device "
8325 "CTL ATTR\n");
8326
8327 /*
8328  * Evaluate the read rev and vpd data.  Populate the driver
8329  * state with the results.  If this routine fails, the failure
8330  * is not fatal as the driver will use generic values.
8331  */
8332 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8333 if (unlikely(!rc)) {
8334 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8335 "0377 Error %d parsing vpd. "
8336 "Using defaults.\n", rc);
8337 rc = 0;
8338 }
8339 kfree(vpd);
8340
8341
8342 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8343 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8344
8345 /*
8346  * The first G7 ASIC revision cannot use embedded NVME commands,
8347  * so fall back to non-embedded commands on that hardware.
8348  */
8349 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8350 LPFC_SLI_INTF_IF_TYPE_6) &&
8351 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8352 (phba->vpd.rev.smRev == 0) &&
8353 (phba->cfg_nvme_embed_cmd == 1))
8354 phba->cfg_nvme_embed_cmd = 0;
8355
8356 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8357 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8358 &mqe->un.read_rev);
8359 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8360 &mqe->un.read_rev);
8361 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8362 &mqe->un.read_rev);
8363 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8364 &mqe->un.read_rev);
8365 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8366 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8367 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8368 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8369 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8370 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8371 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8372 "(%d):0380 READ_REV Status x%x "
8373 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8374 mboxq->vport ? mboxq->vport->vpi : 0,
8375 bf_get(lpfc_mqe_status, mqe),
8376 phba->vpd.rev.opFwName,
8377 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8378 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8379
8380 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8381 LPFC_SLI_INTF_IF_TYPE_0) {
8382 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8383 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8384 if (rc == MBX_SUCCESS) {
8385 phba->hba_flag |= HBA_RECOVERABLE_UE;
8386
8387 phba->eratt_poll_interval = 1;
8388 phba->sli4_hba.ue_to_sr = bf_get(
8389 lpfc_mbx_set_feature_UESR,
8390 &mboxq->u.mqe.un.set_feature);
8391 phba->sli4_hba.ue_to_rp = bf_get(
8392 lpfc_mbx_set_feature_UERP,
8393 &mboxq->u.mqe.un.set_feature);
8394 }
8395 }
8396
8397 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8398
8399 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8400 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8401 if (rc != MBX_SUCCESS)
8402 phba->mds_diags_support = 0;
8403 }
8404
8405 /*
8406  * Discover the port's supported feature set and match it
8407  * against the host's requests.
8408  */
8409 lpfc_request_features(phba, mboxq);
8410 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8411 if (unlikely(rc)) {
8412 rc = -EIO;
8413 goto out_free_mbox;
8414 }
8415
8416
8417 if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8418 &mqe->un.req_ftrs))) {
8419 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8420 phba->cfg_vmid_app_header = 0;
8421 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8422 "1242 vmid feature not supported\n");
8423 }
8424
8425 /*
8426  * The port must support FCP initiator mode as this is the
8427  * only mode running in the host.
8428  */
8429 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8430 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8431 "0378 No support for fcpi mode.\n");
8432 ftr_rsp++;
8433 }
8434
8435
8436 if (phba->hba_flag & HBA_FCOE_MODE) {
8437 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8438 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8439 else
8440 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8441 }
8442
8443 /*
8444  * If the port cannot support the host's requested features
8445  * then turn off the global config parameters to disable the
8446  * feature in the driver.  This is not a fatal error.
8447  */
8448 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8449 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8450 phba->cfg_enable_bg = 0;
8451 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8452 ftr_rsp++;
8453 }
8454 }
8455
8456 if (phba->max_vpi && phba->cfg_enable_npiv &&
8457 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8458 ftr_rsp++;
8459
8460 if (ftr_rsp) {
8461 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8462 "0379 Feature Mismatch Data: x%08x %08x "
8463 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8464 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8465 phba->cfg_enable_npiv, phba->max_vpi);
8466 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8467 phba->cfg_enable_bg = 0;
8468 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8469 phba->cfg_enable_npiv = 0;
8470 }
8471
8472 /* These SLI3 features are assumed in SLI4 */
8473 spin_lock_irq(&phba->hbalock);
8474 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8475 spin_unlock_irq(&phba->hbalock);
8476
8477 /* Always try to enable the dual dump feature if we can */
8478 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8479 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8480 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8481 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8482 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8483 "6448 Dual Dump is enabled\n");
8484 else
8485 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8486 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8487 "rc:x%x dd:x%x\n",
8488 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8489 lpfc_sli_config_mbox_subsys_get(
8490 phba, mboxq),
8491 lpfc_sli_config_mbox_opcode_get(
8492 phba, mboxq),
8493 rc, dd);
8494
8495 /* Allocate all resources (xri, rpi, vpi, vfi) now.  Subsequent
8496  * calls depend on these resources to complete port setup.
8497  */
8498 rc = lpfc_sli4_alloc_resource_identifiers(phba);
8499 if (rc) {
8500 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8501 "2920 Failed to alloc Resource IDs "
8502 "rc = x%x\n", rc);
8503 goto out_free_mbox;
8504 }
8505
8506 lpfc_set_host_data(phba, mboxq);
8507
8508 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8509 if (rc) {
8510 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8511 "2134 Failed to set host os driver version %x",
8512 rc);
8513 }
8514
8515 /* Read the port's service parameters. */
8516 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8517 if (rc) {
8518 phba->link_state = LPFC_HBA_ERROR;
8519 rc = -ENOMEM;
8520 goto out_free_mbox;
8521 }
8522
8523 mboxq->vport = vport;
8524 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8525 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
8526 if (rc == MBX_SUCCESS) {
8527 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8528 rc = 0;
8529 }
8530
8531 /*
8532  * This memory was allocated by the lpfc_read_sparam routine.
8533  * Release it to the mbuf pool whether or not the mailbox
8534  * succeeded.
8535  */
8536 lpfc_mbuf_free(phba, mp->virt, mp->phys);
8537 kfree(mp);
8538 mboxq->ctx_buf = NULL;
8539 if (unlikely(rc)) {
8540 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8541 "0382 READ_SPARAM command failed "
8542 "status %d, mbxStatus x%x\n",
8543 rc, bf_get(lpfc_mqe_status, mqe));
8544 phba->link_state = LPFC_HBA_ERROR;
8545 rc = -EIO;
8546 goto out_free_mbox;
8547 }
8548
8549 lpfc_update_vport_wwn(vport);
8550
8551
8552 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8553 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8554
8555
8556 rc = lpfc_sli4_queue_create(phba);
8557 if (rc) {
8558 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8559 "3089 Failed to allocate queues\n");
8560 rc = -ENODEV;
8561 goto out_free_mbox;
8562 }
8563
8564 rc = lpfc_sli4_queue_setup(phba);
8565 if (unlikely(rc)) {
8566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8567 "0381 Error %d during queue setup.\n ", rc);
8568 goto out_stop_timers;
8569 }
8570
8571 lpfc_sli4_setup(phba);
8572 lpfc_sli4_queue_init(phba);
8573
8574
8575 rc = lpfc_sli4_els_sgl_update(phba);
8576 if (unlikely(rc)) {
8577 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8578 "1400 Failed to update xri-sgl size and "
8579 "mapping: %d\n", rc);
8580 goto out_destroy_queue;
8581 }
8582
8583
8584 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8585 phba->sli4_hba.els_xri_cnt);
8586 if (unlikely(rc < 0)) {
8587 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8588 "0582 Error %d during els sgl post "
8589 "operation\n", rc);
8590 rc = -ENODEV;
8591 goto out_destroy_queue;
8592 }
8593 phba->sli4_hba.els_xri_cnt = rc;
8594
8595 if (phba->nvmet_support) {
8596
8597 rc = lpfc_sli4_nvmet_sgl_update(phba);
8598 if (unlikely(rc)) {
8599 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8600 "6308 Failed to update nvmet-sgl size "
8601 "and mapping: %d\n", rc);
8602 goto out_destroy_queue;
8603 }
8604
8605
8606 rc = lpfc_sli4_repost_sgl_list(
8607 phba,
8608 &phba->sli4_hba.lpfc_nvmet_sgl_list,
8609 phba->sli4_hba.nvmet_xri_cnt);
8610 if (unlikely(rc < 0)) {
8611 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8612 "3117 Error %d during nvmet "
8613 "sgl post\n", rc);
8614 rc = -ENODEV;
8615 goto out_destroy_queue;
8616 }
8617 phba->sli4_hba.nvmet_xri_cnt = rc;
8618 /*
8619  * Allocate an iocbq for every receive-context SGL; the extra
8620  * max_xri worth of entries covers abort and LS handling.
8621  */
8622 cnt = phba->sli4_hba.nvmet_xri_cnt +
8623 phba->sli4_hba.max_cfg_param.max_xri;
8624 } else {
8625
8626 rc = lpfc_sli4_io_sgl_update(phba);
8627 if (unlikely(rc)) {
8628 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8629 "6082 Failed to update nvme-sgl size "
8630 "and mapping: %d\n", rc);
8631 goto out_destroy_queue;
8632 }
8633
8634
8635 rc = lpfc_sli4_repost_io_sgl_list(phba);
8636 if (unlikely(rc)) {
8637 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8638 "6116 Error %d during nvme sgl post "
8639 "operation\n", rc);
8640
8641
8642 rc = -ENODEV;
8643 goto out_destroy_queue;
8644 }
8645
8646
8647
8648 cnt = phba->sli4_hba.max_cfg_param.max_xri;
8649 }
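/* cnt now bounds the iocb lookup array: one slot per possible XRI,
 * plus the nvmet receive-context XRIs when running as a target.
 */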
8650
8651 if (!phba->sli.iocbq_lookup) {
8652
8653 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8654 "2821 initialize iocb list with %d entries\n",
8655 cnt);
8656 rc = lpfc_init_iocb_list(phba, cnt);
8657 if (rc) {
8658 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8659 "1413 Failed to init iocb list.\n");
8660 goto out_destroy_queue;
8661 }
8662 }
8663
8664 if (phba->nvmet_support)
8665 lpfc_nvmet_create_targetport(phba);
8666
8667 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8668
8669 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8670 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8671 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8672 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8673 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8674 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8675 rqbp->buffer_count = 0;
8676
8677 lpfc_post_rq_buffer(
8678 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8679 phba->sli4_hba.nvmet_mrq_data[i],
8680 phba->cfg_nvmet_mrq_post, i);
8681 }
8682 }
8683
8684
8685 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8686 if (unlikely(rc)) {
8687 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8688 "0393 Error %d during rpi post operation\n",
8689 rc);
8690 rc = -ENODEV;
8691 goto out_free_iocblist;
8692 }
8693 lpfc_sli4_node_prep(phba);
8694
8695 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
8696 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8697 /*
8698  * The FC Port needs to register FCFI (index 0)
8699  */
8700 lpfc_reg_fcfi(phba, mboxq);
8701 mboxq->vport = phba->pport;
8702 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8703 if (rc != MBX_SUCCESS)
8704 goto out_unset_queue;
8705 rc = 0;
8706 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8707 &mboxq->u.mqe.un.reg_fcfi);
8708 } else {
8709
8710
8711
8712 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8713 mboxq->vport = phba->pport;
8714 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8715 if (rc != MBX_SUCCESS)
8716 goto out_unset_queue;
8717 rc = 0;
8718 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8719 &mboxq->u.mqe.un.reg_fcfi_mrq);
8720
8721
8722 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8723 mboxq->vport = phba->pport;
8724 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8725 if (rc != MBX_SUCCESS)
8726 goto out_unset_queue;
8727 rc = 0;
8728 }
8729
8730 lpfc_sli_read_link_ste(phba);
8731 }
8732
8733 /* Don't post more new bufs if the earlier repost already
8734  * recovered the IO buffer sgls.
8735  */
8736 if (phba->nvmet_support == 0) {
8737 if (phba->sli4_hba.io_xri_cnt == 0) {
8738 len = lpfc_new_io_buf(
8739 phba, phba->sli4_hba.io_xri_max);
8740 if (len == 0) {
8741 rc = -ENOMEM;
8742 goto out_unset_queue;
8743 }
8744
8745 if (phba->cfg_xri_rebalancing)
8746 lpfc_create_multixri_pools(phba);
8747 }
8748 } else {
8749 phba->cfg_xri_rebalancing = 0;
8750 }
8751
8752
8753 spin_lock_irq(&phba->hbalock);
8754 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8755 spin_unlock_irq(&phba->hbalock);
8756
8757
8758 lpfc_sli4_rb_setup(phba);
8759
8760
8761 phba->fcf.fcf_flag = 0;
8762 phba->fcf.current_rec.flag = 0;
8763
8764
8765 mod_timer(&vport->els_tmofunc,
8766 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
8767
8768
8769 mod_timer(&phba->hb_tmofunc,
8770 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
8771 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
8772 phba->last_completion_time = jiffies;
8773
8774
8775 if (phba->cfg_auto_imax)
8776 queue_delayed_work(phba->wq, &phba->eq_delay_work,
8777 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8778
8779
8780 lpfc_init_idle_stat_hb(phba);
8781
8782
8783 mod_timer(&phba->eratt_poll,
8784 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
8785
8786
8787 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8788 rc = pci_enable_pcie_error_reporting(phba->pcidev);
8789 if (!rc) {
8790 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8791 "2829 This device supports "
8792 "Advanced Error Reporting (AER)\n");
8793 spin_lock_irq(&phba->hbalock);
8794 phba->hba_flag |= HBA_AER_ENABLED;
8795 spin_unlock_irq(&phba->hbalock);
8796 } else {
8797 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8798 "2830 This device does not support "
8799 "Advanced Error Reporting (AER)\n");
8800 phba->cfg_aer_support = 0;
8801 }
8802 rc = 0;
8803 }
8804
8805 /*
8806  * The port is ready, set the host's link state to LINK_DOWN
8807  * in preparation for link interrupts.
8808  */
8809 spin_lock_irq(&phba->hbalock);
8810 phba->link_state = LPFC_LINK_DOWN;
8811
8812
8813 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8814 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8815 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8816 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8817 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8818 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8819 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8820 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
8821 spin_unlock_irq(&phba->hbalock);
8822
8823 /* Arm the CQs and then EQs on the device */
8824 lpfc_sli4_arm_cqeq_intr(phba);
8825
8826 /* Indicate device interrupt mode */
8827 phba->sli4_hba.intr_enable = 1;
8828
8829
8830 lpfc_cmf_setup(phba);
8831
8832 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8833 (phba->hba_flag & LINK_DISABLED)) {
8834 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8835 "3103 Adapter Link is disabled.\n");
8836 lpfc_down_link(phba, mboxq);
8837 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8838 if (rc != MBX_SUCCESS) {
8839 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8840 "3104 Adapter failed to issue "
8841 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
8842 goto out_io_buff_free;
8843 }
8844 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
8845
8846 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8847 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8848 if (rc)
8849 goto out_io_buff_free;
8850 }
8851 }
8852 mempool_free(mboxq, phba->mbox_mem_pool);
8853
8854
8855 lpfc_sli4_ras_setup(phba);
8856
8857 phba->hba_flag |= HBA_SETUP;
8858 return rc;
8859
8860 out_io_buff_free:
8861
8862 lpfc_io_free(phba);
8863 out_unset_queue:
8864
8865 lpfc_sli4_queue_unset(phba);
8866 out_free_iocblist:
8867 lpfc_free_iocb_list(phba);
8868 out_destroy_queue:
8869 lpfc_sli4_queue_destroy(phba);
8870 out_stop_timers:
8871 lpfc_stop_hba_timers(phba);
8872 out_free_mbox:
8873 mempool_free(mboxq, phba->mbox_mem_pool);
8874 return rc;
8875 }
8876
8877 /**
8878  * lpfc_mbox_timeout - Timeout call back function for mbox timer
8879  * @t: pointer to the timer embedded in the SLI context object.
8880  *
8881  * This function is invoked from timer context when the mailbox
8882  * timeout timer fires.  It posts the WORKER_MBOX_TMO event to the
8883  * worker thread, which processes the timeout through the
8884  * lpfc_mbox_timeout_handler routine.  Only one timeout event is
8885  * posted at a time; a duplicate timer pop while one is pending
8886  * is ignored.
8887  *
8888  **/
8889 void
8890 lpfc_mbox_timeout(struct timer_list *t)
8891 {
8892 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
8893 unsigned long iflag;
8894 uint32_t tmo_posted;
8895
8896 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8897 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8898 if (!tmo_posted)
8899 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8900 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8901
8902 if (!tmo_posted)
8903 lpfc_worker_wake_up(phba);
8904 return;
8905 }
8906
8907 /**
8908  * lpfc_sli4_mbox_completions_pending - check for pending mbox completions
8909  * @phba: Pointer to HBA context object.
8910  *
8911  * This function walks the valid entries of the mailbox completion
8912  * queue and returns true if a completed, non-async mailbox command
8913  * is present that has not yet been processed.
8914  **/
8915 static bool
8916 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8917 {
8918
8919 uint32_t idx;
8920 struct lpfc_queue *mcq;
8921 struct lpfc_mcqe *mcqe;
8922 bool pending_completions = false;
8923 uint8_t qe_valid;
8924
8925 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8926 return false;
8927
8928
8929 /* Check for completions on the mailbox completion queue */
8930 mcq = phba->sli4_hba.mbx_cq;
8931 idx = mcq->hba_index;
8932 qe_valid = mcq->qe_valid;
8933 while (bf_get_le32(lpfc_cqe_valid,
8934 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8935 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8936 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8937 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8938 pending_completions = true;
8939 break;
8940 }
8941 idx = (idx + 1) % mcq->entry_count;
8942 if (mcq->hba_index == idx)
8943 break;
8944 /* if the index wrapped around, toggle the valid bit on
8945  * queues that use auto-valid (phase) entries */
8946 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8947 qe_valid = (qe_valid) ? 0 : 1;
8948 }
8949 return pending_completions;
8950
8951 }
8952
8953 /**
8954  * lpfc_sli4_process_missed_mbox_completions - process missed completions
8955  * @phba: Pointer to HBA context object.
8956  *
8957  * For SLI4 ports it is possible to miss an interrupt, leaving a
8958  * completed mailbox command sitting on the completion queue and
8959  * causing a spurious mailbox timeout.  This function checks the
8960  * mailbox completion queue and, if completions are pending, runs
8961  * the event queue associated with it.  Returns true if a pending
8962  * completion was found and processed.
8963  **/
8964 static bool
8965 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8966 {
8967 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8968 uint32_t eqidx;
8969 struct lpfc_queue *fpeq = NULL;
8970 struct lpfc_queue *eq;
8971 bool mbox_pending;
8972
8973 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8974 return false;
8975
8976 /* Find the EQ associated with the mailbox completion queue */
8977 if (sli4_hba->hdwq) {
8978 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8979 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8980 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8981 fpeq = eq;
8982 break;
8983 }
8984 }
8985 }
8986 if (!fpeq)
8987 return false;
8988
8989
8990 /* Turn off interrupts from this EQ while it is polled */
8991 sli4_hba->sli4_eq_clr_intr(fpeq);
8992
8993
8994 /* Check to see if a mbox completion is pending */
8995 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8996
8997 /*
8998  * If a mbox completion is pending, process all the events on
8999  * the EQ associated with the mbox completion queue (this could
9000  * include mailbox commands, async events, els commands and
9001  * receive queue data) and re-arm the EQ; otherwise just clear
9002  * and re-arm the EQ without consuming any entries.
9003  */
9004 if (mbox_pending)
9005
9006 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
9007 else
9008
9009 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
9010
9011 return mbox_pending;
9012
9013 }
9014
9015 /**
9016  * lpfc_mbox_timeout_handler - Worker thread function to handle mbox timeout
9017  * @phba: Pointer to HBA context object.
9018  *
9019  * This function is called from the worker thread when a mailbox
9020  * command times out.  After checking for missed completions it logs
9021  * the timeout, marks the SLI layer inactive and resets the HBA.
9022  **/
9023 void
9024 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
9025 {
9026 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
9027 MAILBOX_t *mb = NULL;
9028
9029 struct lpfc_sli *psli = &phba->sli;
9030
9031
9032 lpfc_sli4_process_missed_mbox_completions(phba);
9033
9034 if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
9035 return;
9036
9037 if (pmbox != NULL)
9038 mb = &pmbox->u.mb;
9039
9040
9041
9042
9043
9044 spin_lock_irq(&phba->hbalock);
9045 if (pmbox == NULL) {
9046 lpfc_printf_log(phba, KERN_WARNING,
9047 LOG_MBOX | LOG_SLI,
9048 "0353 Active Mailbox cleared - mailbox timeout "
9049 "exiting\n");
9050 spin_unlock_irq(&phba->hbalock);
9051 return;
9052 }
9053
9054
9055 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9056 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
9057 mb->mbxCommand,
9058 phba->pport->port_state,
9059 phba->sli.sli_flag,
9060 phba->sli.mbox_active);
9061 spin_unlock_irq(&phba->hbalock);
9062
9063 /* Clear the pending timeout event, mark the link state
9064  * unknown and take the SLI layer out of the active state
9065  * so outstanding IO fails fast while the board is reset.
9066  */
9067 spin_lock_irq(&phba->pport->work_port_lock);
9068 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9069 spin_unlock_irq(&phba->pport->work_port_lock);
9070 spin_lock_irq(&phba->hbalock);
9071 phba->link_state = LPFC_LINK_UNKNOWN;
9072 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9073 spin_unlock_irq(&phba->hbalock);
9074
9075 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9076 "0345 Resetting board due to mailbox timeout\n");
9077
9078
9079 lpfc_reset_hba(phba);
9080 }
9081
9082 /**
9083  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9084  * @phba: Pointer to HBA context object.
9085  * @pmbox: Pointer to mailbox object, or NULL to service the mbox queue.
9086  * @flag: Flag indicating how the mailbox needs to be processed.
9087  *
9088  * This function is called by discovery code and HBA management code
9089  * to submit a mailbox command to firmware with the SLI-3 interface
9090  * spec.  It takes the hbalock to protect the driver mailbox state.
9091  *
9092  * The mailbox can be submitted in polling mode (MBX_POLL), in which
9093  * case this function busy-waits for the command to complete and
9094  * returns the mailbox status.  If the command is submitted in
9095  * no-wait mode (MBX_NOWAIT), the function rings the mailbox
9096  * attention doorbell and returns immediately; the completion is
9097  * delivered later through the command's completion handler.  The
9098  * no-wait mode is supported only when the HBA is in SLI2/SLI3 mode
9099  * with interrupts enabled.
9100  *
9101  * The SLI interface allows only one mailbox command to be active at
9102  * a time.  If a command is issued in polling mode while another
9103  * mailbox is pending, the function returns an error; in no-wait
9104  * mode the command is queued and MBX_BUSY is returned.
9105  *
9106  * Returns MBX_SUCCESS, MBX_BUSY, the mailbox status, or MBX_NOT_FINISHED.
9107  **/
9108 static int
9109 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9110 uint32_t flag)
9111 {
9112 MAILBOX_t *mbx;
9113 struct lpfc_sli *psli = &phba->sli;
9114 uint32_t status, evtctr;
9115 uint32_t ha_copy, hc_copy;
9116 int i;
9117 unsigned long timeout;
9118 unsigned long drvr_flag = 0;
9119 uint32_t word0, ldata;
9120 void __iomem *to_slim;
9121 int processing_queue = 0;
9122
9123 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9124 if (!pmbox) {
9125 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9126
9127 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9128 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9129 return MBX_SUCCESS;
9130 }
9131 processing_queue = 1;
9132 pmbox = lpfc_mbox_get(phba);
9133 if (!pmbox) {
9134 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9135 return MBX_SUCCESS;
9136 }
9137 }
9138
9139 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9140 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9141 if(!pmbox->vport) {
9142 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9143 lpfc_printf_log(phba, KERN_ERR,
9144 LOG_MBOX | LOG_VPORT,
9145 "1806 Mbox x%x failed. No vport\n",
9146 pmbox->u.mb.mbxCommand);
9147 dump_stack();
9148 goto out_not_finished;
9149 }
9150 }
9151
9152
9153 if (unlikely(pci_channel_offline(phba->pcidev))) {
9154 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9155 goto out_not_finished;
9156 }
9157
9158
9159 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
9160 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9161 goto out_not_finished;
9162 }
9163
9164 psli = &phba->sli;
9165
9166 mbx = &pmbox->u.mb;
9167 status = MBX_SUCCESS;
9168
9169 if (phba->link_state == LPFC_HBA_ERROR) {
9170 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9171
9172
9173 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9174 "(%d):0311 Mailbox command x%x cannot "
9175 "issue Data: x%x x%x\n",
9176 pmbox->vport ? pmbox->vport->vpi : 0,
9177 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9178 goto out_not_finished;
9179 }
9180
9181 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9182 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9183 !(hc_copy & HC_MBINT_ENA)) {
9184 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9185 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9186 "(%d):2528 Mailbox command x%x cannot "
9187 "issue Data: x%x x%x\n",
9188 pmbox->vport ? pmbox->vport->vpi : 0,
9189 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9190 goto out_not_finished;
9191 }
9192 }
9193
9194 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9195
9196
9197
9198
9199
9200 if (flag & MBX_POLL) {
9201 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9202
9203
9204 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9205 "(%d):2529 Mailbox command x%x "
9206 "cannot issue Data: x%x x%x\n",
9207 pmbox->vport ? pmbox->vport->vpi : 0,
9208 pmbox->u.mb.mbxCommand,
9209 psli->sli_flag, flag);
9210 goto out_not_finished;
9211 }
9212
9213 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9214 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9215
9216 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9217 "(%d):2530 Mailbox command x%x "
9218 "cannot issue Data: x%x x%x\n",
9219 pmbox->vport ? pmbox->vport->vpi : 0,
9220 pmbox->u.mb.mbxCommand,
9221 psli->sli_flag, flag);
9222 goto out_not_finished;
9223 }
9224
9225
9226
9227
9228 lpfc_mbox_put(phba, pmbox);
9229
9230
9231 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9232 "(%d):0308 Mbox cmd issue - BUSY Data: "
9233 "x%x x%x x%x x%x\n",
9234 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9235 mbx->mbxCommand,
9236 phba->pport ? phba->pport->port_state : 0xff,
9237 psli->sli_flag, flag);
9238
9239 psli->slistat.mbox_busy++;
9240 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9241
9242 if (pmbox->vport) {
9243 lpfc_debugfs_disc_trc(pmbox->vport,
9244 LPFC_DISC_TRC_MBOX_VPORT,
9245 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
9246 (uint32_t)mbx->mbxCommand,
9247 mbx->un.varWords[0], mbx->un.varWords[1]);
9248 }
9249 else {
9250 lpfc_debugfs_disc_trc(phba->pport,
9251 LPFC_DISC_TRC_MBOX,
9252 "MBOX Bsy: cmd:x%x mb:x%x x%x",
9253 (uint32_t)mbx->mbxCommand,
9254 mbx->un.varWords[0], mbx->un.varWords[1]);
9255 }
9256
9257 return MBX_BUSY;
9258 }
9259
9260 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9261
9262
9263 if (flag != MBX_POLL) {
9264 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9265 (mbx->mbxCommand != MBX_KILL_BOARD)) {
9266 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9267 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9268
9269 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9270 "(%d):2531 Mailbox command x%x "
9271 "cannot issue Data: x%x x%x\n",
9272 pmbox->vport ? pmbox->vport->vpi : 0,
9273 pmbox->u.mb.mbxCommand,
9274 psli->sli_flag, flag);
9275 goto out_not_finished;
9276 }
9277
9278 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9279 1000);
9280 mod_timer(&psli->mbox_tmo, jiffies + timeout);
9281 }
9282
9283
9284 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9285 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9286 "x%x\n",
9287 pmbox->vport ? pmbox->vport->vpi : 0,
9288 mbx->mbxCommand,
9289 phba->pport ? phba->pport->port_state : 0xff,
9290 psli->sli_flag, flag);
9291
9292 if (mbx->mbxCommand != MBX_HEARTBEAT) {
9293 if (pmbox->vport) {
9294 lpfc_debugfs_disc_trc(pmbox->vport,
9295 LPFC_DISC_TRC_MBOX_VPORT,
9296 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9297 (uint32_t)mbx->mbxCommand,
9298 mbx->un.varWords[0], mbx->un.varWords[1]);
9299 }
9300 else {
9301 lpfc_debugfs_disc_trc(phba->pport,
9302 LPFC_DISC_TRC_MBOX,
9303 "MBOX Send: cmd:x%x mb:x%x x%x",
9304 (uint32_t)mbx->mbxCommand,
9305 mbx->un.varWords[0], mbx->un.varWords[1]);
9306 }
9307 }
9308
9309 psli->slistat.mbox_cmd++;
9310 evtctr = psli->slistat.mbox_event;
9311
9312
9313 mbx->mbxOwner = OWN_CHIP;
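/* Hand the mailbox to the chip.  Ownership returns to the host when
 * the chip clears OWN_CHIP in word 0 of the command on completion.
 */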
9314
9315 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9316
9317 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9318 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9319 = (uint8_t *)phba->mbox_ext
9320 - (uint8_t *)phba->mbox;
9321 }
9322
9323
9324 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
9325 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
9326 (uint8_t *)phba->mbox_ext,
9327 pmbox->in_ext_byte_len);
9328 }
9329
9330 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9331 } else {
9332
9333 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9334 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9335 = MAILBOX_HBA_EXT_OFFSET;
9336
9337
9338 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
9339 lpfc_memcpy_to_slim(phba->MBslimaddr +
9340 MAILBOX_HBA_EXT_OFFSET,
9341 pmbox->ctx_buf, pmbox->in_ext_byte_len);
9342
9343 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9344
9345 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9346 MAILBOX_CMD_SIZE);
9347
9348 /* First copy the mbox command data to HBA SLIM, skipping
9349  * past the first word */
9350 to_slim = phba->MBslimaddr + sizeof (uint32_t);
9351 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9352 MAILBOX_CMD_SIZE - sizeof (uint32_t));
9353
9354 /* Next copy over the first word, with mbxOwner set */
9355 ldata = *((uint32_t *)mbx);
9356 to_slim = phba->MBslimaddr;
9357 writel(ldata, to_slim);
9358 readl(to_slim);
9359
9360 if (mbx->mbxCommand == MBX_CONFIG_PORT)
9361
9362 psli->sli_flag |= LPFC_SLI_ACTIVE;
9363 }
9364
9365 wmb();
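/* Order the mailbox/SLIM writes above ahead of the doorbell ring
 * below so the chip cannot see the attention bit before the command
 * image is fully visible.
 */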
9366
9367 switch (flag) {
9368 case MBX_NOWAIT:
9369
9370 psli->mbox_active = pmbox;
9371
9372 writel(CA_MBATT, phba->CAregaddr);
9373 readl(phba->CAregaddr);
9374
9375 break;
9376
9377 case MBX_POLL:
9378
9379 psli->mbox_active = NULL;
9380
9381 writel(CA_MBATT, phba->CAregaddr);
9382 readl(phba->CAregaddr);
9383
9384 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9385 /* First read mbox status word from the host mailbox */
9386 word0 = *((uint32_t *)phba->mbox);
9387 word0 = le32_to_cpu(word0);
9388 } else {
9389
9390 if (lpfc_readl(phba->MBslimaddr, &word0)) {
9391 spin_unlock_irqrestore(&phba->hbalock,
9392 drvr_flag);
9393 goto out_not_finished;
9394 }
9395 }
9396
9397
9398 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9399 spin_unlock_irqrestore(&phba->hbalock,
9400 drvr_flag);
9401 goto out_not_finished;
9402 }
9403 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9404 1000) + jiffies;
9405 i = 0;
9406 /* Wait for the command to complete */
9407 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9408 (!(ha_copy & HA_MBATT) &&
9409 (phba->link_state > LPFC_WARM_START))) {
9410 if (time_after(jiffies, timeout)) {
9411 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9412 spin_unlock_irqrestore(&phba->hbalock,
9413 drvr_flag);
9414 goto out_not_finished;
9415 }
9416
9417 /* Check if we took a mbox interrupt while we
9418  * were polling */
9419 if (((word0 & OWN_CHIP) != OWN_CHIP)
9420 && (evtctr != psli->slistat.mbox_event))
9421 break;
9422
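/* Back off after the first several iterations: drop the lock and
 * sleep 1 ms per loop so a slow mailbox does not monopolize the cpu
 * with the hbalock held.
 */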
9423 if (i++ > 10) {
9424 spin_unlock_irqrestore(&phba->hbalock,
9425 drvr_flag);
9426 msleep(1);
9427 spin_lock_irqsave(&phba->hbalock, drvr_flag);
9428 }
9429
9430 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9431
9432 word0 = *((uint32_t *)phba->mbox);
9433 word0 = le32_to_cpu(word0);
9434 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9435 MAILBOX_t *slimmb;
9436 uint32_t slimword0;
9437
9438 slimword0 = readl(phba->MBslimaddr);
9439 slimmb = (MAILBOX_t *) & slimword0;
9440 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9441 && slimmb->mbxStatus) {
9442 psli->sli_flag &=
9443 ~LPFC_SLI_ACTIVE;
9444 word0 = slimword0;
9445 }
9446 }
9447 } else {
9448
9449 word0 = readl(phba->MBslimaddr);
9450 }
9451
9452 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9453 spin_unlock_irqrestore(&phba->hbalock,
9454 drvr_flag);
9455 goto out_not_finished;
9456 }
9457 }
9458
9459 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9460 /* copy results back to the caller's mailbox */
9461 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9462 MAILBOX_CMD_SIZE);
9463
9464 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9465 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9466 pmbox->ctx_buf,
9467 pmbox->out_ext_byte_len);
9468 }
9469 } else {
9470
9471 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9472 MAILBOX_CMD_SIZE);
9473
9474 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
9475 lpfc_memcpy_from_slim(
9476 pmbox->ctx_buf,
9477 phba->MBslimaddr +
9478 MAILBOX_HBA_EXT_OFFSET,
9479 pmbox->out_ext_byte_len);
9480 }
9481 }
9482
9483 writel(HA_MBATT, phba->HAregaddr);
9484 readl(phba->HAregaddr);
9485
9486 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9487 status = mbx->mbxStatus;
9488 }
9489
9490 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9491 return status;
9492
9493 out_not_finished:
9494 if (processing_queue) {
9495 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9496 lpfc_mbox_cmpl_put(phba, pmbox);
9497 }
9498 return MBX_NOT_FINISHED;
9499 }
9500
9501 /**
9502  * lpfc_sli4_async_mbox_block - Block posting async mailbox commands
9503  * @phba: Pointer to HBA context object.
9504  *
9505  * This function blocks the posting of any new asynchronous mailbox
9506  * commands into the SLI4 port's mailbox command queue, then waits
9507  * for the outstanding mailbox command, if any, to finish before
9508  * returning.  If the wait times out, the block is lifted again so
9509  * the mailbox subsystem is not left wedged.
9510  *
9511  * Returns 0 when no command remains outstanding, 1 on timeout.
9512  **/
9513 static int
9514 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9515 {
9516 struct lpfc_sli *psli = &phba->sli;
9517 LPFC_MBOXQ_t *mboxq;
9518 int rc = 0;
9519 unsigned long timeout = 0;
9520 u32 sli_flag;
9521 u8 cmd, subsys, opcode;
9522
9523 /* Mark the asynchronous mailbox command posting as blocked */
9524 spin_lock_irq(&phba->hbalock);
9525 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9526
9527 /* Determine how long we might wait for the active mailbox
9528  * command to be gracefully completed by the firmware. */
9529 if (phba->sli.mbox_active)
9530 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9531 phba->sli.mbox_active) *
9532 1000) + jiffies;
9533 spin_unlock_irq(&phba->hbalock);
9534
9535
9536 /* Make sure the mailbox is really active before waiting on it */
9537 lpfc_sli4_process_missed_mbox_completions(phba);
9538
9539 /* Wait for the outstanding mailbox command to complete */
9540 while (phba->sli.mbox_active) {
9541 /* Check active mailbox complete status every 2ms */
9542 msleep(2);
9543 if (time_after(jiffies, timeout)) {
9544 /*
9545  * Timeout.  Sanity-check that sli.mbox_active has not
9546  * completed or been cancelled from another context
9547  * during the last 2 ms sleep, so take the hbalock to
9548  * be sure before logging the failure.
9549  */
9550 spin_lock_irq(&phba->hbalock);
9551 if (phba->sli.mbox_active) {
9552 mboxq = phba->sli.mbox_active;
9553 cmd = mboxq->u.mb.mbxCommand;
9554 subsys = lpfc_sli_config_mbox_subsys_get(phba,
9555 mboxq);
9556 opcode = lpfc_sli_config_mbox_opcode_get(phba,
9557 mboxq);
9558 sli_flag = psli->sli_flag;
9559 spin_unlock_irq(&phba->hbalock);
9560 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9561 "2352 Mailbox command x%x "
9562 "(x%x/x%x) sli_flag x%x could "
9563 "not complete\n",
9564 cmd, subsys, opcode,
9565 sli_flag);
9566 } else {
9567 spin_unlock_irq(&phba->hbalock);
9568 }
9569
9570 rc = 1;
9571 break;
9572 }
9573 }
9574
9575
9576 if (rc) {
9577 spin_lock_irq(&phba->hbalock);
9578 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9579 spin_unlock_irq(&phba->hbalock);
9580 }
9581 return rc;
9582 }
9583
9584
9585
9586
9587
9588
9589
9590
9591
9592
9593
9594
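/**
 * lpfc_sli4_async_mbox_unblock - Resume posting of SLI4 async mailbox cmds
 * @phba: Pointer to HBA context object.
 *
 * Clears the async-mailbox block flag set by lpfc_sli4_async_mbox_block()
 * and wakes up the worker thread so that queued mailbox commands get posted.
 **/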
9595 static void
9596 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9597 {
9598 struct lpfc_sli *psli = &phba->sli;
9599
9600 spin_lock_irq(&phba->hbalock);
9601 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9602
9603 spin_unlock_irq(&phba->hbalock);
9604 return;
9605 }
9606
9607
9608
9609
9610
9611
9612 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9613 spin_unlock_irq(&phba->hbalock);
9614
9615
9616 lpfc_worker_wake_up(phba);
9617 }
9618
9619
9620
9621
9622
9623
9624
9625
9626
9627
9628
9629
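/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * Polls the bootstrap mailbox (BMBX) register until the port sets the ready
 * bit, busy-waiting 2 ms between reads, for up to the regular mailbox
 * command timeout.
 *
 * Returns 0 once the ready bit is set, or MBXERR_ERROR on timeout.
 **/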
9630 static int
9631 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9632 {
9633 uint32_t db_ready;
9634 unsigned long timeout;
9635 struct lpfc_register bmbx_reg;
9636
9637 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9638 * 1000) + jiffies;
9639
9640 do {
9641 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9642 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9643 if (!db_ready)
9644 mdelay(2);
9645
9646 if (time_after(jiffies, timeout))
9647 return MBXERR_ERROR;
9648 } while (!db_ready);
9649
9650 return 0;
9651 }
9652
9653
9654
9655
9656
9657
9658
9659
9660
9661
9662
9663
9664
9665
9666
9667
9668
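/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * Posts a fully formed mailbox command to the port through the bootstrap
 * mailbox region and synchronously polls for its completion. The caller
 * must not hold any locks when calling this routine.
 *
 * Returns MBX_SUCCESS on success, otherwise one of the MBX error values.
 **/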
9669 static int
9670 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9671 {
9672 int rc = MBX_SUCCESS;
9673 unsigned long iflag;
9674 uint32_t mcqe_status;
9675 uint32_t mbx_cmnd;
9676 struct lpfc_sli *psli = &phba->sli;
9677 struct lpfc_mqe *mb = &mboxq->u.mqe;
9678 struct lpfc_bmbx_create *mbox_rgn;
9679 struct dma_address *dma_address;
9680
9681
9682
9683
9684
9685 spin_lock_irqsave(&phba->hbalock, iflag);
9686 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9687 spin_unlock_irqrestore(&phba->hbalock, iflag);
9688 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9689 "(%d):2532 Mailbox command x%x (x%x/x%x) "
9690 "cannot issue Data: x%x x%x\n",
9691 mboxq->vport ? mboxq->vport->vpi : 0,
9692 mboxq->u.mb.mbxCommand,
9693 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9694 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9695 psli->sli_flag, MBX_POLL);
9696 return MBXERR_ERROR;
9697 }
9698
9699 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9700 phba->sli.mbox_active = mboxq;
9701 spin_unlock_irqrestore(&phba->hbalock, iflag);
9702
9703
9704 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9705 if (rc)
9706 goto exit;
9707
9708
9709
9710
9711
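	/* Copy the mailbox command into the bootstrap mailbox region */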
9712 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9713 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9714 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9715 sizeof(struct lpfc_mqe));
9716
9717
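	/* Post the high 32 bits of the bmbx DMA address to the register */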
9718 dma_address = &phba->sli4_hba.bmbx.dma_address;
9719 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9720
9721
9722 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9723 if (rc)
9724 goto exit;
9725
9726
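	/* Then post the low 32 bits of the bmbx DMA address */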
9727 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
9728
9729
9730 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9731 if (rc)
9732 goto exit;
9733
9734
9735
9736
9737
9738
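	/*
	 * Read back the completed mailbox and its MCQE from the bootstrap
	 * mailbox region.
	 */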
9739 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9740 sizeof(struct lpfc_mqe));
9741 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9742 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9743 sizeof(struct lpfc_mcqe));
9744 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9745
9746
9747
9748
9749
9750 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9751 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9752 bf_set(lpfc_mqe_status, mb,
9753 (LPFC_MBX_ERROR_RANGE | mcqe_status));
9754 rc = MBXERR_ERROR;
9755 } else
9756 lpfc_sli4_swap_str(phba, mboxq);
9757
9758 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9759 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9760 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9761 " x%x x%x CQ: x%x x%x x%x x%x\n",
9762 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9763 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9764 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9765 bf_get(lpfc_mqe_status, mb),
9766 mb->un.mb_words[0], mb->un.mb_words[1],
9767 mb->un.mb_words[2], mb->un.mb_words[3],
9768 mb->un.mb_words[4], mb->un.mb_words[5],
9769 mb->un.mb_words[6], mb->un.mb_words[7],
9770 mb->un.mb_words[8], mb->un.mb_words[9],
9771 mb->un.mb_words[10], mb->un.mb_words[11],
9772 mb->un.mb_words[12], mboxq->mcqe.word0,
9773 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
9774 mboxq->mcqe.trailer);
9775 exit:
9776
9777 spin_lock_irqsave(&phba->hbalock, iflag);
9778 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9779 phba->sli.mbox_active = NULL;
9780 spin_unlock_irqrestore(&phba->hbalock, iflag);
9781 return rc;
9782 }
9783
9784
9785
9786
9787
9788
9789
9790
9791
9792
9793
9794
9795
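/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: MBX_POLL for synchronous issue, MBX_NOWAIT to queue asynchronously.
 *
 * Issues a mailbox command to firmware on an SLI-4 port. Polled commands go
 * through the bootstrap mailbox; asynchronous commands are queued for the
 * worker thread to post via lpfc_sli4_post_async_mbox(). The caller owns
 * the mailbox command after the function returns.
 **/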
9796 static int
9797 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9798 uint32_t flag)
9799 {
9800 struct lpfc_sli *psli = &phba->sli;
9801 unsigned long iflags;
9802 int rc;
9803
9804
9805 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9806
9807 rc = lpfc_mbox_dev_check(phba);
9808 if (unlikely(rc)) {
9809 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9810 "(%d):2544 Mailbox command x%x (x%x/x%x) "
9811 "cannot issue Data: x%x x%x\n",
9812 mboxq->vport ? mboxq->vport->vpi : 0,
9813 mboxq->u.mb.mbxCommand,
9814 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9815 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9816 psli->sli_flag, flag);
9817 goto out_not_finished;
9818 }
9819
9820
9821 if (!phba->sli4_hba.intr_enable) {
9822 if (flag == MBX_POLL)
9823 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9824 else
9825 rc = -EIO;
9826 if (rc != MBX_SUCCESS)
9827 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9828 "(%d):2541 Mailbox command x%x "
9829 "(x%x/x%x) failure: "
9830 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9831 "Data: x%x x%x\n",
9832 mboxq->vport ? mboxq->vport->vpi : 0,
9833 mboxq->u.mb.mbxCommand,
9834 lpfc_sli_config_mbox_subsys_get(phba,
9835 mboxq),
9836 lpfc_sli_config_mbox_opcode_get(phba,
9837 mboxq),
9838 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9839 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9840 bf_get(lpfc_mcqe_ext_status,
9841 &mboxq->mcqe),
9842 psli->sli_flag, flag);
9843 return rc;
9844 } else if (flag == MBX_POLL) {
9845 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9846 "(%d):2542 Try to issue mailbox command "
9847 "x%x (x%x/x%x) synchronously ahead of async "
9848 "mailbox command queue: x%x x%x\n",
9849 mboxq->vport ? mboxq->vport->vpi : 0,
9850 mboxq->u.mb.mbxCommand,
9851 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9852 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9853 psli->sli_flag, flag);
9854
9855 rc = lpfc_sli4_async_mbox_block(phba);
9856 if (!rc) {
9857
9858 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9859 if (rc != MBX_SUCCESS)
9860 lpfc_printf_log(phba, KERN_WARNING,
9861 LOG_MBOX | LOG_SLI,
9862 "(%d):2597 Sync Mailbox command "
9863 "x%x (x%x/x%x) failure: "
9864 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9865 "Data: x%x x%x\n",
9866 mboxq->vport ? mboxq->vport->vpi : 0,
9867 mboxq->u.mb.mbxCommand,
9868 lpfc_sli_config_mbox_subsys_get(phba,
9869 mboxq),
9870 lpfc_sli_config_mbox_opcode_get(phba,
9871 mboxq),
9872 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9873 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9874 bf_get(lpfc_mcqe_ext_status,
9875 &mboxq->mcqe),
9876 psli->sli_flag, flag);
9877
9878 lpfc_sli4_async_mbox_unblock(phba);
9879 }
9880 return rc;
9881 }
9882
9883
9884 rc = lpfc_mbox_cmd_check(phba, mboxq);
9885 if (rc) {
9886 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9887 "(%d):2543 Mailbox command x%x (x%x/x%x) "
9888 "cannot issue Data: x%x x%x\n",
9889 mboxq->vport ? mboxq->vport->vpi : 0,
9890 mboxq->u.mb.mbxCommand,
9891 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9892 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9893 psli->sli_flag, flag);
9894 goto out_not_finished;
9895 }
9896
9897
9898 psli->slistat.mbox_busy++;
9899 spin_lock_irqsave(&phba->hbalock, iflags);
9900 lpfc_mbox_put(phba, mboxq);
9901 spin_unlock_irqrestore(&phba->hbalock, iflags);
9902 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9903 "(%d):0354 Mbox cmd issue - Enqueue Data: "
9904 "x%x (x%x/x%x) x%x x%x x%x\n",
9905 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9906 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9907 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9908 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9909 phba->pport->port_state,
9910 psli->sli_flag, MBX_NOWAIT);
9911
9912 lpfc_worker_wake_up(phba);
9913
9914 return MBX_BUSY;
9915
9916 out_not_finished:
9917 return MBX_NOT_FINISHED;
9918 }
9919
9920
9921
9922
9923
9924
9925
9926
9927
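/**
 * lpfc_sli4_post_async_mbox - Post async mbox cmd to the SLI4 mailbox queue
 * @phba: Pointer to HBA context object.
 *
 * Called by the worker thread to take the next queued mailbox command and
 * post it to the SLI-4 mailbox work queue, provided no other mailbox
 * command is active and asynchronous posting is not blocked.
 **/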
9928 int
9929 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9930 {
9931 struct lpfc_sli *psli = &phba->sli;
9932 LPFC_MBOXQ_t *mboxq;
9933 int rc = MBX_SUCCESS;
9934 unsigned long iflags;
9935 struct lpfc_mqe *mqe;
9936 uint32_t mbx_cmnd;
9937
9938
9939 if (unlikely(!phba->sli4_hba.intr_enable))
9940 return MBX_NOT_FINISHED;
9941
9942
9943 spin_lock_irqsave(&phba->hbalock, iflags);
9944 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9945 spin_unlock_irqrestore(&phba->hbalock, iflags);
9946 return MBX_NOT_FINISHED;
9947 }
9948 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9949 spin_unlock_irqrestore(&phba->hbalock, iflags);
9950 return MBX_NOT_FINISHED;
9951 }
9952 if (unlikely(phba->sli.mbox_active)) {
9953 spin_unlock_irqrestore(&phba->hbalock, iflags);
9954 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9955 "0384 There is pending active mailbox cmd\n");
9956 return MBX_NOT_FINISHED;
9957 }
9958
9959 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9960
9961
9962 mboxq = lpfc_mbox_get(phba);
9963
9964
9965 if (!mboxq) {
9966 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9967 spin_unlock_irqrestore(&phba->hbalock, iflags);
9968 return MBX_SUCCESS;
9969 }
9970 phba->sli.mbox_active = mboxq;
9971 spin_unlock_irqrestore(&phba->hbalock, iflags);
9972
9973
9974 rc = lpfc_mbox_dev_check(phba);
9975 if (unlikely(rc))
9976
9977 goto out_not_finished;
9978
9979
9980 mqe = &mboxq->u.mqe;
9981 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9982
9983
9984 mod_timer(&psli->mbox_tmo, (jiffies +
9985 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9986
9987 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9988 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9989 "x%x x%x\n",
9990 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9991 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9992 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9993 phba->pport->port_state, psli->sli_flag);
9994
9995 if (mbx_cmnd != MBX_HEARTBEAT) {
9996 if (mboxq->vport) {
9997 lpfc_debugfs_disc_trc(mboxq->vport,
9998 LPFC_DISC_TRC_MBOX_VPORT,
9999 "MBOX Send vport: cmd:x%x mb:x%x x%x",
10000 mbx_cmnd, mqe->un.mb_words[0],
10001 mqe->un.mb_words[1]);
10002 } else {
10003 lpfc_debugfs_disc_trc(phba->pport,
10004 LPFC_DISC_TRC_MBOX,
10005 "MBOX Send: cmd:x%x mb:x%x x%x",
10006 mbx_cmnd, mqe->un.mb_words[0],
10007 mqe->un.mb_words[1]);
10008 }
10009 }
10010 psli->slistat.mbox_cmd++;
10011
10012
10013 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
10014 if (rc != MBX_SUCCESS) {
10015 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10016 "(%d):2533 Mailbox command x%x (x%x/x%x) "
10017 "cannot issue Data: x%x x%x\n",
10018 mboxq->vport ? mboxq->vport->vpi : 0,
10019 mboxq->u.mb.mbxCommand,
10020 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10021 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10022 psli->sli_flag, MBX_NOWAIT);
10023 goto out_not_finished;
10024 }
10025
10026 return rc;
10027
10028 out_not_finished:
10029 spin_lock_irqsave(&phba->hbalock, iflags);
10030 if (phba->sli.mbox_active) {
10031 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10032 __lpfc_mbox_cmpl_put(phba, mboxq);
10033
10034 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10035 phba->sli.mbox_active = NULL;
10036 }
10037 spin_unlock_irqrestore(&phba->hbalock, iflags);
10038
10039 return MBX_NOT_FINISHED;
10040 }
10041
10042
10043
10044
10045
10046
10047
10048
10049
10050
10051
10052
10053
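/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * Dispatches through the per-device-group API jump table to the SLI-3 or
 * SLI-4 mailbox issuing routine; the caller owns the mailbox command after
 * the function returns. A minimal caller sketch (illustrative only, not
 * taken from this file):
 *
 *	LPFC_MBOXQ_t *mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	lpfc_read_rev(phba, mb);
 *	if (lpfc_sli_issue_mbox(phba, mb, MBX_POLL) != MBX_SUCCESS) {
 *		// handle the error; the caller still owns mb
 *	}
 **/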
10054 int
10055 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
10056 {
10057 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
10058 }
10059
10060
10061
10062
10063
10064
10065
10066
10067
10068
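/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: Pointer to HBA context object.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * Sets up the mbox interface API function jump table in @phba for either
 * the SLI-3 (LPFC_PCI_DEV_LP) or SLI-4 (LPFC_PCI_DEV_OC) device group.
 * Returns 0 on success, -ENODEV on an invalid group.
 **/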
10069 int
10070 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10071 {
10072
10073 switch (dev_grp) {
10074 case LPFC_PCI_DEV_LP:
10075 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10076 phba->lpfc_sli_handle_slow_ring_event =
10077 lpfc_sli_handle_slow_ring_event_s3;
10078 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10079 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10080 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10081 break;
10082 case LPFC_PCI_DEV_OC:
10083 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10084 phba->lpfc_sli_handle_slow_ring_event =
10085 lpfc_sli_handle_slow_ring_event_s4;
10086 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10087 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10088 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10089 break;
10090 default:
10091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10092 "1420 Invalid HBA PCI-device group: 0x%x\n",
10093 dev_grp);
10094 return -ENODEV;
10095 }
10096 return 0;
10097 }
10098
10099
10100
10101
10102
10103
10104
10105
10106
10107
10108
10109
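/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * Called with the hbalock (SLI-3) or the ring_lock (SLI-4) held to queue a
 * command iocb on the ring's txq when it cannot yet be submitted to the
 * firmware.
 **/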
10110 void
10111 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10112 struct lpfc_iocbq *piocb)
10113 {
10114 if (phba->sli_rev == LPFC_SLI_REV4)
10115 lockdep_assert_held(&pring->ring_lock);
10116 else
10117 lockdep_assert_held(&phba->hbalock);
10118
10119 list_add_tail(&piocb->list, &pring->txq);
10120 }
10121
10122
10123
10124
10125
10126
10127
10128
10129
10130
10131
10132
10133
10134
10135
10136
10137
10138
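/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly queued command iocb.
 *
 * Called with hbalock held. Returns the first iocb on the ring's txq after
 * removing it from the list; if the txq is empty, returns the caller's iocb
 * and clears *@piocb.
 **/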
10139 static struct lpfc_iocbq *
10140 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10141 struct lpfc_iocbq **piocb)
10142 {
	struct lpfc_iocbq *nextiocb;
10144
10145 lockdep_assert_held(&phba->hbalock);
10146
10147 nextiocb = lpfc_sli_ringtx_get(phba, pring);
10148 if (!nextiocb) {
10149 nextiocb = *piocb;
10150 *piocb = NULL;
10151 }
10152
10153 return nextiocb;
10154 }
10155
10156
10157
10158
10159
10160
10161
10162
10163
10164
10165
10166
10167
10168
10169
10170
10171
10172
10173
10174
10175
10176
10177
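/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: SLI_IOCB_RET_IOCB to return a busy iocb instead of queueing it.
 *
 * Called with hbalock held. Submits the iocb (and any iocbs pending on the
 * txq) to the firmware ring. While the link is down, only buffer-posting
 * and XRI create/close commands are allowed through.
 *
 * Returns IOCB_SUCCESS, IOCB_BUSY or IOCB_ERROR.
 **/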
10178 static int
10179 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10180 struct lpfc_iocbq *piocb, uint32_t flag)
10181 {
10182 struct lpfc_iocbq *nextiocb;
10183 IOCB_t *iocb;
10184 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10185
10186 lockdep_assert_held(&phba->hbalock);
10187
10188 if (piocb->cmd_cmpl && (!piocb->vport) &&
10189 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10190 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10191 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10192 "1807 IOCB x%x failed. No vport\n",
10193 piocb->iocb.ulpCommand);
10194 dump_stack();
10195 return IOCB_ERROR;
10196 }
10197
10198
10199
10200 if (unlikely(pci_channel_offline(phba->pcidev)))
10201 return IOCB_ERROR;
10202
10203
10204 if (unlikely(phba->hba_flag & DEFER_ERATT))
10205 return IOCB_ERROR;
10206
10207
10208
10209
10210 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10211 return IOCB_ERROR;
10212
10213
10214
10215
10216
10217 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10218 goto iocb_busy;
10219
10220 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10221
10222
10223
10224
10225 switch (piocb->iocb.ulpCommand) {
10226 case CMD_QUE_RING_BUF_CN:
10227 case CMD_QUE_RING_BUF64_CN:
10228
10229
10230
10231
10232 if (piocb->cmd_cmpl)
10233 piocb->cmd_cmpl = NULL;
10234 fallthrough;
10235 case CMD_CREATE_XRI_CR:
10236 case CMD_CLOSE_XRI_CN:
10237 case CMD_CLOSE_XRI_CX:
10238 break;
10239 default:
10240 goto iocb_busy;
10241 }
10242
10243
10244
10245
10246
10247 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10248 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10249 goto iocb_busy;
10250 }
10251
10252 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10253 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10254 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10255
10256 if (iocb)
10257 lpfc_sli_update_ring(phba, pring);
10258 else
10259 lpfc_sli_update_full_ring(phba, pring);
10260
10261 if (!piocb)
10262 return IOCB_SUCCESS;
10263
10264 goto out_busy;
10265
10266 iocb_busy:
10267 pring->stats.iocb_cmd_delay++;
10268
10269 out_busy:
10270
10271 if (!(flag & SLI_IOCB_RET_IOCB)) {
10272 __lpfc_sli_ringtx_put(phba, pring, piocb);
10273 return IOCB_SUCCESS;
10274 }
10275
10276 return IOCB_BUSY;
10277 }
10278
10279
10280
10281
10282
10283
10284
10285
10286
10287
10288
10289
10290
10291
10292
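/**
 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Wrapper that takes the hbalock and invokes the lockless SLI-3 issue
 * routine for an FCP I/O.
 **/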
10293 static int
10294 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10295 struct lpfc_iocbq *piocb, uint32_t flag)
10296 {
10297 unsigned long iflags;
10298 int rc;
10299
10300 spin_lock_irqsave(&phba->hbalock, iflags);
10301 rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10302 spin_unlock_irqrestore(&phba->hbalock, iflags);
10303
10304 return rc;
10305 }
10306
10307
10308
10309
10310
10311
10312
10313
10314
10315
10316
10317
10318
10319
10320
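/**
 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the wqe on (unused on SLI-4).
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Finishes the embedded-I/O fields of the WQE and posts it to the hardware
 * queue associated with the I/O buffer.
 **/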
10321 static int
10322 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10323 struct lpfc_iocbq *piocb, uint32_t flag)
10324 {
10325 int rc;
10326 struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
10327
10328 lpfc_prep_embed_io(phba, lpfc_cmd);
10329 rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10330 return rc;
10331 }
10332
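/**
 * lpfc_prep_embed_io - Build the BDE/embedded FCP_CMND portion of an io wqe
 * @phba: Pointer to HBA context object.
 * @lpfc_cmd: Pointer to the IO buffer whose cur_iocbq WQE is being prepared.
 *
 * If fcp_embed_io is enabled, embeds the FCP_CMND payload directly in the
 * WQE (words 22 and up); otherwise points the BDE at the external FCP_CMND
 * buffer. Also sets the VMID fields when the IO carries a VMID tag.
 **/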
10333 void
10334 lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10335 {
10336 struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10337 union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10338 struct sli4_sge *sgl;
10339
10340
10341 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10342
10343 if (phba->fcp_embed_io) {
10344 struct fcp_cmnd *fcp_cmnd;
10345 u32 *ptr;
10346
10347 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10348
10349
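		/*
		 * Words 0-2: immediate-data BDE.  addrLow carries the byte
		 * offset of the embedded FCP_CMND within the WQE (word 22,
		 * i.e. 22 * 4 = 88).
		 */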
10350 wqe->generic.bde.tus.f.bdeFlags =
10351 BUFF_TYPE_BDE_IMMED;
10352 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10353 wqe->generic.bde.addrHigh = 0;
10354 wqe->generic.bde.addrLow = 88;
10355
10356 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10357 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10358
10359
10360 ptr = &wqe->words[22];
10361 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10362 } else {
10363
10364 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
10365 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
10366 wqe->generic.bde.addrHigh = sgl->addr_hi;
10367 wqe->generic.bde.addrLow = sgl->addr_lo;
10368
10369
10370 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10371 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10372 }
10373
10374
10375 if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
10376 if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
10377 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10378 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10379 (piocb->vmid_tag.cs_ctl_vmid));
10380 } else if (phba->cfg_vmid_app_header) {
10381 bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10382 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10383 wqe->words[31] = piocb->vmid_tag.app_id;
10384 }
10385 }
10386 }
10387
10388
10389
10390
10391
10392
10393
10394
10395
10396
10397
10398
10399
10400
10401
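/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the wqe on.
 * @piocb: Pointer to command iocb.
 * @flag: SLI_IOCB_RET_IOCB to return a busy iocb instead of queueing it.
 *
 * Called with the ring_lock held. Allocates an sglq for the command when it
 * needs one, converts the buffer list to an sgl, and posts the WQE to the
 * selected work queue.
 *
 * Returns 0 on success, IOCB_SUCCESS if the iocb was queued on the txq,
 * IOCB_BUSY or IOCB_ERROR otherwise.
 **/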
10402 static int
10403 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10404 struct lpfc_iocbq *piocb, uint32_t flag)
10405 {
10406 struct lpfc_sglq *sglq;
10407 union lpfc_wqe128 *wqe;
10408 struct lpfc_queue *wq;
10409 struct lpfc_sli_ring *pring;
10410 u32 ulp_command = get_job_cmnd(phba, piocb);
10411
10412
10413 if ((piocb->cmd_flag & LPFC_IO_FCP) ||
10414 (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10415 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10416 } else {
10417 wq = phba->sli4_hba.els_wq;
10418 }
10419
10420
10421 pring = wq->pring;
10422
10423
10424
10425
10426
10427 lockdep_assert_held(&pring->ring_lock);
10428 wqe = &piocb->wqe;
10429 if (piocb->sli4_xritag == NO_XRI) {
10430 if (ulp_command == CMD_ABORT_XRI_CX)
10431 sglq = NULL;
10432 else {
10433 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10434 if (!sglq) {
10435 if (!(flag & SLI_IOCB_RET_IOCB)) {
10436 __lpfc_sli_ringtx_put(phba,
10437 pring,
10438 piocb);
10439 return IOCB_SUCCESS;
10440 } else {
10441 return IOCB_BUSY;
10442 }
10443 }
10444 }
10445 } else if (piocb->cmd_flag & LPFC_IO_FCP) {
10446
10447 sglq = NULL;
	} else {
10450
10451
10452
10453
10454 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10455 if (!sglq)
10456 return IOCB_ERROR;
10457 }
10458
10459 if (sglq) {
10460 piocb->sli4_lxritag = sglq->sli4_lxritag;
10461 piocb->sli4_xritag = sglq->sli4_xritag;
10462
10463
10464
10465
10466
10467 if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
10468 piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
10469 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10470 piocb->sli4_xritag);
10471
10472 bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
10473 piocb->sli4_xritag);
10474
10475 if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10476 return IOCB_ERROR;
10477 }
10478
10479 if (lpfc_sli4_wq_put(wq, wqe))
10480 return IOCB_ERROR;
10481
10482 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10483
10484 return 0;
10485 }
10486
10487
10488
10489
10490
10491
10492
10493
10494
10495
10496
10497
10498
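/**
 * lpfc_sli_issue_fcp_io - Wrapper func for issuing an fcp i/o
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the wqe on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Dispatches to the SLI-3 or SLI-4 FCP issue routine through the API jump
 * table.
 **/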
10499 int
10500 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10501 struct lpfc_iocbq *piocb, uint32_t flag)
10502 {
10503 return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10504 }
10505
10506
10507
10508
10509
10510
10511
10512
10513
10514
10515
10516
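/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Dispatches to the lockless SLI-3 or SLI-4 issue routine; the caller must
 * already hold the appropriate ring lock.
 **/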
10517 int
10518 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10519 struct lpfc_iocbq *piocb, uint32_t flag)
10520 {
10521 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10522 }
10523
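/*
 * SLI-3 side of lpfc_sli_prep_els_req_rsp(): builds the ELS request or
 * response IOCB fields.
 */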
10524 static void
10525 __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
10526 struct lpfc_vport *vport,
10527 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10528 u32 elscmd, u8 tmo, u8 expect_rsp)
10529 {
10530 struct lpfc_hba *phba = vport->phba;
10531 IOCB_t *cmd;
10532
10533 cmd = &cmdiocbq->iocb;
10534 memset(cmd, 0, sizeof(*cmd));
10535
10536 cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10537 cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10538 cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10539
10540 if (expect_rsp) {
10541 cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
10542 cmd->un.elsreq64.remoteID = did;
10543 cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
10544 cmd->ulpTimeout = tmo;
10545 } else {
10546 cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
10547 cmd->un.genreq64.xmit_els_remoteID = did;
10548 cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
10549 cmd->ulpPU = PARM_NPIV_DID;
10550 }
10551 cmd->ulpBdeCount = 1;
10552 cmd->ulpLe = 1;
10553 cmd->ulpClass = CLASS3;
10554
10555
10556 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
10557 if (expect_rsp) {
10558 cmd->un.elsreq64.myID = vport->fc_myDID;
10559
10560
10561 cmd->ulpContext = phba->vpi_ids[vport->vpi];
10562 }
10563
10564 cmd->ulpCt_h = 0;
10565
10566 if (elscmd == ELS_CMD_ECHO)
10567 cmd->ulpCt_l = 0;
10568 else
10569 cmd->ulpCt_l = 1;
10570 }
10571 }
10572
10573 static void
10574 __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
10575 struct lpfc_vport *vport,
10576 struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10577 u32 elscmd, u8 tmo, u8 expect_rsp)
10578 {
10579 struct lpfc_hba *phba = vport->phba;
10580 union lpfc_wqe128 *wqe;
10581 struct ulp_bde64_le *bde;
10582 u8 els_id;
10583
10584 wqe = &cmdiocbq->wqe;
10585 memset(wqe, 0, sizeof(*wqe));
10586
10587
10588 bde = (struct ulp_bde64_le *)&wqe->generic.bde;
10589 bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10590 bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10591 bde->type_size = cpu_to_le32(cmd_size);
10592 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10593
10594 if (expect_rsp) {
10595 bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
10596
10597
10598 wqe->els_req.payload_len = cmd_size;
10599 wqe->els_req.max_response_payload_len = FCELSSIZE;
10600
10601
10602 bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
10603
10604
10605 switch (elscmd) {
10606 case ELS_CMD_PLOGI:
10607 els_id = LPFC_ELS_ID_PLOGI;
10608 break;
10609 case ELS_CMD_FLOGI:
10610 els_id = LPFC_ELS_ID_FLOGI;
10611 break;
10612 case ELS_CMD_LOGO:
10613 els_id = LPFC_ELS_ID_LOGO;
10614 break;
10615 case ELS_CMD_FDISC:
10616 if (!vport->fc_myDID) {
10617 els_id = LPFC_ELS_ID_FDISC;
10618 break;
10619 }
10620 fallthrough;
10621 default:
10622 els_id = LPFC_ELS_ID_DEFAULT;
10623 break;
10624 }
10625
10626 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10627 } else {
10628
10629 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
10630
10631
10632 wqe->xmit_els_rsp.response_payload_len = cmd_size;
10633
10634 bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
10635 CMD_XMIT_ELS_RSP64_WQE);
10636 }
10637
10638 bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
10639 bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
10640 bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
10641
10642
10643
10644
10645
10646 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
10647 (vport->fc_flag & FC_PT2PT)) {
10648 if (expect_rsp) {
10649 bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
10650
10651
10652 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10653 phba->vpi_ids[vport->vpi]);
10654 }
10655
10656
10657 if (elscmd == ELS_CMD_ECHO)
10658 bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
10659 else
10660 bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
10661 }
10662 }
10663
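/**
 * lpfc_sli_prep_els_req_rsp - Prepare ELS command iocb or wqe
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to the driver command iocb object.
 * @vport: Pointer to the virtual port this command is issued for.
 * @bmp: Buffer list DMA descriptor for the ELS payload.
 * @cmd_size: Size of the ELS command payload in bytes.
 * @did: Destination ID.
 * @elscmd: ELS command opcode.
 * @tmo: Command timeout.
 * @expect_rsp: Non-zero for an ELS request, zero for an ELS response.
 *
 * Dispatches to the SLI-3 or SLI-4 prep routine through the API jump table.
 **/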
10664 void
10665 lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10666 struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
10667 u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
10668 u8 expect_rsp)
10669 {
10670 phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
10671 elscmd, tmo, expect_rsp);
10672 }
10673
10674 static void
10675 __lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10676 u16 rpi, u32 num_entry, u8 tmo)
10677 {
10678 IOCB_t *cmd;
10679
10680 cmd = &cmdiocbq->iocb;
10681 memset(cmd, 0, sizeof(*cmd));
10682
10683 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10684 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10685 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10686 cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
10687
10688 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
10689 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
10690 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
10691
10692 cmd->ulpContext = rpi;
10693 cmd->ulpClass = CLASS3;
10694 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
10695 cmd->ulpBdeCount = 1;
10696 cmd->ulpLe = 1;
10697 cmd->ulpOwner = OWN_CHIP;
10698 cmd->ulpTimeout = tmo;
10699 }
10700
10701 static void
10702 __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10703 u16 rpi, u32 num_entry, u8 tmo)
10704 {
10705 union lpfc_wqe128 *cmdwqe;
10706 struct ulp_bde64_le *bde, *bpl;
10707 u32 xmit_len = 0, total_len = 0, size, type, i;
10708
10709 cmdwqe = &cmdiocbq->wqe;
10710 memset(cmdwqe, 0, sizeof(*cmdwqe));
10711
10712
10713 bpl = (struct ulp_bde64_le *)bmp->virt;
10714 for (i = 0; i < num_entry; i++) {
10715 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10716 total_len += size;
10717 }
10718 for (i = 0; i < num_entry; i++) {
10719 size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10720 type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
10721 if (type != ULP_BDE64_TYPE_BDE_64)
10722 break;
10723 xmit_len += size;
10724 }
10725
10726
10727 bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
10728 bde->addr_low = bpl->addr_low;
10729 bde->addr_high = bpl->addr_high;
10730 bde->type_size = cpu_to_le32(xmit_len);
10731 bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10732
10733
10734 cmdwqe->gen_req.request_payload_len = xmit_len;
10735
10736
10737 bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
10738 bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
10739 bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
10740 bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
10741
10742
10743 bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
10744
10745
10746 bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
10747 bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
10748 bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
10749 bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
10750
10751
10752 cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
10753 }
10754
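/**
 * lpfc_sli_prep_gen_req - Prepare a GEN_REQUEST64 iocb or wqe
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to the driver command iocb object.
 * @bmp: Buffer list DMA descriptor for the CT payload.
 * @rpi: Remote port indicator.
 * @num_entry: Number of BDEs in the buffer list.
 * @tmo: Command timeout.
 *
 * Dispatches to the SLI-3 or SLI-4 prep routine through the API jump table.
 **/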
10755 void
10756 lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10757 struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
10758 {
10759 phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
10760 }
10761
10762 static void
10763 __lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
10764 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10765 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10766 {
10767 IOCB_t *icmd;
10768
10769 icmd = &cmdiocbq->iocb;
10770 memset(icmd, 0, sizeof(*icmd));
10771
10772 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10773 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
10774 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10775 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
10776 icmd->un.xseq64.w5.hcsw.Fctl = LA;
10777 if (last_seq)
10778 icmd->un.xseq64.w5.hcsw.Fctl |= LS;
10779 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
10780 icmd->un.xseq64.w5.hcsw.Rctl = rctl;
10781 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
10782
10783 icmd->ulpBdeCount = 1;
10784 icmd->ulpLe = 1;
10785 icmd->ulpClass = CLASS3;
10786
10787 switch (cr_cx_cmd) {
10788 case CMD_XMIT_SEQUENCE64_CR:
10789 icmd->ulpContext = rpi;
10790 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
10791 break;
10792 case CMD_XMIT_SEQUENCE64_CX:
10793 icmd->ulpContext = ox_id;
10794 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
10795 break;
10796 default:
10797 break;
10798 }
10799 }
10800
10801 static void
10802 __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
10803 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10804 u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10805 {
10806 union lpfc_wqe128 *wqe;
10807 struct ulp_bde64 *bpl;
10808
10809 wqe = &cmdiocbq->wqe;
10810 memset(wqe, 0, sizeof(*wqe));
10811
10812
10813 bpl = (struct ulp_bde64 *)bmp->virt;
10814 wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
10815 wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
10816 wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
10817
10818
10819 bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
10820 bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
10821 bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
10822 bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
10823 bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
10824
10825
10826 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
10827
10828 bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
10829 CMD_XMIT_SEQUENCE64_WQE);
10830
10831
10832 bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
10833
10834
10835 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
10836
10837
10838 if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
10839 wqe->xmit_sequence.xmit_len = full_size;
10840 else
10841 wqe->xmit_sequence.xmit_len =
10842 wqe->xmit_sequence.bde.tus.f.bdeSize;
10843 }
10844
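/**
 * lpfc_sli_prep_xmit_seq64 - Prepare an XMIT_SEQUENCE64 iocb or wqe
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to the driver command iocb object.
 * @bmp: Buffer list DMA descriptor for the sequence payload.
 * @rpi: Remote port indicator.
 * @ox_id: Originator exchange ID for the _CX variant.
 * @num_entry: Number of BDEs (SLI-3) or full payload size (SLI-4).
 * @rctl: R_CTL value for the frame header.
 * @last_seq: Non-zero if this is the last sequence of the exchange.
 * @cr_cx_cmd: CMD_XMIT_SEQUENCE64_CR or CMD_XMIT_SEQUENCE64_CX.
 *
 * Dispatches to the SLI-3 or SLI-4 prep routine through the API jump table.
 **/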
10845 void
10846 lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10847 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
10848 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
10849 {
10850 phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
10851 rctl, last_seq, cr_cx_cmd);
10852 }
10853
10854 static void
10855 __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
10856 u16 iotag, u8 ulp_class, u16 cqid, bool ia,
10857 bool wqec)
10858 {
10859 IOCB_t *icmd = NULL;
10860
10861 icmd = &cmdiocbq->iocb;
10862 memset(icmd, 0, sizeof(*icmd));
10863
10864
10865 icmd->un.acxri.abortContextTag = ulp_context;
10866 icmd->un.acxri.abortIoTag = iotag;
10867
10868 if (ia) {
10869
10870 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
10871 } else {
10872
10873 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
10874
10875
10876 icmd->ulpClass = ulp_class;
10877 icmd->ulpCommand = CMD_ABORT_XRI_CN;
10878 }
10879
10880
10881 icmd->ulpLe = 1;
10882 }
10883
10884 static void
10885 __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
10886 u16 iotag, u8 ulp_class, u16 cqid, bool ia,
10887 bool wqec)
10888 {
10889 union lpfc_wqe128 *wqe;
10890
10891 wqe = &cmdiocbq->wqe;
10892 memset(wqe, 0, sizeof(*wqe));
10893
10894
10895 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
10896 if (ia)
10897 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10898 else
10899 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10900
10901
10902 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
10903
10904
10905 wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
10906
10907
10908 bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
10909
10910
10911 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10912
10913
10914 if (wqec)
10915 bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
10916 bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
10917 bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
10918 }
10919
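/**
 * lpfc_sli_prep_abort_xri - Prepare an ABORT/CLOSE XRI iocb or wqe
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to the driver command iocb object.
 * @ulp_context: XRI/context tag of the exchange being aborted.
 * @iotag: IO tag of the command being aborted.
 * @ulp_class: Class of service of the original command.
 * @cqid: Completion queue for the abort (SLI-4 only).
 * @ia: True to close the exchange without sending an ABTS on the wire.
 * @wqec: True to request a WQE completion entry (SLI-4 only).
 *
 * Dispatches to the SLI-3 or SLI-4 prep routine through the API jump table.
 **/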
10920 void
10921 lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10922 u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
10923 bool ia, bool wqec)
10924 {
10925 phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
10926 cqid, ia, wqec);
10927 }
10928
10929
10930
10931
10932
10933
10934
10935
10936
10937
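/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: Pointer to HBA context object.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * Sets up the SLI interface API function jump table in @phba for either the
 * SLI-3 or SLI-4 device group. Returns 0 on success, -ENODEV on an invalid
 * group.
 **/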
10938 int
10939 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10940 {
10941
10942 switch (dev_grp) {
10943 case LPFC_PCI_DEV_LP:
10944 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10945 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10946 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
10947 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
10948 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
10949 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
10950 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
10951 break;
10952 case LPFC_PCI_DEV_OC:
10953 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10954 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10955 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
10956 phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
10957 phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
10958 phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
10959 phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
10960 break;
10961 default:
10962 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10963 "1419 Invalid HBA PCI-device group: 0x%x\n",
10964 dev_grp);
10965 return -ENODEV;
10966 }
10967 return 0;
10968 }
10969
10970
10971
10972
10973
10974
10975
10976
10977
10978
10979
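/**
 * lpfc_sli4_calc_ring - Calculates which ring the command should post on
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For FCP-type commands, returns the pring of the hardware queue recorded
 * in the I/O buffer (commands flagged LPFC_USE_FCPWQIDX keep their existing
 * hba_wqidx); all other traffic posts to the ELS work queue's pring.
 **/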
10980 struct lpfc_sli_ring *
10981 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10982 {
10983 struct lpfc_io_buf *lpfc_cmd;
10984
10985 if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10986 if (unlikely(!phba->sli4_hba.hdwq))
10987 return NULL;
10988
10989
10990
10991
10992 if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10993 lpfc_cmd = piocb->io_buf;
10994 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10995 }
10996 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10997 } else {
10998 if (unlikely(!phba->sli4_hba.els_wq))
10999 return NULL;
11000 piocb->hba_wqidx = 0;
11001 return phba->sli4_hba.els_wq->pring;
11002 }
11003 }
11004
11005
11006
11007
11008
11009
11010
11011
11012
11013
11014
11015
11016
11017
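/**
 * lpfc_sli_issue_iocb - Wrapper function for issuing an iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Takes the appropriate lock (ring_lock on SLI-4, hbalock on SLI-3), builds
 * the WQE when needed, and invokes the lockless issue routine. On SLI-4 it
 * also polls the associated event queue in fast-path mode.
 **/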
11018 int
11019 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11020 struct lpfc_iocbq *piocb, uint32_t flag)
11021 {
11022 struct lpfc_sli_ring *pring;
11023 struct lpfc_queue *eq;
11024 unsigned long iflags;
11025 int rc;
11026
11027
11028 if (unlikely(pci_channel_offline(phba->pcidev)))
11029 return IOCB_ERROR;
11030
11031 if (phba->sli_rev == LPFC_SLI_REV4) {
11032 lpfc_sli_prep_wqe(phba, piocb);
11033
11034 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11035
11036 pring = lpfc_sli4_calc_ring(phba, piocb);
11037 if (unlikely(pring == NULL))
11038 return IOCB_ERROR;
11039
11040 spin_lock_irqsave(&pring->ring_lock, iflags);
11041 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11042 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11043
11044 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
11045 } else {
11046
11047 spin_lock_irqsave(&phba->hbalock, iflags);
11048 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11049 spin_unlock_irqrestore(&phba->hbalock, iflags);
11050 }
11051 return rc;
11052 }
11053
11054
11055
11056
11057
11058
11059
11060
11061
11062
11063
11064
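/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * Called at driver attach time to carve iocb entries out of the FCP ring
 * and give them to the extra ring, which is used only when the driver needs
 * multi-ring support (for example target mode or IP over FC). SLI-3 only.
 **/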
11065 static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
11067 {
11068 struct lpfc_sli *psli;
11069 struct lpfc_sli_ring *pring;
11070
11071 psli = &phba->sli;
11072
11073
11074
11075
11076 pring = &psli->sli3_ring[LPFC_FCP_RING];
11077 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11078 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11079 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11080 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11081
11082
11083 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11084
11085 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11086 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11087 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11088 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11089
11090
11091 pring->iotag_max = 4096;
11092 pring->num_mask = 1;
11093 pring->prt[0].profile = 0;
11094 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11095 pring->prt[0].type = phba->cfg_multi_ring_type;
11096 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11097 return 0;
11098 }
11099
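/*
 * lpfc_sli_post_recovery_event - Queue a port-recovery work event for a node
 *
 * Takes a reference on @ndlp and adds its recovery event to the worker
 * thread's work list, unless one is already pending.
 */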
11100 static void
11101 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11102 struct lpfc_nodelist *ndlp)
11103 {
11104 unsigned long iflags;
11105 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
11106
11107 spin_lock_irqsave(&phba->hbalock, iflags);
11108 if (!list_empty(&evtp->evt_listp)) {
11109 spin_unlock_irqrestore(&phba->hbalock, iflags);
11110 return;
11111 }
11112
11113
11114 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
11115 if (!evtp->evt_arg1) {
11116 spin_unlock_irqrestore(&phba->hbalock, iflags);
11117 return;
11118 }
11119 evtp->evt = LPFC_EVT_RECOVER_PORT;
11120 list_add_tail(&evtp->evt_listp, &phba->work_list);
11121 spin_unlock_irqrestore(&phba->hbalock, iflags);
11122
11123 lpfc_worker_wake_up(phba);
11124 }
11125
11126
11127
11128
11129
11130
11131
11132
11133
11134
11135
11136
11137
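/**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async event handler calls this routine when the port posts an
 * ASYNC_STATUS_CN event for a failed ABTS. Looks up the node from the
 * vpi/rpi carried in the event and initiates port recovery when the status
 * is a local reject.
 **/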
11138 static void
11139 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11140 struct lpfc_iocbq *iocbq)
11141 {
11142 struct lpfc_nodelist *ndlp = NULL;
11143 uint16_t rpi = 0, vpi = 0;
11144 struct lpfc_vport *vport = NULL;
11145
11146
11147 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11148 rpi = iocbq->iocb.ulpContext;
11149
11150 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11151 "3092 Port generated ABTS async event "
11152 "on vpi %d rpi %d status 0x%x\n",
11153 vpi, rpi, iocbq->iocb.ulpStatus);
11154
11155 vport = lpfc_find_vport_by_vpid(phba, vpi);
11156 if (!vport)
11157 goto err_exit;
11158 ndlp = lpfc_findnode_rpi(vport, rpi);
11159 if (!ndlp)
11160 goto err_exit;
11161
11162 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11163 lpfc_sli_abts_recover_port(vport, ndlp);
11164 return;
11165
11166 err_exit:
11167 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11168 "3095 Event Context not found, no "
11169 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11170 vpi, rpi, iocbq->iocb.ulpStatus,
11171 iocbq->iocb.ulpContext);
11172 }
11173
11174
11175
11176
11177
11178
11179
11180
11181
11182
11183
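/**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * Called when the driver receives an FCP XRI ABORT CQE from the port. Posts
 * a port-recovery event when the abort was locally rejected with a sequence
 * timeout (or with no extended status).
 **/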
11184 void
11185 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11186 struct lpfc_nodelist *ndlp,
11187 struct sli4_wcqe_xri_aborted *axri)
11188 {
11189 uint32_t ext_status = 0;
11190
11191 if (!ndlp) {
11192 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11193 "3115 Node Context not found, driver "
11194 "ignoring abts err event\n");
11195 return;
11196 }
11197
11198 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11199 "3116 Port generated FCP XRI ABORT event on "
11200 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11201 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11202 bf_get(lpfc_wcqe_xa_xri, axri),
11203 bf_get(lpfc_wcqe_xa_status, axri),
11204 axri->parameter);
11205
11206
11207
11208
11209
11210
11211 ext_status = axri->parameter & IOERR_PARAM_MASK;
11212 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11213 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11214 lpfc_sli_post_recovery_event(phba, ndlp);
11215 }
11216
11217
11218
11219
11220
11221
11222
11223
11224
11225
11226
11227
11228
11229
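/**
 * lpfc_sli_async_event_handler - SLI ring asynchronous event handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * Handles asynchronous events posted to the ring: temperature warnings are
 * forwarded to userspace as FC vendor events, ABTS errors are passed to
 * lpfc_sli_abts_err_handler(), and unknown event codes are logged.
 **/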
11230 static void
lpfc_sli_async_event_handler(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
11233 {
11234 IOCB_t *icmd;
11235 uint16_t evt_code;
11236 struct temp_event temp_event_data;
11237 struct Scsi_Host *shost;
11238 uint32_t *iocb_w;
11239
11240 icmd = &iocbq->iocb;
11241 evt_code = icmd->un.asyncstat.evt_code;
11242
11243 switch (evt_code) {
11244 case ASYNC_TEMP_WARN:
11245 case ASYNC_TEMP_SAFE:
11246 temp_event_data.data = (uint32_t) icmd->ulpContext;
11247 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11248 if (evt_code == ASYNC_TEMP_WARN) {
11249 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11250 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11251 "0347 Adapter is very hot, please take "
11252 "corrective action. temperature : %d Celsius\n",
11253 (uint32_t) icmd->ulpContext);
11254 } else {
11255 temp_event_data.event_code = LPFC_NORMAL_TEMP;
11256 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11257 "0340 Adapter temperature is OK now. "
11258 "temperature : %d Celsius\n",
11259 (uint32_t) icmd->ulpContext);
11260 }
11261
11262
11263 shost = lpfc_shost_from_vport(phba->pport);
11264 fc_host_post_vendor_event(shost, fc_get_event_number(),
11265 sizeof(temp_event_data), (char *) &temp_event_data,
11266 LPFC_NL_VENDOR_ID);
11267 break;
11268 case ASYNC_STATUS_CN:
11269 lpfc_sli_abts_err_handler(phba, iocbq);
11270 break;
11271 default:
11272 iocb_w = (uint32_t *) icmd;
11273 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11274 "0346 Ring %d handler: unexpected ASYNC_STATUS"
11275 " evt_code 0x%x\n"
11276 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
11277 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
11278 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
11279 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11280 pring->ringno, icmd->un.asyncstat.evt_code,
11281 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11282 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11283 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11284 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11285
11286 break;
11287 }
11288 }
11289
11290
11291
11292
11293
11294
11295
11296
11297
11298
11299
11300
11301
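/**
 * lpfc_sli4_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * Called at driver attach time to set up the unsolicited-event masks for
 * the SLI-4 ELS ring (ELS and CT request/response frames). Always returns 0.
 **/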
11302 int
11303 lpfc_sli4_setup(struct lpfc_hba *phba)
11304 {
11305 struct lpfc_sli_ring *pring;
11306
11307 pring = phba->sli4_hba.els_wq->pring;
11308 pring->num_mask = LPFC_MAX_RING_MASK;
11309 pring->prt[0].profile = 0;
11310 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11311 pring->prt[0].type = FC_TYPE_ELS;
11312 pring->prt[0].lpfc_sli_rcv_unsol_event =
11313 lpfc_els_unsol_event;
11314 pring->prt[1].profile = 0;
11315 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11316 pring->prt[1].type = FC_TYPE_ELS;
11317 pring->prt[1].lpfc_sli_rcv_unsol_event =
11318 lpfc_els_unsol_event;
11319 pring->prt[2].profile = 0;
11320
11321 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11322
11323 pring->prt[2].type = FC_TYPE_CT;
11324 pring->prt[2].lpfc_sli_rcv_unsol_event =
11325 lpfc_ct_unsol_event;
11326 pring->prt[3].profile = 0;
11327
11328 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11329
11330 pring->prt[3].type = FC_TYPE_CT;
11331 pring->prt[3].lpfc_sli_rcv_unsol_event =
11332 lpfc_ct_unsol_event;
11333 return 0;
11334 }
11335
11336
11337
11338
11339
11340
11341
11342
11343
11344
11345
11346
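/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * Called at driver attach time to size the SLI-3 rings and register the
 * unsolicited-event and async-status handlers for each ring. Logs an error
 * if the configured iocb entries do not fit in the SLIM. Always returns 0.
 **/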
11347 int
11348 lpfc_sli_setup(struct lpfc_hba *phba)
11349 {
11350 int i, totiocbsize = 0;
11351 struct lpfc_sli *psli = &phba->sli;
11352 struct lpfc_sli_ring *pring;
11353
11354 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11355 psli->sli_flag = 0;
11356
11357 psli->iocbq_lookup = NULL;
11358 psli->iocbq_lookup_len = 0;
11359 psli->last_iotag = 0;
11360
11361 for (i = 0; i < psli->num_rings; i++) {
11362 pring = &psli->sli3_ring[i];
11363 switch (i) {
11364 case LPFC_FCP_RING:
11365
11366 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11367 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11368 pring->sli.sli3.numCiocb +=
11369 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11370 pring->sli.sli3.numRiocb +=
11371 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11372 pring->sli.sli3.numCiocb +=
11373 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11374 pring->sli.sli3.numRiocb +=
11375 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11376 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11377 SLI3_IOCB_CMD_SIZE :
11378 SLI2_IOCB_CMD_SIZE;
11379 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11380 SLI3_IOCB_RSP_SIZE :
11381 SLI2_IOCB_RSP_SIZE;
11382 pring->iotag_ctr = 0;
11383 pring->iotag_max =
11384 (phba->cfg_hba_queue_depth * 2);
11385 pring->fast_iotag = pring->iotag_max;
11386 pring->num_mask = 0;
11387 break;
11388 case LPFC_EXTRA_RING:
11389
11390 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11391 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11392 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11393 SLI3_IOCB_CMD_SIZE :
11394 SLI2_IOCB_CMD_SIZE;
11395 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11396 SLI3_IOCB_RSP_SIZE :
11397 SLI2_IOCB_RSP_SIZE;
11398 pring->iotag_max = phba->cfg_hba_queue_depth;
11399 pring->num_mask = 0;
11400 break;
11401 case LPFC_ELS_RING:
11402
11403 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11404 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11405 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11406 SLI3_IOCB_CMD_SIZE :
11407 SLI2_IOCB_CMD_SIZE;
11408 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11409 SLI3_IOCB_RSP_SIZE :
11410 SLI2_IOCB_RSP_SIZE;
11411 pring->fast_iotag = 0;
11412 pring->iotag_ctr = 0;
11413 pring->iotag_max = 4096;
11414 pring->lpfc_sli_rcv_async_status =
11415 lpfc_sli_async_event_handler;
11416 pring->num_mask = LPFC_MAX_RING_MASK;
11417 pring->prt[0].profile = 0;
11418 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11419 pring->prt[0].type = FC_TYPE_ELS;
11420 pring->prt[0].lpfc_sli_rcv_unsol_event =
11421 lpfc_els_unsol_event;
11422 pring->prt[1].profile = 0;
11423 pring->prt[1].rctl = FC_RCTL_ELS_REP;
11424 pring->prt[1].type = FC_TYPE_ELS;
11425 pring->prt[1].lpfc_sli_rcv_unsol_event =
11426 lpfc_els_unsol_event;
11427 pring->prt[2].profile = 0;
11428
11429 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11430
11431 pring->prt[2].type = FC_TYPE_CT;
11432 pring->prt[2].lpfc_sli_rcv_unsol_event =
11433 lpfc_ct_unsol_event;
11434 pring->prt[3].profile = 0;
11435
11436 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11437
11438 pring->prt[3].type = FC_TYPE_CT;
11439 pring->prt[3].lpfc_sli_rcv_unsol_event =
11440 lpfc_ct_unsol_event;
11441 break;
11442 }
11443 totiocbsize += (pring->sli.sli3.numCiocb *
11444 pring->sli.sli3.sizeCiocb) +
11445 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11446 }
11447 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11448
11449 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11450 "SLI2 SLIM Data: x%x x%lx\n",
11451 phba->brd_no, totiocbsize,
11452 (unsigned long) MAX_SLIM_IOCB_SIZE);
11453 }
11454 if (phba->cfg_multi_ring_support == 2)
11455 lpfc_extra_ring_setup(phba);
11456
11457 return 0;
11458 }
11459
11460
11461
11462
11463
11464
11465
11466
11467
11468
11469
11470
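/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * Called at driver attach time to initialize the mailbox queues and every
 * SLI-4 work queue's pring: txq/txcmplq lists, ring number and ring lock.
 **/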
11471 void
11472 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11473 {
11474 struct lpfc_sli *psli;
11475 struct lpfc_sli_ring *pring;
11476 int i;
11477
11478 psli = &phba->sli;
11479 spin_lock_irq(&phba->hbalock);
11480 INIT_LIST_HEAD(&psli->mboxq);
11481 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11482
11483 for (i = 0; i < phba->cfg_hdw_queue; i++) {
11484 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11485 pring->flag = 0;
11486 pring->ringno = LPFC_FCP_RING;
11487 pring->txcmplq_cnt = 0;
11488 INIT_LIST_HEAD(&pring->txq);
11489 INIT_LIST_HEAD(&pring->txcmplq);
11490 INIT_LIST_HEAD(&pring->iocb_continueq);
11491 spin_lock_init(&pring->ring_lock);
11492 }
11493 pring = phba->sli4_hba.els_wq->pring;
11494 pring->flag = 0;
11495 pring->ringno = LPFC_ELS_RING;
11496 pring->txcmplq_cnt = 0;
11497 INIT_LIST_HEAD(&pring->txq);
11498 INIT_LIST_HEAD(&pring->txcmplq);
11499 INIT_LIST_HEAD(&pring->iocb_continueq);
11500 spin_lock_init(&pring->ring_lock);
11501
11502 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11503 pring = phba->sli4_hba.nvmels_wq->pring;
11504 pring->flag = 0;
11505 pring->ringno = LPFC_ELS_RING;
11506 pring->txcmplq_cnt = 0;
11507 INIT_LIST_HEAD(&pring->txq);
11508 INIT_LIST_HEAD(&pring->txcmplq);
11509 INIT_LIST_HEAD(&pring->iocb_continueq);
11510 spin_lock_init(&pring->ring_lock);
11511 }
11512
11513 spin_unlock_irq(&phba->hbalock);
11514 }
11515
11516
11517
11518
11519
11520
11521
11522
11523
11524
11525
11526
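/**
 * lpfc_sli_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * Called at driver attach time to initialize the mailbox queues and the
 * SLI-3 ring indices, lists and locks.
 **/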
11527 void
11528 lpfc_sli_queue_init(struct lpfc_hba *phba)
11529 {
11530 struct lpfc_sli *psli;
11531 struct lpfc_sli_ring *pring;
11532 int i;
11533
11534 psli = &phba->sli;
11535 spin_lock_irq(&phba->hbalock);
11536 INIT_LIST_HEAD(&psli->mboxq);
11537 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11538
11539 for (i = 0; i < psli->num_rings; i++) {
11540 pring = &psli->sli3_ring[i];
11541 pring->ringno = i;
11542 pring->sli.sli3.next_cmdidx = 0;
11543 pring->sli.sli3.local_getidx = 0;
11544 pring->sli.sli3.cmdidx = 0;
11545 INIT_LIST_HEAD(&pring->iocb_continueq);
11546 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11547 INIT_LIST_HEAD(&pring->postbufq);
11548 pring->flag = 0;
11549 INIT_LIST_HEAD(&pring->txq);
11550 INIT_LIST_HEAD(&pring->txcmplq);
11551 spin_lock_init(&pring->ring_lock);
11552 }
11553 spin_unlock_irq(&phba->hbalock);
11554 }
11555
11556
11557
11558
11559
11560
11561
11562
11563
11564
11565
11566
11567
11568
11569
11570
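/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * Flushes mailbox commands in all three possible stages: the pending queue,
 * the active (outstanding) command, and the completion queue. Each flushed
 * command completes with MBX_NOT_FINISHED status.
 **/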
11571 static void
11572 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11573 {
11574 LIST_HEAD(completions);
11575 struct lpfc_sli *psli = &phba->sli;
11576 LPFC_MBOXQ_t *pmb;
11577 unsigned long iflag;
11578
11579
11580 local_bh_disable();
11581
11582
11583 spin_lock_irqsave(&phba->hbalock, iflag);
11584
11585
11586 list_splice_init(&phba->sli.mboxq, &completions);
11587
11588 if (psli->mbox_active) {
11589 list_add_tail(&psli->mbox_active->list, &completions);
11590 psli->mbox_active = NULL;
11591 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11592 }
11593
11594 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11595 spin_unlock_irqrestore(&phba->hbalock, iflag);
11596
11597
11598 local_bh_enable();
11599
11600
11601 while (!list_empty(&completions)) {
11602 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11603 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11604 if (pmb->mbox_cmpl)
11605 pmb->mbox_cmpl(phba, pmb);
11606 }
11607 }
11608
11609
11610
11611
11612
11613
11614
11615
11616
11617
11618
11619
11620
11621
11622
11623
11624
11625
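/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * Called when a vport is being torn down. Cancels the vport's pending txq
 * iocbs with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN and issues aborts for its
 * outstanding txcmplq iocbs on every ring. Always returns 1.
 **/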
11626 int
11627 lpfc_sli_host_down(struct lpfc_vport *vport)
11628 {
11629 LIST_HEAD(completions);
11630 struct lpfc_hba *phba = vport->phba;
11631 struct lpfc_sli *psli = &phba->sli;
11632 struct lpfc_queue *qp = NULL;
11633 struct lpfc_sli_ring *pring;
11634 struct lpfc_iocbq *iocb, *next_iocb;
11635 int i;
11636 unsigned long flags = 0;
11637 uint16_t prev_pring_flag;
11638
11639 lpfc_cleanup_discovery_resources(vport);
11640
11641 spin_lock_irqsave(&phba->hbalock, flags);
11642
11643
11644
11645
11646
11647
11648 if (phba->sli_rev != LPFC_SLI_REV4) {
11649 for (i = 0; i < psli->num_rings; i++) {
11650 pring = &psli->sli3_ring[i];
11651 prev_pring_flag = pring->flag;
11652
11653 if (pring->ringno == LPFC_ELS_RING) {
11654 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11655
11656 set_bit(LPFC_DATA_READY, &phba->data_flags);
11657 }
11658 list_for_each_entry_safe(iocb, next_iocb,
11659 &pring->txq, list) {
11660 if (iocb->vport != vport)
11661 continue;
11662 list_move_tail(&iocb->list, &completions);
11663 }
11664 list_for_each_entry_safe(iocb, next_iocb,
11665 &pring->txcmplq, list) {
11666 if (iocb->vport != vport)
11667 continue;
11668 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11669 NULL);
11670 }
11671 pring->flag = prev_pring_flag;
11672 }
11673 } else {
11674 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11675 pring = qp->pring;
11676 if (!pring)
11677 continue;
11678 if (pring == phba->sli4_hba.els_wq->pring) {
11679 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11680
11681 set_bit(LPFC_DATA_READY, &phba->data_flags);
11682 }
11683 prev_pring_flag = pring->flag;
11684 spin_lock(&pring->ring_lock);
11685 list_for_each_entry_safe(iocb, next_iocb,
11686 &pring->txq, list) {
11687 if (iocb->vport != vport)
11688 continue;
11689 list_move_tail(&iocb->list, &completions);
11690 }
11691 spin_unlock(&pring->ring_lock);
11692 list_for_each_entry_safe(iocb, next_iocb,
11693 &pring->txcmplq, list) {
11694 if (iocb->vport != vport)
11695 continue;
11696 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11697 NULL);
11698 }
11699 pring->flag = prev_pring_flag;
11700 }
11701 }
11702 spin_unlock_irqrestore(&phba->hbalock, flags);
11703
11704
11705 lpfc_issue_hb_tmo(phba);
11706
11707
11708 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11709 IOERR_SLI_DOWN);
11710 return 1;
11711 }
11712
11713
11714
11715
11716
11717
11718
11719
11720
11721
11722
11723
11724
11725
11726
11727
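/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * Shuts down the mailbox sub-system, cancels all pending txq iocbs with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, frees the deferred ELS buffers, and
 * stops the mailbox timer. Always returns 1.
 **/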
11728 int
11729 lpfc_sli_hba_down(struct lpfc_hba *phba)
11730 {
11731 LIST_HEAD(completions);
11732 struct lpfc_sli *psli = &phba->sli;
11733 struct lpfc_queue *qp = NULL;
11734 struct lpfc_sli_ring *pring;
11735 struct lpfc_dmabuf *buf_ptr;
11736 unsigned long flags = 0;
11737 int i;
11738
11739
11740 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11741
11742 lpfc_hba_down_prep(phba);
11743
11744
11745 local_bh_disable();
11746
11747 lpfc_fabric_abort_hba(phba);
11748
11749 spin_lock_irqsave(&phba->hbalock, flags);
11750
11751
11752
11753
11754
11755 if (phba->sli_rev != LPFC_SLI_REV4) {
11756 for (i = 0; i < psli->num_rings; i++) {
11757 pring = &psli->sli3_ring[i];
11758
11759 if (pring->ringno == LPFC_ELS_RING) {
11760 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11761
11762 set_bit(LPFC_DATA_READY, &phba->data_flags);
11763 }
11764 list_splice_init(&pring->txq, &completions);
11765 }
11766 } else {
11767 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11768 pring = qp->pring;
11769 if (!pring)
11770 continue;
11771 spin_lock(&pring->ring_lock);
11772 list_splice_init(&pring->txq, &completions);
11773 spin_unlock(&pring->ring_lock);
11774 if (pring == phba->sli4_hba.els_wq->pring) {
11775 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11776
11777 set_bit(LPFC_DATA_READY, &phba->data_flags);
11778 }
11779 }
11780 }
11781 spin_unlock_irqrestore(&phba->hbalock, flags);
11782
11783
11784 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11785 IOERR_SLI_DOWN);
11786
11787 spin_lock_irqsave(&phba->hbalock, flags);
11788 list_splice_init(&phba->elsbuf, &completions);
11789 phba->elsbuf_cnt = 0;
11790 phba->elsbuf_prev_cnt = 0;
11791 spin_unlock_irqrestore(&phba->hbalock, flags);
11792
11793 while (!list_empty(&completions)) {
11794 list_remove_head(&completions, buf_ptr,
11795 struct lpfc_dmabuf, list);
11796 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11797 kfree(buf_ptr);
11798 }
11799
11800
11801 local_bh_enable();
11802
11803
11804 del_timer_sync(&psli->mbox_tmo);
11805
11806 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11807 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11808 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11809
11810 return 1;
11811 }
11812
11813
11814
11815
11816
11817
11818
11819
11820
11821
11822
11823
11824
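/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (a multiple of 4).
 *
 * This function copies @cnt bytes one 32-bit word at a time, converting
 * each word from little endian to host CPU byte order. It is used for
 * copying between the driver and little-endian SLI memory.
 **/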
11825 void
11826 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11827 {
11828 uint32_t *src = srcp;
11829 uint32_t *dest = destp;
11830 uint32_t ldata;
11831 int i;
11832
11833 	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11834 ldata = *src;
11835 ldata = le32_to_cpu(ldata);
11836 *dest = ldata;
11837 src++;
11838 dest++;
11839 }
11840 }
11841
11842
11843
11844
11845
11846
11847
11848
11849
11850
11851
11852
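/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (a multiple of 4).
 *
 * Like lpfc_sli_pcimem_bcopy(), except each 32-bit word is converted
 * from big endian to host CPU byte order while copying.
 **/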
11853 void
11854 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11855 {
11856 uint32_t *src = srcp;
11857 uint32_t *dest = destp;
11858 uint32_t ldata;
11859 int i;
11860
11861 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11862 ldata = *src;
11863 ldata = be32_to_cpu(ldata);
11864 *dest = ldata;
11865 src++;
11866 dest++;
11867 }
11868 }
11869
11870
11871
11872
11873
11874
11875
11876
11877
11878
11879
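/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function adds the given buffer to the tail of the ring's
 * postbufq under the hbalock and updates the queue count. It always
 * returns 0.
 **/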
11880 int
11881 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11882 struct lpfc_dmabuf *mp)
11883 {
11884
11885
11886 spin_lock_irq(&phba->hbalock);
11887 list_add_tail(&mp->list, &pring->postbufq);
11888 pring->postbufq_cnt++;
11889 spin_unlock_irq(&phba->hbalock);
11890 return 0;
11891 }
11892
11893
11894
11895
11896
11897
11898
11899
11900
11901
11902
11903
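/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This
 * function allocates a tag for a buffer posted using the
 * CMD_QUE_XRI64_CX iocb. The QUE_BUFTAG_BIT is set so the tag can never
 * collide with an iotag.
 **/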
11904 uint32_t
11905 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11906 {
11907 spin_lock_irq(&phba->hbalock);
11908 phba->buffer_tag_count++;
11909
11910
11911
11912
11913 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11914 spin_unlock_irq(&phba->hbalock);
11915 return phba->buffer_tag_count;
11916 }
11917
11918
11919
11920
11921
11922
11923
11924
11925
11926
11927
11928
11929
11930
11931
11932
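/**
 * lpfc_sli_ring_taggedbuf_get - find a buffer in the postbufq by tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * This function searches the ring's postbufq, under the hbalock, for a
 * buffer whose buffer_tag matches @tag. A matching buffer is removed
 * from the queue and returned; otherwise an error is logged and NULL is
 * returned.
 **/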
11933 struct lpfc_dmabuf *
11934 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11935 uint32_t tag)
11936 {
11937 struct lpfc_dmabuf *mp, *next_mp;
11938 struct list_head *slp = &pring->postbufq;
11939
11940
11941 spin_lock_irq(&phba->hbalock);
11942 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11943 if (mp->buffer_tag == tag) {
11944 list_del_init(&mp->list);
11945 pring->postbufq_cnt--;
11946 spin_unlock_irq(&phba->hbalock);
11947 return mp;
11948 }
11949 }
11950
11951 spin_unlock_irq(&phba->hbalock);
11952 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11953 "0402 Cannot find virtual addr for buffer tag on "
11954 "ring %d Data x%lx x%px x%px x%x\n",
11955 pring->ringno, (unsigned long)tag,
11956 slp->next, slp->prev, pring->postbufq_cnt);
11957
11958 return NULL;
11959 }
11960
11961
11962
11963
11964
11965
11966
11967
11968
11969
11970
11971
11972
11973
11974
11975
11976
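/**
 * lpfc_sli_ringpostbuf_get - search postbufq for an unsolicited buffer
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the ring's postbufq, under the hbalock, for a
 * buffer whose physical address matches @phys. A matching buffer is
 * removed from the queue and returned; otherwise an error is logged and
 * NULL is returned.
 **/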
11977 struct lpfc_dmabuf *
11978 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11979 dma_addr_t phys)
11980 {
11981 struct lpfc_dmabuf *mp, *next_mp;
11982 struct list_head *slp = &pring->postbufq;
11983
11984
11985 spin_lock_irq(&phba->hbalock);
11986 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11987 if (mp->phys == phys) {
11988 list_del_init(&mp->list);
11989 pring->postbufq_cnt--;
11990 spin_unlock_irq(&phba->hbalock);
11991 return mp;
11992 }
11993 }
11994
11995 spin_unlock_irq(&phba->hbalock);
11996 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11997 "0410 Cannot find virtual addr for mapped buf on "
11998 "ring %d Data x%llx x%px x%px x%x\n",
11999 pring->ringno, (unsigned long long)phys,
12000 slp->next, slp->prev, pring->postbufq_cnt);
12001 return NULL;
12002 }
12003
12004
12005
12006
12007
12008
12009
12010
12011
12012
12013
12014
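/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This is the completion handler for abort iocbs issued against ELS
 * commands. A failed abort is logged, except for the expected SLI-3
 * IOSTAT_LOCAL_REJECT/IOERR_ABORT_REQUESTED case, and the abort iocb is
 * then released back to the pool.
 **/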
12015 static void
12016 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12017 struct lpfc_iocbq *rspiocb)
12018 {
12019 u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12020 u32 ulp_word4 = get_job_word4(phba, rspiocb);
12021 u8 cmnd = get_job_cmnd(phba, cmdiocb);
12022
12023 if (ulp_status) {
12024
12025
12026
12027
12028 if (phba->sli_rev < LPFC_SLI_REV4) {
12029 if (cmnd == CMD_ABORT_XRI_CX &&
12030 ulp_status == IOSTAT_LOCAL_REJECT &&
12031 ulp_word4 == IOERR_ABORT_REQUESTED) {
12032 goto release_iocb;
12033 }
12034 }
12035
12036 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
12037 "0327 Cannot abort els iocb x%px "
12038 "with io cmd xri %x abort tag : x%x, "
12039 "abort status %x abort code %x\n",
12040 cmdiocb, get_job_abtsiotag(phba, cmdiocb),
12041 (phba->sli_rev == LPFC_SLI_REV4) ?
12042 get_wqe_reqtag(cmdiocb) :
12043 cmdiocb->iocb.un.acxri.abortContextTag,
12044 ulp_status, ulp_word4);
12045
12046 }
12047 release_iocb:
12048 lpfc_sli_release_iocbq(phba, cmdiocb);
12049 return;
12050 }
12051
12052
12053
12054
12055
12056
12057
12058
12059
12060
12061
12062
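/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is called when an aborted ELS or CT command completes.
 * The completion is logged and ignored, any mailbox command still
 * attached to the iocb (SLI-3 only) is cleaned up, the iocb resources
 * are freed and the node reference is dropped.
 **/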
12063 void
12064 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12065 struct lpfc_iocbq *rspiocb)
12066 {
12067 struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12068 IOCB_t *irsp;
12069 LPFC_MBOXQ_t *mbox;
12070 u32 ulp_command, ulp_status, ulp_word4, iotag;
12071
12072 ulp_command = get_job_cmnd(phba, cmdiocb);
12073 ulp_status = get_job_ulpstatus(phba, rspiocb);
12074 ulp_word4 = get_job_word4(phba, rspiocb);
12075
12076 if (phba->sli_rev == LPFC_SLI_REV4) {
12077 iotag = get_wqe_reqtag(cmdiocb);
12078 } else {
12079 irsp = &rspiocb->iocb;
12080 iotag = irsp->ulpIoTag;
12081
12082
12083
12084
12085
12086 if (cmdiocb->context_un.mbox) {
12087 mbox = cmdiocb->context_un.mbox;
12088 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
12089 cmdiocb->context_un.mbox = NULL;
12090 }
12091 }
12092
12093
12094 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12095 "0139 Ignoring ELS cmd code x%x completion Data: "
12096 "x%x x%x x%x x%px\n",
12097 ulp_command, ulp_status, ulp_word4, iotag,
12098 cmdiocb->ndlp);
12099
12100
12101
12102
12103 if (ulp_command == CMD_GEN_REQUEST64_CR)
12104 lpfc_ct_free_iocb(phba, cmdiocb);
12105 else
12106 lpfc_els_free_iocb(phba, cmdiocb);
12107
12108 lpfc_nlp_put(ndlp);
12109 }
12110
12111
12112
12113
12114
12115
12116
12117
12118
12119
12120
12121
12122
12123
12124
12125
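/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @cmpl: completion function, or NULL to use lpfc_sli_abort_els_cmpl.
 *
 * This function issues an abort iocb for the provided command iocb and
 * is called with the hbalock held. It returns IOCB_SUCCESS when the
 * abort was issued, and IOCB_ERROR, IOCB_NORESOURCE or IOCB_ABORTING
 * otherwise.
 **/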
12126 int
12127 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12128 struct lpfc_iocbq *cmdiocb, void *cmpl)
12129 {
12130 struct lpfc_vport *vport = cmdiocb->vport;
12131 struct lpfc_iocbq *abtsiocbp;
12132 int retval = IOCB_ERROR;
12133 unsigned long iflags;
12134 struct lpfc_nodelist *ndlp = NULL;
12135 u32 ulp_command = get_job_cmnd(phba, cmdiocb);
12136 u16 ulp_context, iotag;
12137 bool ia;
12138
12139
12140
12141
12142
12143
12144 if (ulp_command == CMD_ABORT_XRI_WQE ||
12145 ulp_command == CMD_ABORT_XRI_CN ||
12146 ulp_command == CMD_CLOSE_XRI_CN ||
12147 cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12148 return IOCB_ABORTING;
12149
12150 if (!pring) {
12151 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12152 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12153 else
12154 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12155 return retval;
12156 }
12157
12158
12159
12160
12161
12162 if ((vport->load_flag & FC_UNLOADING) &&
12163 pring->ringno == LPFC_ELS_RING) {
12164 if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12165 cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12166 else
12167 cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12168 return retval;
12169 }
12170
12171
12172 abtsiocbp = __lpfc_sli_get_iocbq(phba);
12173 if (abtsiocbp == NULL)
12174 return IOCB_NORESOURCE;
12175
12176
12177
12178
12179 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12180
12181 if (phba->sli_rev == LPFC_SLI_REV4) {
12182 ulp_context = cmdiocb->sli4_xritag;
12183 iotag = abtsiocbp->iotag;
12184 } else {
12185 iotag = cmdiocb->iocb.ulpIoTag;
12186 if (pring->ringno == LPFC_ELS_RING) {
12187 ndlp = cmdiocb->ndlp;
12188 ulp_context = ndlp->nlp_rpi;
12189 } else {
12190 ulp_context = cmdiocb->iocb.ulpContext;
12191 }
12192 }
12193
12194 if (phba->link_state < LPFC_LINK_UP ||
12195 (phba->sli_rev == LPFC_SLI_REV4 &&
12196 phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
12197 (phba->link_flag & LS_EXTERNAL_LOOPBACK))
12198 ia = true;
12199 else
12200 ia = false;
12201
12202 lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
12203 cmdiocb->iocb.ulpClass,
12204 LPFC_WQE_CQ_ID_DEFAULT, ia, false);
12205
12206 abtsiocbp->vport = vport;
12207
12208
12209 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12210 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12211 abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12212
12213 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12214 abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12215
12216 if (cmpl)
12217 abtsiocbp->cmd_cmpl = cmpl;
12218 else
12219 abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12221
12222 if (phba->sli_rev == LPFC_SLI_REV4) {
12223 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12224 if (unlikely(pring == NULL))
12225 goto abort_iotag_exit;
12226
12227 spin_lock_irqsave(&pring->ring_lock, iflags);
12228 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12229 abtsiocbp, 0);
12230 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12231 } else {
12232 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12233 abtsiocbp, 0);
12234 }
12235
12236 abort_iotag_exit:
12237
12238 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12239 "0339 Abort IO XRI x%x, Original iotag x%x, "
12240 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
12241 "retval x%x\n",
12242 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
12243 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
12244 retval);
12245 if (retval) {
12246 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12247 __lpfc_sli_release_iocbq(phba, abtsiocbp);
12248 }
12249
12250
12251
12252
12253
12254
12255 return retval;
12256 }
12257
12258
12259
12260
12261
12262
12263
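/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine aborts all outstanding iocbs, on every SLI-3 ring or,
 * for SLI-4, on the pring of every work queue.
 **/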
12264 void
12265 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12266 {
12267 struct lpfc_sli *psli = &phba->sli;
12268 struct lpfc_sli_ring *pring;
12269 struct lpfc_queue *qp = NULL;
12270 int i;
12271
12272 if (phba->sli_rev != LPFC_SLI_REV4) {
12273 for (i = 0; i < psli->num_rings; i++) {
12274 pring = &psli->sli3_ring[i];
12275 lpfc_sli_abort_iocb_ring(phba, pring);
12276 }
12277 return;
12278 }
12279 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12280 pring = qp->pring;
12281 if (!pring)
12282 continue;
12283 lpfc_sli_abort_iocb_ring(phba, pring);
12284 }
12285 }
12286
12287
12288
12289
12290
12291
12292
12293
12294
12295
12296
12297
12298
12299
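/**
 * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
 * @iocbq: Pointer to the iocbq struct representing the device command.
 * @vport: Pointer to driver virtual port object.
 *
 * This routine checks that @iocbq belongs to @vport, is an active FCP
 * command on the txcmplq, is not already being aborted, and is not
 * itself an abort or close command.
 *
 * Return: 0 if the iocb is a candidate for abort, -ENODEV or -EINVAL
 * otherwise.
 **/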
12300 static int
12301 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12302 struct lpfc_vport *vport)
12303 {
12304 u8 ulp_command;
12305
12306
12307 if (!iocbq || iocbq->vport != vport)
12308 return -ENODEV;
12309
12310
12311
12312
12313 ulp_command = get_job_cmnd(vport->phba, iocbq);
12314 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12315 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12316 (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12317 (ulp_command == CMD_ABORT_XRI_CN ||
12318 ulp_command == CMD_CLOSE_XRI_CN ||
12319 ulp_command == CMD_ABORT_XRI_WQE))
12320 return -EINVAL;
12321
12322 return 0;
12323 }
12324
12325
12326
12327
12328
12329
12330
12331
12332
12333
12334
12335
12336
12337
12338
12339
12340
12341
12342
12343
12344
12345
12346
12347
12348
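/**
 * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function matches an active FCP command against the given LUN,
 * target or host context. Returns 0 on a match, 1 otherwise.
 **/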
12349 static int
12350 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12351 uint16_t tgt_id, uint64_t lun_id,
12352 lpfc_ctx_cmd ctx_cmd)
12353 {
12354 struct lpfc_io_buf *lpfc_cmd;
12355 int rc = 1;
12356
12357 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12358
12359 if (lpfc_cmd->pCmd == NULL)
12360 return rc;
12361
12362 switch (ctx_cmd) {
12363 case LPFC_CTX_LUN:
12364 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12365 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12366 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12367 rc = 0;
12368 break;
12369 case LPFC_CTX_TGT:
12370 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12371 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12372 rc = 0;
12373 break;
12374 case LPFC_CTX_HOST:
12375 rc = 0;
12376 break;
12377 default:
12378 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12379 __func__, ctx_cmd);
12380 break;
12381 }
12382
12383 return rc;
12384 }
12385
12386
12387
12388
12389
12390
12391
12392
12393
12394
12395
12396
12397
12398
12399
12400
12401
12402
12403
12404
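/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function walks the iotag lookup array under the hbalock and
 * returns the number of active FCP commands, including pending aborts,
 * that match the given LUN, target or host context.
 **/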
12405 int
12406 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12407 lpfc_ctx_cmd ctx_cmd)
12408 {
12409 struct lpfc_hba *phba = vport->phba;
12410 struct lpfc_iocbq *iocbq;
12411 int sum, i;
12412 unsigned long iflags;
12413 u8 ulp_command;
12414
12415 spin_lock_irqsave(&phba->hbalock, iflags);
12416 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12417 iocbq = phba->sli.iocbq_lookup[i];
12418
12419 if (!iocbq || iocbq->vport != vport)
12420 continue;
12421 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12422 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12423 continue;
12424
12425
12426 ulp_command = get_job_cmnd(phba, iocbq);
12427 if (ulp_command == CMD_ABORT_XRI_CN ||
12428 ulp_command == CMD_CLOSE_XRI_CN ||
12429 ulp_command == CMD_ABORT_XRI_WQE) {
12430 sum++;
12431 continue;
12432 }
12433
12434 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12435 ctx_cmd) == 0)
12436 sum++;
12437 }
12438 spin_unlock_irqrestore(&phba->hbalock, iflags);
12439
12440 return sum;
12441 }
12442
12443
12444
12445
12446
12447
12448
12449
12450
12451
12452
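/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. It logs
 * the completion status and releases the abort iocb back to the pool.
 **/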
12453 void
12454 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12455 struct lpfc_iocbq *rspiocb)
12456 {
12457 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12458 "3096 ABORT_XRI_CX completing on rpi x%x "
12459 "original iotag x%x, abort cmd iotag x%x "
12460 "status 0x%x, reason 0x%x\n",
12461 (phba->sli_rev == LPFC_SLI_REV4) ?
12462 cmdiocb->sli4_xritag :
12463 cmdiocb->iocb.un.acxri.abortContextTag,
12464 get_job_abtsiotag(phba, cmdiocb),
12465 cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
12466 get_job_word4(phba, rspiocb));
12467 lpfc_sli_release_iocbq(phba, cmdiocb);
12468 return;
12469 }
12470
12471
12472
12473
12474
12475
12476
12477
12478
12479
12480
12481
12482
12483
12484
12485
12486
12487
12488
12489
12490
12491
12492
12493
12494
12495
12496
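/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function issues an abort iocb for every active FCP command that
 * matches the given LUN, target or host context, unless an HBA-wide IO
 * queue flush is already in progress. It returns the number of aborts
 * that failed to issue.
 **/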
12497 int
12498 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12499 lpfc_ctx_cmd abort_cmd)
12500 {
12501 struct lpfc_hba *phba = vport->phba;
12502 struct lpfc_sli_ring *pring = NULL;
12503 struct lpfc_iocbq *iocbq;
12504 int errcnt = 0, ret_val = 0;
12505 unsigned long iflags;
12506 int i;
12507
12508
12509 if (phba->hba_flag & HBA_IOQ_FLUSH)
12510 return errcnt;
12511
12512 for (i = 1; i <= phba->sli.last_iotag; i++) {
12513 iocbq = phba->sli.iocbq_lookup[i];
12514
12515 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12516 continue;
12517
12518 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12519 abort_cmd) != 0)
12520 continue;
12521
12522 spin_lock_irqsave(&phba->hbalock, iflags);
12523 if (phba->sli_rev == LPFC_SLI_REV3) {
12524 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12525 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12526 pring = lpfc_sli4_calc_ring(phba, iocbq);
12527 }
12528 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12529 lpfc_sli_abort_fcp_cmpl);
12530 spin_unlock_irqrestore(&phba->hbalock, iflags);
12531 if (ret_val != IOCB_SUCCESS)
12532 errcnt++;
12533 }
12534
12535 return errcnt;
12536 }
12537
12538
12539
12540
12541
12542
12543
12544
12545
12546
12547
12548
12549
12550
12551
12552
12553
12554
12555
12556
12557
12558
12559
12560
12561
12562
12563
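/**
 * lpfc_sli_abort_taskmgmt - issue aborts for a host/target/LUN task mgmt
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * Like lpfc_sli_abort_iocb(), but used from the task management paths:
 * each command is revalidated under the io buffer's buf_lock (and the
 * SLI-4 ring_lock) before its abort is issued. Returns the number of
 * aborts successfully issued.
 **/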
12564 int
12565 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12566 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12567 {
12568 struct lpfc_hba *phba = vport->phba;
12569 struct lpfc_io_buf *lpfc_cmd;
12570 struct lpfc_iocbq *abtsiocbq;
12571 struct lpfc_nodelist *ndlp = NULL;
12572 struct lpfc_iocbq *iocbq;
12573 int sum, i, ret_val;
12574 unsigned long iflags;
12575 struct lpfc_sli_ring *pring_s4 = NULL;
12576 u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
12577 bool ia;
12578
12579 spin_lock_irqsave(&phba->hbalock, iflags);
12580
12581
12582 if (phba->hba_flag & HBA_IOQ_FLUSH) {
12583 spin_unlock_irqrestore(&phba->hbalock, iflags);
12584 return 0;
12585 }
12586 sum = 0;
12587
12588 for (i = 1; i <= phba->sli.last_iotag; i++) {
12589 iocbq = phba->sli.iocbq_lookup[i];
12590
12591 if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12592 continue;
12593
12594 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12595 cmd) != 0)
12596 continue;
12597
12598
12599 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12600 spin_lock(&lpfc_cmd->buf_lock);
12601
12602 if (!lpfc_cmd->pCmd) {
12603 spin_unlock(&lpfc_cmd->buf_lock);
12604 continue;
12605 }
12606
12607 if (phba->sli_rev == LPFC_SLI_REV4) {
12608 pring_s4 =
12609 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12610 if (!pring_s4) {
12611 spin_unlock(&lpfc_cmd->buf_lock);
12612 continue;
12613 }
12614
12615 spin_lock(&pring_s4->ring_lock);
12616 }
12617
12618
12619
12620
12621
12622 if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12623 !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12624 if (phba->sli_rev == LPFC_SLI_REV4)
12625 spin_unlock(&pring_s4->ring_lock);
12626 spin_unlock(&lpfc_cmd->buf_lock);
12627 continue;
12628 }
12629
12630
12631 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12632 if (!abtsiocbq) {
12633 if (phba->sli_rev == LPFC_SLI_REV4)
12634 spin_unlock(&pring_s4->ring_lock);
12635 spin_unlock(&lpfc_cmd->buf_lock);
12636 continue;
12637 }
12638
12639 if (phba->sli_rev == LPFC_SLI_REV4) {
12640 iotag = abtsiocbq->iotag;
12641 ulp_context = iocbq->sli4_xritag;
12642 cqid = lpfc_cmd->hdwq->io_cq_map;
12643 } else {
12644 iotag = iocbq->iocb.ulpIoTag;
12645 if (pring->ringno == LPFC_ELS_RING) {
12646 ndlp = iocbq->ndlp;
12647 ulp_context = ndlp->nlp_rpi;
12648 } else {
12649 ulp_context = iocbq->iocb.ulpContext;
12650 }
12651 }
12652
12653 ndlp = lpfc_cmd->rdata->pnode;
12654
12655 if (lpfc_is_link_up(phba) &&
12656 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
12657 !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
12658 ia = false;
12659 else
12660 ia = true;
12661
12662 lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
12663 iocbq->iocb.ulpClass, cqid,
12664 ia, false);
12665
12666 abtsiocbq->vport = vport;
12667
12668
12669 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12670 if (iocbq->cmd_flag & LPFC_IO_FCP)
12671 abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12672 if (iocbq->cmd_flag & LPFC_IO_FOF)
12673 abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12674
12675
12676 abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12677
12678
12679
12680
12681
12682 iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12683
12684 if (phba->sli_rev == LPFC_SLI_REV4) {
12685 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12686 abtsiocbq, 0);
12687 spin_unlock(&pring_s4->ring_lock);
12688 } else {
12689 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12690 abtsiocbq, 0);
12691 }
12692
12693 spin_unlock(&lpfc_cmd->buf_lock);
12694
12695 if (ret_val == IOCB_ERROR)
12696 __lpfc_sli_release_iocbq(phba, abtsiocbq);
12697 else
12698 sum++;
12699 }
12700 spin_unlock_irqrestore(&phba->hbalock, iflags);
12701 return sum;
12702 }
12703
12704
12705
12706
12707
12708
12709
12710
12711
12712
12713
12714
12715
12716
12717
12718
12719
12720
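/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This completion handler wakes up the thread sleeping in
 * lpfc_sli_issue_iocb_wait(). If the waiter has already timed out
 * (LPFC_IO_WAKE_TMO set), the iocb's original completion handler is
 * invoked instead; otherwise the response is copied back, any
 * exchange-busy state is propagated to the io buffer, and the wait
 * queue is woken.
 **/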
12721 static void
12722 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12723 struct lpfc_iocbq *cmdiocbq,
12724 struct lpfc_iocbq *rspiocbq)
12725 {
12726 wait_queue_head_t *pdone_q;
12727 unsigned long iflags;
12728 struct lpfc_io_buf *lpfc_cmd;
12729 size_t offset = offsetof(struct lpfc_iocbq, wqe);
12730
12731 spin_lock_irqsave(&phba->hbalock, iflags);
12732 if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
12733
12734
12735
12736
12737
12738
12739
12740 spin_unlock_irqrestore(&phba->hbalock, iflags);
12741 cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
12742 cmdiocbq->wait_cmd_cmpl = NULL;
12743 if (cmdiocbq->cmd_cmpl)
12744 cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
12745 else
12746 lpfc_sli_release_iocbq(phba, cmdiocbq);
12747 return;
12748 }
12749
12750
12751 cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
12752 if (cmdiocbq->rsp_iocb && rspiocbq)
12753 memcpy((char *)cmdiocbq->rsp_iocb + offset,
12754 (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
12755
12756
12757 if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
12758 !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
12759 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
12760 cur_iocbq);
12761 if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
12762 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12763 else
12764 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
12765 }
12766
12767 pdone_q = cmdiocbq->context_un.wait_queue;
12768 if (pdone_q)
12769 wake_up(pdone_q);
12770 spin_unlock_irqrestore(&phba->hbalock, iflags);
12771 return;
12772 }
12773
12774
12775
12776
12777
12778
12779
12780
12781
12782
12783
12784
12785
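/**
 * lpfc_chk_iocb_flg - Test an iocb flag under the hbalock
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to driver iocb object.
 * @flag: Flag to test.
 *
 * This function tests @flag in the iocb's cmd_flag while holding the
 * hbalock, giving wait_event_timeout() callers a consistent view.
 **/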
12786 static int
12787 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12788 struct lpfc_iocbq *piocbq, uint32_t flag)
12789 {
12790 unsigned long iflags;
12791 int ret;
12792
12793 spin_lock_irqsave(&phba->hbalock, iflags);
12794 ret = piocbq->cmd_flag & flag;
12795 spin_unlock_irqrestore(&phba->hbalock, iflags);
12796 return ret;
12797
12798 }
12799
12800
12801
12802
12803
12804
12805
12806
12807
12808
12809
12810
12811
12812
12813
12814
12815
12816
12817
12818
12819
12820
12821
12822
12823
12824
12825
12826
12827
12828
12829
12830
12831
12832
12833
12834
12835
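/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb, or NULL.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb and sleeps until it completes or
 * @timeout seconds expire. If @prspiocbq is supplied, the completed
 * response is copied into it. Returns IOCB_SUCCESS on completion,
 * IOCB_TIMEDOUT on timeout, and IOCB_BUSY or IOCB_ERROR when the iocb
 * could not be issued.
 **/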
12836 int
12837 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12838 uint32_t ring_number,
12839 struct lpfc_iocbq *piocb,
12840 struct lpfc_iocbq *prspiocbq,
12841 uint32_t timeout)
12842 {
12843 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12844 long timeleft, timeout_req = 0;
12845 int retval = IOCB_SUCCESS;
12846 uint32_t creg_val;
12847 struct lpfc_iocbq *iocb;
12848 int txq_cnt = 0;
12849 int txcmplq_cnt = 0;
12850 struct lpfc_sli_ring *pring;
12851 unsigned long iflags;
12852 bool iocb_completed = true;
12853
12854 if (phba->sli_rev >= LPFC_SLI_REV4) {
12855 lpfc_sli_prep_wqe(phba, piocb);
12856
12857 pring = lpfc_sli4_calc_ring(phba, piocb);
12858 } else
12859 pring = &phba->sli.sli3_ring[ring_number];
12860
12861
12862
12863
12864 if (prspiocbq) {
12865 if (piocb->rsp_iocb)
12866 return IOCB_ERROR;
12867 piocb->rsp_iocb = prspiocbq;
12868 }
12869
12870 piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
12871 piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
12872 piocb->context_un.wait_queue = &done_q;
12873 piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12874
12875 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12876 if (lpfc_readl(phba->HCregaddr, &creg_val))
12877 return IOCB_ERROR;
12878 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12879 writel(creg_val, phba->HCregaddr);
12880 readl(phba->HCregaddr);
12881 }
12882
12883 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12884 SLI_IOCB_RET_IOCB);
12885 if (retval == IOCB_SUCCESS) {
12886 timeout_req = msecs_to_jiffies(timeout * 1000);
12887 timeleft = wait_event_timeout(done_q,
12888 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12889 timeout_req);
12890 spin_lock_irqsave(&phba->hbalock, iflags);
12891 if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
12892
12893
12894
12895
12896
12897
12898 iocb_completed = false;
12899 piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
12900 }
12901 spin_unlock_irqrestore(&phba->hbalock, iflags);
12902 if (iocb_completed) {
12903 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12904 "0331 IOCB wake signaled\n");
12905
12906
12907
12908
12909
12910 } else if (timeleft == 0) {
12911 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12912 "0338 IOCB wait timeout error - no "
12913 "wake response Data x%x\n", timeout);
12914 retval = IOCB_TIMEDOUT;
12915 } else {
12916 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12917 "0330 IOCB wake NOT set, "
12918 "Data x%x x%lx\n",
12919 timeout, timeleft);
12920 retval = IOCB_TIMEDOUT;
12921 }
12922 } else if (retval == IOCB_BUSY) {
12923 if (phba->cfg_log_verbose & LOG_SLI) {
12924 list_for_each_entry(iocb, &pring->txq, list) {
12925 txq_cnt++;
12926 }
12927 list_for_each_entry(iocb, &pring->txcmplq, list) {
12928 txcmplq_cnt++;
12929 }
12930 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12931 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12932 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12933 }
12934 return retval;
12935 } else {
12936 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12937 "0332 IOCB wait issue failed, Data x%x\n",
12938 retval);
12939 retval = IOCB_ERROR;
12940 }
12941
12942 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12943 if (lpfc_readl(phba->HCregaddr, &creg_val))
12944 return IOCB_ERROR;
12945 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12946 writel(creg_val, phba->HCregaddr);
12947 readl(phba->HCregaddr);
12948 }
12949
12950 if (prspiocbq)
12951 piocb->rsp_iocb = NULL;
12952
12953 piocb->context_un.wait_queue = NULL;
12954 piocb->cmd_cmpl = NULL;
12955 return retval;
12956 }
12957
12958
12959
12960
12961
12962
12963
12964
12965
12966
12967
12968
12969
12970
12971
12972
12973
12974
12975
12976
12977
12978
12979
12980
12981
12982
12983
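/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox command and waits up to @timeout
 * seconds for its completion. It returns MBX_SUCCESS when the command
 * completed; on MBX_TIMEOUT the completion handler is switched to
 * lpfc_sli_def_mbox_cmpl so the late completion can clean up the
 * mailbox resources.
 **/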
12984 int
12985 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12986 uint32_t timeout)
12987 {
12988 struct completion mbox_done;
12989 int retval;
12990 unsigned long flag;
12991
12992 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12993
12994 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12995
12996
12997 init_completion(&mbox_done);
12998 pmboxq->context3 = &mbox_done;
12999
13000 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13001 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13002 wait_for_completion_timeout(&mbox_done,
13003 msecs_to_jiffies(timeout * 1000));
13004
13005 spin_lock_irqsave(&phba->hbalock, flag);
13006 pmboxq->context3 = NULL;
13007
13008
13009
13010
13011 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13012 retval = MBX_SUCCESS;
13013 } else {
13014 retval = MBX_TIMEOUT;
13015 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13016 }
13017 spin_unlock_irqrestore(&phba->hbalock, flag);
13018 }
13019 return retval;
13020 }
13021
13022
13023
13024
13025
13026
13027
13028
13029
13030
13031
13032
13033
13034
13035
13036
13037
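/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context object.
 * @mbx_action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT.
 *
 * This function blocks posting of new asynchronous mailbox commands
 * and, when called with LPFC_MBX_WAIT, polls until the outstanding
 * mailbox command (if any) completes or its timeout expires, before
 * flushing all commands still queued in the mailbox sub-system.
 **/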
13038 void
13039 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13040 {
13041 struct lpfc_sli *psli = &phba->sli;
13042 unsigned long timeout;
13043
13044 if (mbx_action == LPFC_MBX_NO_WAIT) {
13045
13046 msleep(100);
13047 lpfc_sli_mbox_sys_flush(phba);
13048 return;
13049 }
13050 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13051
13052
13053 local_bh_disable();
13054
13055 spin_lock_irq(&phba->hbalock);
13056 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13057
13058 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13059
13060
13061
13062 if (phba->sli.mbox_active)
13063 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13064 phba->sli.mbox_active) *
13065 1000) + jiffies;
13066 spin_unlock_irq(&phba->hbalock);
13067
13068
13069 local_bh_enable();
13070
13071 while (phba->sli.mbox_active) {
13072
13073 msleep(2);
13074 if (time_after(jiffies, timeout))
13075
13076
13077
13078 break;
13079 }
13080 } else {
13081 spin_unlock_irq(&phba->hbalock);
13082
13083
13084 local_bh_enable();
13085 }
13086
13087 lpfc_sli_mbox_sys_flush(phba);
13088 }
13089
13090
13091
13092
13093
13094
13095
13096
13097
13098
13099
13100
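/**
 * lpfc_sli_eratt_read - read SLI-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function reads the host attention register and checks for an
 * error attention. A deferrable firmware error (HS_FFER1 together with
 * another qualifying HS_FFER bit) sets DEFER_ERATT and disables
 * interrupts. Returns 1 when an error attention, or a register read
 * failure, was detected; 0 otherwise.
 **/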
13101 static int
13102 lpfc_sli_eratt_read(struct lpfc_hba *phba)
13103 {
13104 uint32_t ha_copy;
13105
13106
13107 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13108 goto unplug_err;
13109
13110 if (ha_copy & HA_ERATT) {
13111
13112 if (lpfc_sli_read_hs(phba))
13113 goto unplug_err;
13114
13115
13116 if ((HS_FFER1 & phba->work_hs) &&
13117 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13118 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13119 phba->hba_flag |= DEFER_ERATT;
13120
13121 writel(0, phba->HCregaddr);
13122 readl(phba->HCregaddr);
13123 }
13124
13125
13126 phba->work_ha |= HA_ERATT;
13127
13128 phba->hba_flag |= HBA_ERATT_HANDLED;
13129 return 1;
13130 }
13131 return 0;
13132
13133 unplug_err:
13134
13135 phba->work_hs |= UNPLUG_ERR;
13136
13137 phba->work_ha |= HA_ERATT;
13138
13139 phba->hba_flag |= HBA_ERATT_HANDLED;
13140 return 1;
13141 }
13142
13143
13144
13145
13146
13147
13148
13149
13150
13151
13152
13153
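/**
 * lpfc_sli4_eratt_read - read SLI-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function reads the per-interface-type error registers of an
 * SLI-4 port and records the unrecoverable error state. Returns 1 when
 * an error attention, or a register read failure, was detected; 0
 * otherwise.
 **/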
13154 static int
13155 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13156 {
13157 uint32_t uerr_sta_hi, uerr_sta_lo;
13158 uint32_t if_type, portsmphr;
13159 struct lpfc_register portstat_reg;
13160 u32 logmask;
13161
13162
13163
13164
13165
13166 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13167 switch (if_type) {
13168 case LPFC_SLI_INTF_IF_TYPE_0:
13169 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13170 &uerr_sta_lo) ||
13171 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13172 &uerr_sta_hi)) {
13173 phba->work_hs |= UNPLUG_ERR;
13174 phba->work_ha |= HA_ERATT;
13175 phba->hba_flag |= HBA_ERATT_HANDLED;
13176 return 1;
13177 }
13178 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13179 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13180 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13181 "1423 HBA Unrecoverable error: "
13182 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13183 "ue_mask_lo_reg=0x%x, "
13184 "ue_mask_hi_reg=0x%x\n",
13185 uerr_sta_lo, uerr_sta_hi,
13186 phba->sli4_hba.ue_mask_lo,
13187 phba->sli4_hba.ue_mask_hi);
13188 phba->work_status[0] = uerr_sta_lo;
13189 phba->work_status[1] = uerr_sta_hi;
13190 phba->work_ha |= HA_ERATT;
13191 phba->hba_flag |= HBA_ERATT_HANDLED;
13192 return 1;
13193 }
13194 break;
13195 case LPFC_SLI_INTF_IF_TYPE_2:
13196 case LPFC_SLI_INTF_IF_TYPE_6:
13197 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13198 &portstat_reg.word0) ||
13199 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13200 &portsmphr)){
13201 phba->work_hs |= UNPLUG_ERR;
13202 phba->work_ha |= HA_ERATT;
13203 phba->hba_flag |= HBA_ERATT_HANDLED;
13204 return 1;
13205 }
13206 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13207 phba->work_status[0] =
13208 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13209 phba->work_status[1] =
13210 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13211 logmask = LOG_TRACE_EVENT;
13212 if (phba->work_status[0] ==
13213 SLIPORT_ERR1_REG_ERR_CODE_2 &&
13214 phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13215 logmask = LOG_SLI;
13216 lpfc_printf_log(phba, KERN_ERR, logmask,
13217 "2885 Port Status Event: "
13218 "port status reg 0x%x, "
13219 "port smphr reg 0x%x, "
13220 "error 1=0x%x, error 2=0x%x\n",
13221 portstat_reg.word0,
13222 portsmphr,
13223 phba->work_status[0],
13224 phba->work_status[1]);
13225 phba->work_ha |= HA_ERATT;
13226 phba->hba_flag |= HBA_ERATT_HANDLED;
13227 return 1;
13228 }
13229 break;
13230 case LPFC_SLI_INTF_IF_TYPE_1:
13231 default:
13232 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13233 "2886 HBA Error Attention on unsupported "
13234 "if type %d.", if_type);
13235 return 1;
13236 }
13237
13238 return 0;
13239 }
13240
13241
13242
13243
13244
13245
13246
13247
13248
13249
13250
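/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function checks whether a new error attention event is pending,
 * skipping events already handled or deferred and offline PCI channels,
 * and dispatches to the SLI revision specific read routine. Returns 1
 * when a new error attention was latched, 0 otherwise.
 **/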
13251 int
13252 lpfc_sli_check_eratt(struct lpfc_hba *phba)
13253 {
13254 uint32_t ha_copy;
13255
13256
13257
13258
13259 if (phba->link_flag & LS_IGNORE_ERATT)
13260 return 0;
13261
13262
13263 spin_lock_irq(&phba->hbalock);
13264 if (phba->hba_flag & HBA_ERATT_HANDLED) {
13265
13266 spin_unlock_irq(&phba->hbalock);
13267 return 0;
13268 }
13269
13270
13271
13272
13273
13274 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13275 spin_unlock_irq(&phba->hbalock);
13276 return 0;
13277 }
13278
13279
13280 if (unlikely(pci_channel_offline(phba->pcidev))) {
13281 spin_unlock_irq(&phba->hbalock);
13282 return 0;
13283 }
13284
13285 switch (phba->sli_rev) {
13286 case LPFC_SLI_REV2:
13287 case LPFC_SLI_REV3:
13288
13289 ha_copy = lpfc_sli_eratt_read(phba);
13290 break;
13291 case LPFC_SLI_REV4:
13292
13293 ha_copy = lpfc_sli4_eratt_read(phba);
13294 break;
13295 default:
13296 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13297 "0299 Invalid SLI revision (%d)\n",
13298 phba->sli_rev);
13299 ha_copy = 0;
13300 break;
13301 }
13302 spin_unlock_irq(&phba->hbalock);
13303
13304 return ha_copy;
13305 }
13306
13307
13308
13309
13310
13311
13312
13313
13314
13315
13316
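/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine updates the interrupt statistics counter and
 * returns -EIO when the PCI channel is offline or the adapter is not
 * far enough through initialization to service interrupts; 0 otherwise.
 **/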
13317 static inline int
13318 lpfc_intr_state_check(struct lpfc_hba *phba)
13319 {
13320
13321 if (unlikely(pci_channel_offline(phba->pcidev)))
13322 return -EIO;
13323
13324
13325 phba->sli.slistat.sli_intr++;
13326
13327
13328 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13329 return -EIO;
13330
13331 return 0;
13332 }
13333
13334
13335
13336
13337
13338
13339
13340
13341
13342
13343
13344
13345
13346
13347
13348
13349
13350
13351
13352
13353
13354
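/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is called directly from the PCI layer when the device
 * is configured in MSI-X multi-message interrupt mode, and indirectly
 * from the INTx-mode handler. It services mailbox completions, link
 * attention and ELS ring attention events, queuing further work to the
 * worker thread. Returns IRQ_HANDLED when an attention was serviced,
 * IRQ_NONE otherwise.
 **/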
13355 irqreturn_t
13356 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13357 {
13358 struct lpfc_hba *phba;
13359 uint32_t ha_copy, hc_copy;
13360 uint32_t work_ha_copy;
13361 unsigned long status;
13362 unsigned long iflag;
13363 uint32_t control;
13364
13365 MAILBOX_t *mbox, *pmbox;
13366 struct lpfc_vport *vport;
13367 struct lpfc_nodelist *ndlp;
13368 struct lpfc_dmabuf *mp;
13369 LPFC_MBOXQ_t *pmb;
13370 int rc;
13371
13372
13373
13374
13375
13376 phba = (struct lpfc_hba *)dev_id;
13377
13378 if (unlikely(!phba))
13379 return IRQ_NONE;
13380
13381
13382
13383
13384
13385 if (phba->intr_type == MSIX) {
13386
13387 if (lpfc_intr_state_check(phba))
13388 return IRQ_NONE;
13389
13390 spin_lock_irqsave(&phba->hbalock, iflag);
13391 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13392 goto unplug_error;
13393
13394
13395
13396 if (phba->link_flag & LS_IGNORE_ERATT)
13397 ha_copy &= ~HA_ERATT;
13398
13399 if (ha_copy & HA_ERATT) {
13400 if (phba->hba_flag & HBA_ERATT_HANDLED)
13401
13402 ha_copy &= ~HA_ERATT;
13403 else
13404
13405 phba->hba_flag |= HBA_ERATT_HANDLED;
13406 }
13407
13408
13409
13410
13411
13412 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13413 spin_unlock_irqrestore(&phba->hbalock, iflag);
13414 return IRQ_NONE;
13415 }
13416
13417
13418 if (lpfc_readl(phba->HCregaddr, &hc_copy))
13419 goto unplug_error;
13420
13421 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13422 HC_LAINT_ENA | HC_ERINT_ENA),
13423 phba->HCregaddr);
13424 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13425 phba->HAregaddr);
13426 writel(hc_copy, phba->HCregaddr);
13427 readl(phba->HAregaddr);
13428 spin_unlock_irqrestore(&phba->hbalock, iflag);
13429 } else
13430 ha_copy = phba->ha_copy;
13431
13432 work_ha_copy = ha_copy & phba->work_ha_mask;
13433
13434 if (work_ha_copy) {
13435 if (work_ha_copy & HA_LATT) {
13436 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13437
13438
13439
13440
13441 spin_lock_irqsave(&phba->hbalock, iflag);
13442 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13443 if (lpfc_readl(phba->HCregaddr, &control))
13444 goto unplug_error;
13445 control &= ~HC_LAINT_ENA;
13446 writel(control, phba->HCregaddr);
13447 readl(phba->HCregaddr);
13448 spin_unlock_irqrestore(&phba->hbalock, iflag);
13449 }
13450 else
13451 work_ha_copy &= ~HA_LATT;
13452 }
13453
13454 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13455
13456
13457
13458
13459 status = (work_ha_copy &
13460 (HA_RXMASK << (4*LPFC_ELS_RING)));
13461 status >>= (4*LPFC_ELS_RING);
13462 if (status & HA_RXMASK) {
13463 spin_lock_irqsave(&phba->hbalock, iflag);
13464 if (lpfc_readl(phba->HCregaddr, &control))
13465 goto unplug_error;
13466
13467 lpfc_debugfs_slow_ring_trc(phba,
13468 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
13469 control, status,
13470 (uint32_t)phba->sli.slistat.sli_intr);
13471
13472 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13473 lpfc_debugfs_slow_ring_trc(phba,
13474 "ISR Disable ring:"
13475 "pwork:x%x hawork:x%x wait:x%x",
13476 phba->work_ha, work_ha_copy,
13477 (uint32_t)((unsigned long)
13478 &phba->work_waitq));
13479
13480 control &=
13481 ~(HC_R0INT_ENA << LPFC_ELS_RING);
13482 writel(control, phba->HCregaddr);
13483 readl(phba->HCregaddr);
13484 }
13485 else {
13486 lpfc_debugfs_slow_ring_trc(phba,
13487 "ISR slow ring: pwork:"
13488 "x%x hawork:x%x wait:x%x",
13489 phba->work_ha, work_ha_copy,
13490 (uint32_t)((unsigned long)
13491 &phba->work_waitq));
13492 }
13493 spin_unlock_irqrestore(&phba->hbalock, iflag);
13494 }
13495 }
13496 spin_lock_irqsave(&phba->hbalock, iflag);
13497 if (work_ha_copy & HA_ERATT) {
13498 if (lpfc_sli_read_hs(phba))
13499 goto unplug_error;
13500
13501
13502
13503
13504 if ((HS_FFER1 & phba->work_hs) &&
13505 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13506 HS_FFER6 | HS_FFER7 | HS_FFER8) &
13507 phba->work_hs)) {
13508 phba->hba_flag |= DEFER_ERATT;
13509
13510 writel(0, phba->HCregaddr);
13511 readl(phba->HCregaddr);
13512 }
13513 }
13514
13515 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13516 pmb = phba->sli.mbox_active;
13517 pmbox = &pmb->u.mb;
13518 mbox = phba->mbox;
13519 vport = pmb->vport;
13520
13521
13522 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13523 if (pmbox->mbxOwner != OWN_HOST) {
13524 spin_unlock_irqrestore(&phba->hbalock, iflag);
13525
13526
13527
13528
13529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13530 "(%d):0304 Stray Mailbox "
13531 "Interrupt mbxCommand x%x "
13532 "mbxStatus x%x\n",
13533 (vport ? vport->vpi : 0),
13534 pmbox->mbxCommand,
13535 pmbox->mbxStatus);
13536
13537 work_ha_copy &= ~HA_MBATT;
13538 } else {
13539 phba->sli.mbox_active = NULL;
13540 spin_unlock_irqrestore(&phba->hbalock, iflag);
13541 phba->last_completion_time = jiffies;
13542 del_timer(&phba->sli.mbox_tmo);
13543 if (pmb->mbox_cmpl) {
13544 lpfc_sli_pcimem_bcopy(mbox, pmbox,
13545 MAILBOX_CMD_SIZE);
13546 if (pmb->out_ext_byte_len &&
13547 pmb->ctx_buf)
13548 lpfc_sli_pcimem_bcopy(
13549 phba->mbox_ext,
13550 pmb->ctx_buf,
13551 pmb->out_ext_byte_len);
13552 }
13553 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13554 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13555
13556 lpfc_debugfs_disc_trc(vport,
13557 LPFC_DISC_TRC_MBOX_VPORT,
13558 "MBOX dflt rpi: : "
13559 "status:x%x rpi:x%x",
13560 (uint32_t)pmbox->mbxStatus,
13561 pmbox->un.varWords[0], 0);
13562
13563 if (!pmbox->mbxStatus) {
13564 mp = (struct lpfc_dmabuf *)
13565 (pmb->ctx_buf);
13566 ndlp = (struct lpfc_nodelist *)
13567 pmb->ctx_ndlp;
13568
13569
13570
13571
13572
13573
13574 lpfc_unreg_login(phba,
13575 vport->vpi,
13576 pmbox->un.varWords[0],
13577 pmb);
13578 pmb->mbox_cmpl =
13579 lpfc_mbx_cmpl_dflt_rpi;
13580 pmb->ctx_buf = mp;
13581 pmb->ctx_ndlp = ndlp;
13582 pmb->vport = vport;
13583 rc = lpfc_sli_issue_mbox(phba,
13584 pmb,
13585 MBX_NOWAIT);
13586 if (rc != MBX_BUSY)
13587 lpfc_printf_log(phba,
13588 KERN_ERR,
13589 LOG_TRACE_EVENT,
13590 "0350 rc should have"
13591 "been MBX_BUSY\n");
13592 if (rc != MBX_NOT_FINISHED)
13593 goto send_current_mbox;
13594 }
13595 }
13596 spin_lock_irqsave(
13597 &phba->pport->work_port_lock,
13598 iflag);
13599 phba->pport->work_port_events &=
13600 ~WORKER_MBOX_TMO;
13601 spin_unlock_irqrestore(
13602 &phba->pport->work_port_lock,
13603 iflag);
13604
13605
13606
13607
13608 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13609
13610 phba->sli.mbox_active = NULL;
13611 phba->sli.sli_flag &=
13612 ~LPFC_SLI_MBOX_ACTIVE;
13613 if (pmb->mbox_cmpl)
13614 pmb->mbox_cmpl(phba, pmb);
13615 } else {
13616
13617 lpfc_mbox_cmpl_put(phba, pmb);
13618 }
13619 }
13620 } else
13621 spin_unlock_irqrestore(&phba->hbalock, iflag);
13622
13623 if ((work_ha_copy & HA_MBATT) &&
13624 (phba->sli.mbox_active == NULL)) {
13625 send_current_mbox:
13626
13627 do {
13628 rc = lpfc_sli_issue_mbox(phba, NULL,
13629 MBX_NOWAIT);
13630 } while (rc == MBX_NOT_FINISHED);
13631 if (rc != MBX_SUCCESS)
13632 lpfc_printf_log(phba, KERN_ERR,
13633 LOG_TRACE_EVENT,
13634 "0349 rc should be "
13635 "MBX_SUCCESS\n");
13636 }
13637
13638 spin_lock_irqsave(&phba->hbalock, iflag);
13639 phba->work_ha |= work_ha_copy;
13640 spin_unlock_irqrestore(&phba->hbalock, iflag);
13641 lpfc_worker_wake_up(phba);
13642 }
13643 return IRQ_HANDLED;
13644 unplug_error:
13645 spin_unlock_irqrestore(&phba->hbalock, iflag);
13646 return IRQ_HANDLED;
13647
13648 }
13649
13650
13651
13652
13653
13654
13655
13656
13657
13658
13659
13660
13661
13662
13663
13664
13665
13666
13667
13668
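/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function services fast-path events on the FCP ring, and on the
 * extra ring when multi-ring support is configured, either directly in
 * MSI-X mode or via the INTx-mode handler. Returns IRQ_HANDLED when
 * ring events were processed, IRQ_NONE otherwise.
 **/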
13669 irqreturn_t
13670 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13671 {
13672 struct lpfc_hba *phba;
13673 uint32_t ha_copy;
13674 unsigned long status;
13675 unsigned long iflag;
13676 struct lpfc_sli_ring *pring;
13677
13678
13679
13680
13681 phba = (struct lpfc_hba *) dev_id;
13682
13683 if (unlikely(!phba))
13684 return IRQ_NONE;
13685
13686
13687
13688
13689
13690 if (phba->intr_type == MSIX) {
13691
13692 if (lpfc_intr_state_check(phba))
13693 return IRQ_NONE;
13694
13695 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13696 return IRQ_HANDLED;
13697
13698 spin_lock_irqsave(&phba->hbalock, iflag);
13699
13700
13701
13702
13703 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13704 spin_unlock_irqrestore(&phba->hbalock, iflag);
13705 return IRQ_NONE;
13706 }
13707 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13708 phba->HAregaddr);
13709 readl(phba->HAregaddr);
13710 spin_unlock_irqrestore(&phba->hbalock, iflag);
13711 } else
13712 ha_copy = phba->ha_copy;
13713
13714
13715
13716
13717 ha_copy &= ~(phba->work_ha_mask);
13718
13719 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13720 status >>= (4*LPFC_FCP_RING);
13721 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13722 if (status & HA_RXMASK)
13723 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13724
13725 if (phba->cfg_multi_ring_support == 2) {
13726
13727
13728
13729
13730 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13731 status >>= (4*LPFC_EXTRA_RING);
13732 if (status & HA_RXMASK) {
13733 lpfc_sli_handle_fast_ring_event(phba,
13734 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13735 status);
13736 }
13737 }
13738 return IRQ_HANDLED;
13739 }
13740
13741
13742
13743
13744
13745
13746
13747
13748
13749
13750
13751
13752
13753
13754
13755
13756
13757
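/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This is the INTx-mode interrupt service routine: it reads and clears
 * the host attention bits, then invokes the slow-path and fast-path
 * handlers according to the pending attention status and combines their
 * return values.
 **/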
13758 irqreturn_t
13759 lpfc_sli_intr_handler(int irq, void *dev_id)
13760 {
13761 struct lpfc_hba *phba;
13762 irqreturn_t sp_irq_rc, fp_irq_rc;
13763 unsigned long status1, status2;
13764 uint32_t hc_copy;
13765
13766
13767
13768
13769
13770 phba = (struct lpfc_hba *) dev_id;
13771
13772 if (unlikely(!phba))
13773 return IRQ_NONE;
13774
13775
13776 if (lpfc_intr_state_check(phba))
13777 return IRQ_NONE;
13778
13779 spin_lock(&phba->hbalock);
13780 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
13781 spin_unlock(&phba->hbalock);
13782 return IRQ_HANDLED;
13783 }
13784
13785 if (unlikely(!phba->ha_copy)) {
13786 spin_unlock(&phba->hbalock);
13787 return IRQ_NONE;
13788 } else if (phba->ha_copy & HA_ERATT) {
13789 if (phba->hba_flag & HBA_ERATT_HANDLED)
13790
13791 phba->ha_copy &= ~HA_ERATT;
13792 else
13793
13794 phba->hba_flag |= HBA_ERATT_HANDLED;
13795 }
13796
13797
13798
13799
13800 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13801 spin_unlock(&phba->hbalock);
13802 return IRQ_NONE;
13803 }
13804
13805
13806 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
13807 spin_unlock(&phba->hbalock);
13808 return IRQ_HANDLED;
13809 }
13810 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
13811 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
13812 phba->HCregaddr);
13813 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
13814 writel(hc_copy, phba->HCregaddr);
13815 readl(phba->HAregaddr);
13816 spin_unlock(&phba->hbalock);
13817
13818
13819
13820
13821
13822
13823 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
13824
13825
13826 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
13827 status2 >>= (4*LPFC_ELS_RING);
13828
13829 if (status1 || (status2 & HA_RXMASK))
13830 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
13831 else
13832 sp_irq_rc = IRQ_NONE;
13833
13834
13835
13836
13837
13838
13839 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13840 status1 >>= (4*LPFC_FCP_RING);
13841
13842
13843 if (phba->cfg_multi_ring_support == 2) {
13844 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13845 status2 >>= (4*LPFC_EXTRA_RING);
13846 } else
13847 status2 = 0;
13848
13849 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
13850 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
13851 else
13852 fp_irq_rc = IRQ_NONE;
13853
13854
13855 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
13856 }
13857
13858
13859
13860
13861
13862
13863
13864
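/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all pending
 * SLI-4 ELS XRI abort events on the slow-path work queue.
 **/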
13865 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13866 {
13867 struct lpfc_cq_event *cq_event;
13868 unsigned long iflags;
13869
13870
13871 spin_lock_irqsave(&phba->hbalock, iflags);
13872 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13873 spin_unlock_irqrestore(&phba->hbalock, iflags);
13874
13875
13876 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13877 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13878
13879 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13880 cq_event, struct lpfc_cq_event, list);
13881 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13882 iflags);
13883
13884 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13885
13886
13887 lpfc_sli4_cq_event_release(phba, cq_event);
13888 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13889 iflags);
13890 }
13891 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13892 }
13893
13894
13895
13896
13897
13898
13899
13900
13901
13902
13903
13904
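/**
 * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to the response iocbq carrying the wcqe.
 *
 * This function looks up the command iocb that matches the ELS
 * work-queue completion, copies the WQE and completion WCQE into the
 * response iocbq, requeues the command on the txcmplq for the regular
 * completion path, and marks the exchange busy when the XB bit is set.
 * Returns NULL, releasing @irspiocbq, when no matching command iocb is
 * found.
 **/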
13905 static struct lpfc_iocbq *
13906 lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
13907 struct lpfc_iocbq *irspiocbq)
13908 {
13909 struct lpfc_sli_ring *pring;
13910 struct lpfc_iocbq *cmdiocbq;
13911 struct lpfc_wcqe_complete *wcqe;
13912 unsigned long iflags;
13913
13914 pring = lpfc_phba_elsring(phba);
13915 if (unlikely(!pring))
13916 return NULL;
13917
13918 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13919 spin_lock_irqsave(&pring->ring_lock, iflags);
13920 pring->stats.iocb_event++;
13921
13922 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13923 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13924 if (unlikely(!cmdiocbq)) {
13925 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13926 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13927 "0386 ELS complete with no corresponding "
13928 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13929 wcqe->word0, wcqe->total_data_placed,
13930 wcqe->parameter, wcqe->word3);
13931 lpfc_sli_release_iocbq(phba, irspiocbq);
13932 return NULL;
13933 }
13934
13935 memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
13936 memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
13937
13938
13939 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13940 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13941
13942 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13943 spin_lock_irqsave(&phba->hbalock, iflags);
13944 irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
13945 spin_unlock_irqrestore(&phba->hbalock, iflags);
13946 }
13947
13948 return irspiocbq;
13949 }
13950
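/**
 * lpfc_cq_event_setup - Allocate and populate a CQ event
 * @phba: Pointer to HBA context object.
 * @entry: Pointer to the completion queue entry to copy.
 * @size: Size of the completion queue entry, in bytes.
 *
 * Allocates a CQ event from the pool and copies @size bytes of @entry
 * into it. Returns NULL when the allocation fails.
 **/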
13951 inline struct lpfc_cq_event *
13952 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13953 {
13954 struct lpfc_cq_event *cq_event;
13955
13956
13957 cq_event = lpfc_sli4_cq_event_alloc(phba);
13958 if (!cq_event) {
13959 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13960 "0602 Failed to alloc CQ_EVENT entry\n");
13961 return NULL;
13962 }
13963
13964
13965 memcpy(&cq_event->cqe, entry, size);
13966 return cq_event;
13967 }
13968
13969
13970
13971
13972
13973
13974
13975
13976
13977
13978
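/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This function queues the asynchronous event to the slow-path work
 * queue and sets the ASYNC_EVENT flag. Returns true when work was
 * posted for the worker thread.
 **/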
13979 static bool
13980 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13981 {
13982 struct lpfc_cq_event *cq_event;
13983 unsigned long iflags;
13984
13985 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13986 "0392 Async Event: word0:x%x, word1:x%x, "
13987 "word2:x%x, word3:x%x\n", mcqe->word0,
13988 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13989
13990 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13991 if (!cq_event)
13992 return false;
13993
13994 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
13995 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13996 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
13997
13998
13999 spin_lock_irqsave(&phba->hbalock, iflags);
14000 phba->hba_flag |= ASYNC_EVENT;
14001 spin_unlock_irqrestore(&phba->hbalock, iflags);
14002
14003 return true;
14004 }
14005
14006
14007
14008
14009
14010
14011
14012
14013
14014
14015
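/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This function handles a completed mailbox MQE: it copies the MQE back
 * into the driver mailbox, folds any MCQE error status into the MQE
 * status field, performs the implicit default-RPI unregistration when
 * requested, and either completes the command in place (MBX_HEARTBEAT)
 * or queues it to the worker thread. Returns true when work was posted
 * for the worker thread.
 **/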
14016 static bool
14017 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14018 {
14019 uint32_t mcqe_status;
14020 MAILBOX_t *mbox, *pmbox;
14021 struct lpfc_mqe *mqe;
14022 struct lpfc_vport *vport;
14023 struct lpfc_nodelist *ndlp;
14024 struct lpfc_dmabuf *mp;
14025 unsigned long iflags;
14026 LPFC_MBOXQ_t *pmb;
14027 bool workposted = false;
14028 int rc;
14029
14030
14031 if (!bf_get(lpfc_trailer_completed, mcqe))
14032 goto out_no_mqe_complete;
14033
14034
14035 spin_lock_irqsave(&phba->hbalock, iflags);
14036 pmb = phba->sli.mbox_active;
14037 if (unlikely(!pmb)) {
14038 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14039 "1832 No pending MBOX command to handle\n");
14040 spin_unlock_irqrestore(&phba->hbalock, iflags);
14041 goto out_no_mqe_complete;
14042 }
14043 spin_unlock_irqrestore(&phba->hbalock, iflags);
14044 mqe = &pmb->u.mqe;
14045 pmbox = (MAILBOX_t *)&pmb->u.mqe;
14046 mbox = phba->mbox;
14047 vport = pmb->vport;
14048
14049
14050 phba->last_completion_time = jiffies;
14051 del_timer(&phba->sli.mbox_tmo);
14052
14053
14054 if (pmb->mbox_cmpl && mbox)
14055 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14056
14057
14058
14059
14060
14061 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14062 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14063 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14064 bf_set(lpfc_mqe_status, mqe,
14065 (LPFC_MBX_ERROR_RANGE | mcqe_status));
14066 }
14067 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14068 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14069 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14070 "MBOX dflt rpi: status:x%x rpi:x%x",
14071 mcqe_status,
14072 pmbox->un.varWords[0], 0);
14073 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14074 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
14075 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
14076
14077
14078
14079
14080
14081
14082 spin_lock_irqsave(&ndlp->lock, iflags);
14083 ndlp->nlp_flag |= NLP_UNREG_INP;
14084 spin_unlock_irqrestore(&ndlp->lock, iflags);
14085 lpfc_unreg_login(phba, vport->vpi,
14086 pmbox->un.varWords[0], pmb);
14087 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14088 pmb->ctx_buf = mp;
14089
14090
14091
14092
14093
14094
14095 pmb->ctx_ndlp = ndlp;
14096 pmb->vport = vport;
14097 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14098 if (rc != MBX_BUSY)
14099 lpfc_printf_log(phba, KERN_ERR,
14100 LOG_TRACE_EVENT,
14101 "0385 rc should "
14102 "have been MBX_BUSY\n");
14103 if (rc != MBX_NOT_FINISHED)
14104 goto send_current_mbox;
14105 }
14106 }
14107 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14108 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14109 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14110
14111
14112 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14113 spin_lock_irqsave(&phba->hbalock, iflags);
14114
14115 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14116 phba->sli.mbox_active = NULL;
14117 if (bf_get(lpfc_trailer_consumed, mcqe))
14118 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14119 spin_unlock_irqrestore(&phba->hbalock, iflags);
14120
14121
14122 lpfc_sli4_post_async_mbox(phba);
14123
14124
14125 if (pmb->mbox_cmpl)
14126 pmb->mbox_cmpl(phba, pmb);
14127 return false;
14128 }
14129
14130
14131 spin_lock_irqsave(&phba->hbalock, iflags);
14132 __lpfc_mbox_cmpl_put(phba, pmb);
14133 phba->work_ha |= HA_MBATT;
14134 spin_unlock_irqrestore(&phba->hbalock, iflags);
14135 workposted = true;
14136
14137 send_current_mbox:
14138 spin_lock_irqsave(&phba->hbalock, iflags);
14139
14140 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14141
14142 phba->sli.mbox_active = NULL;
14143 if (bf_get(lpfc_trailer_consumed, mcqe))
14144 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14145 spin_unlock_irqrestore(&phba->hbalock, iflags);
14146
14147 lpfc_worker_wake_up(phba);
14148 return workposted;
14149
14150 out_no_mqe_complete:
14151 spin_lock_irqsave(&phba->hbalock, iflags);
14152 if (bf_get(lpfc_trailer_consumed, mcqe))
14153 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14154 spin_unlock_irqrestore(&phba->hbalock, iflags);
14155 return false;
14156 }
14157
14158
14159
14160
14161
14162
14163
14164
14165
14166
14167
14168
14169
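/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This function dispatches a mailbox CQE either to the mailbox
 * completion handler or to the asynchronous event handler. Returns true
 * when work was posted for the worker thread.
 **/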
14170 static bool
14171 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14172 struct lpfc_cqe *cqe)
14173 {
14174 struct lpfc_mcqe mcqe;
14175 bool workposted;
14176
14177 cq->CQ_mbox++;
14178
14179
14180 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14181
14182
14183 if (!bf_get(lpfc_trailer_async, &mcqe))
14184 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14185 else
14186 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14187 return workposted;
14188 }
14189
14190
14191
14192
14193
14194
14195
14196
14197
14198
14199
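/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This function saves the ELS work-queue completion into a response
 * iocbq and queues it on the slow-path queue event list. Returns true
 * when work was posted for the worker thread.
 **/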
14200 static bool
14201 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14202 struct lpfc_wcqe_complete *wcqe)
14203 {
14204 struct lpfc_iocbq *irspiocbq;
14205 unsigned long iflags;
14206 struct lpfc_sli_ring *pring = cq->pring;
14207 int txq_cnt = 0;
14208 int txcmplq_cnt = 0;
14209
14210
14211 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14212
14213 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14214 "0357 ELS CQE error: status=x%x: "
14215 "CQE: %08x %08x %08x %08x\n",
14216 bf_get(lpfc_wcqe_c_status, wcqe),
14217 wcqe->word0, wcqe->total_data_placed,
14218 wcqe->parameter, wcqe->word3);
14219 }
14220
14221
14222 irspiocbq = lpfc_sli_get_iocbq(phba);
14223 if (!irspiocbq) {
14224 if (!list_empty(&pring->txq))
14225 txq_cnt++;
14226 if (!list_empty(&pring->txcmplq))
14227 txcmplq_cnt++;
14228 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14229 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14230 "els_txcmplq_cnt=%d\n",
14231 txq_cnt, phba->iocb_cnt,
14232 txcmplq_cnt);
14233 return false;
14234 }
14235
14236
14237 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14238 spin_lock_irqsave(&phba->hbalock, iflags);
14239 list_add_tail(&irspiocbq->cq_event.list,
14240 &phba->sli4_hba.sp_queue_event);
14241 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14242 spin_unlock_irqrestore(&phba->hbalock, iflags);
14243
14244 return true;
14245 }
14246
14247
14248
14249
14250
14251
14252
14253
14254
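/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue entry consumed WCQE.
 *
 * This function releases consumed entries from the ELS work queue after
 * checking that the WCQE carries the ELS WQ's queue id.
 **/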
14255 static void
14256 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14257 struct lpfc_wcqe_release *wcqe)
14258 {
14259
14260 if (unlikely(!phba->sli4_hba.els_wq))
14261 return;
14262
14263 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14264 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14265 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14266 else
14267 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14268 "2579 Slow-path wqe consume event carries "
14269 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14270 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
14271 phba->sli4_hba.els_wq->queue_id);
14272 }
14273
14274
14275
14276
14277
14278
14279
14280
14281
14282
14283
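/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue that generated the event.
 * @wcqe: Pointer to the XRI aborted WCQE.
 *
 * This function handles an XRI abort event: IO aborts are processed in
 * place, while ELS and NVME-LS aborts are queued to the worker thread.
 * Returns true when work was posted for the worker thread.
 **/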
14284 static bool
14285 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14286 struct lpfc_queue *cq,
14287 struct sli4_wcqe_xri_aborted *wcqe)
14288 {
14289 bool workposted = false;
14290 struct lpfc_cq_event *cq_event;
14291 unsigned long iflags;
14292
14293 switch (cq->subtype) {
14294 case LPFC_IO:
14295 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14296 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14297
14298 if (phba->nvmet_support)
14299 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14300 }
14301 workposted = false;
14302 break;
14303 case LPFC_NVME_LS:
14304 case LPFC_ELS:
14305 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14306 if (!cq_event) {
14307 workposted = false;
14308 break;
14309 }
14310 cq_event->hdwq = cq->hdwq;
14311 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14312 iflags);
14313 list_add_tail(&cq_event->list,
14314 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14315
14316 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
14317 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14318 iflags);
14319 workposted = true;
14320 break;
14321 default:
14322 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14323 "0603 Invalid CQ subtype %d: "
14324 "%08x %08x %08x %08x\n",
14325 cq->subtype, wcqe->word0, wcqe->parameter,
14326 wcqe->word2, wcqe->word3);
14327 workposted = false;
14328 break;
14329 }
14330 return workposted;
14331 }
14332
14333 #define FC_RCTL_MDS_DIAGS 0xF4
14334
14335
14336
14337
14338
14339
14340
14341
14342
14343
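/**
 * lpfc_sli4_sp_handle_rcqe - Process unsolicited receive-queue completion
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This function takes the received frame off the header/data receive
 * queue pair, hands MDS diagnostic and unsolicited data frames to the
 * MDS loopback handler, and otherwise queues the frame to the slow-path
 * queue event list. Insufficient-buffer statuses request a repost of
 * receive buffers. Returns true when work was posted for the worker
 * thread.
 **/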
14344 static bool
14345 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14346 {
14347 bool workposted = false;
14348 struct fc_frame_header *fc_hdr;
14349 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14350 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14351 struct lpfc_nvmet_tgtport *tgtp;
14352 struct hbq_dmabuf *dma_buf;
14353 uint32_t status, rq_id;
14354 unsigned long iflags;
14355
14356
14357 if (unlikely(!hrq) || unlikely(!drq))
14358 return workposted;
14359
14360 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14361 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14362 else
14363 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14364 if (rq_id != hrq->queue_id)
14365 goto out;
14366
14367 status = bf_get(lpfc_rcqe_status, rcqe);
14368 switch (status) {
14369 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14371 "2537 Receive Frame Truncated!!\n");
14372 fallthrough;
14373 case FC_STATUS_RQ_SUCCESS:
14374 spin_lock_irqsave(&phba->hbalock, iflags);
14375 lpfc_sli4_rq_release(hrq, drq);
14376 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14377 if (!dma_buf) {
14378 hrq->RQ_no_buf_found++;
14379 spin_unlock_irqrestore(&phba->hbalock, iflags);
14380 goto out;
14381 }
14382 hrq->RQ_rcv_buf++;
14383 hrq->RQ_buf_posted--;
14384 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14385
14386 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14387
14388 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14389 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14390 spin_unlock_irqrestore(&phba->hbalock, iflags);
14391
14392 if (!(phba->pport->load_flag & FC_UNLOADING))
14393 lpfc_sli4_handle_mds_loopback(phba->pport,
14394 dma_buf);
14395 else
14396 lpfc_in_buf_free(phba, &dma_buf->dbuf);
14397 break;
14398 }
14399
14400
14401 list_add_tail(&dma_buf->cq_event.list,
14402 &phba->sli4_hba.sp_queue_event);
14403
14404 phba->hba_flag |= HBA_SP_QUEUE_EVT;
14405 spin_unlock_irqrestore(&phba->hbalock, iflags);
14406 workposted = true;
14407 break;
14408 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14409 if (phba->nvmet_support) {
14410 tgtp = phba->targetport->private;
14411 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14412 "6402 RQE Error x%x, posted %d err_cnt "
14413 "%d: %x %x %x\n",
14414 status, hrq->RQ_buf_posted,
14415 hrq->RQ_no_posted_buf,
14416 atomic_read(&tgtp->rcv_fcp_cmd_in),
14417 atomic_read(&tgtp->rcv_fcp_cmd_out),
14418 atomic_read(&tgtp->xmt_fcp_release));
14419 }
14420 fallthrough;
14421
14422 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14423 hrq->RQ_no_posted_buf++;
14424
14425 spin_lock_irqsave(&phba->hbalock, iflags);
14426 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14427 spin_unlock_irqrestore(&phba->hbalock, iflags);
14428 workposted = true;
14429 break;
14430 }
14431 out:
14432 return workposted;
14433 }
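/*
 * Note that FC_STATUS_INSUFF_BUF_* does not repost buffers here; it only
 * sets HBA_POST_RECEIVE_BUFFER and posts work. A hedged sketch of the
 * replenish step the worker thread later performs (the helper name
 * lpfc_repost_rq_buffers is an assumption for illustration; the real
 * driver reposts through its HBQ buffer routines):
 *
 *	if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
 *		spin_lock_irqsave(&phba->hbalock, iflags);
 *		phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
 *		spin_unlock_irqrestore(&phba->hbalock, iflags);
 *		lpfc_repost_rq_buffers(phba);	(hypothetical helper)
 *	}
 */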
14434
14435
14436 /**
14437  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14438  * @phba: Pointer to HBA context object.
14439  * @cq: Pointer to the completion queue.
14440  * @cqe: Pointer to a completion queue entry.
14441  *
14442  * This routine processes a slow-path work-queue or receive-queue
14443  * completion queue entry.
14444  * Return: true if work posted to worker thread, otherwise false.
14445  **/
14446 static bool
14447 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14448 struct lpfc_cqe *cqe)
14449 {
14450 struct lpfc_cqe cqevt;
14451 bool workposted = false;
14452
14453
14454 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14455
14456
14457 switch (bf_get(lpfc_cqe_code, &cqevt)) {
14458 case CQE_CODE_COMPL_WQE:
14459
14460 phba->last_completion_time = jiffies;
14461 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14462 (struct lpfc_wcqe_complete *)&cqevt);
14463 break;
14464 case CQE_CODE_RELEASE_WQE:
14465
14466 lpfc_sli4_sp_handle_rel_wcqe(phba,
14467 (struct lpfc_wcqe_release *)&cqevt);
14468 break;
14469 case CQE_CODE_XRI_ABORTED:
14470
14471 phba->last_completion_time = jiffies;
14472 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14473 (struct sli4_wcqe_xri_aborted *)&cqevt);
14474 break;
14475 case CQE_CODE_RECEIVE:
14476 case CQE_CODE_RECEIVE_V1:
14477
14478 phba->last_completion_time = jiffies;
14479 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14480 (struct lpfc_rcqe *)&cqevt);
14481 break;
14482 default:
14483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14484 "0388 Not a valid WCQE code: x%x\n",
14485 bf_get(lpfc_cqe_code, &cqevt));
14486 break;
14487 }
14488 return workposted;
14489 }
14490
14491
14492 /**
14493  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14494  * @phba: Pointer to HBA context object.
14495  * @eqe: Pointer to fast-path event queue entry.
14496  * @speq: Pointer to slow-path event queue.
14497  *
14498  * This routine processes an event queue entry from the slow-path event
14499  * queue. It looks up the completion queue identified by the entry and
14500  * schedules the CQ's slow-path work handler, which will drain the CQ and
14501  * rearm it. If the CQ identifier does not match any child CQ of the
14502  * slow-path EQ, an error is logged and the entry is dropped.
14503  **/
14504
14505 static void
14506 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14507 struct lpfc_queue *speq)
14508 {
14509 struct lpfc_queue *cq = NULL, *childq;
14510 uint16_t cqid;
14511 int ret = 0;
14512
14513
14514 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14515
14516 list_for_each_entry(childq, &speq->child_list, list) {
14517 if (childq->queue_id == cqid) {
14518 cq = childq;
14519 break;
14520 }
14521 }
14522 if (unlikely(!cq)) {
14523 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14524 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14525 "0365 Slow-path CQ identifier "
14526 "(%d) does not exist\n", cqid);
14527 return;
14528 }
14529
14530
14531 cq->assoc_qp = speq;
14532
14533 if (is_kdump_kernel())
14534 ret = queue_work(phba->wq, &cq->spwork);
14535 else
14536 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14537
14538 if (!ret)
14539 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14540 "0390 Cannot schedule queue work "
14541 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14542 cqid, cq->queue_id, raw_smp_processor_id());
14543 }
14544
14545
14546 /**
14547  * __lpfc_sli4_process_cq - Process elements of a CQ
14548  * @phba: Pointer to HBA context object.
14549  * @cq: Pointer to CQ to be processed.
14550  * @handler: Routine to process each cqe.
14551  * @delay: Pointer to usdelay to set in case of rescheduling of the handler.
14552  * @poll_mode: Polling mode we were called from.
14553  *
14554  * This routine processes completion queue entries in a CQ. While a valid
14555  * queue element is found, the handler is called. During processing, checks
14556  * are made for periodic doorbell writes to let the hardware know how many
14557  * elements have been consumed.
14558  *
14559  * The loop stops when the max limit on CQEs to process is hit, or there are
14560  * no more valid entries. If a sufficient number of elements was processed
14561  * (meaning there is sufficient load), rather than rearming and generating
14562  * another interrupt, a CQ rescheduling delay is set instead.
14563  *
14564  * Return: true if work posted to worker thread, otherwise false.
14565  **/
14566 static bool
14567 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14568 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14569 struct lpfc_cqe *), unsigned long *delay,
14570 enum lpfc_poll_mode poll_mode)
14571 {
14572 struct lpfc_cqe *cqe;
14573 bool workposted = false;
14574 int count = 0, consumed = 0;
14575 bool arm = true;
14576
14577
14578 *delay = 0;
14579
14580 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14581 goto rearm_and_exit;
14582
14583
14584 cq->q_flag = 0;
14585 cqe = lpfc_sli4_cq_get(cq);
14586 while (cqe) {
14587 workposted |= handler(phba, cq, cqe);
14588 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14589
14590 consumed++;
14591 if (!(++count % cq->max_proc_limit))
14592 break;
14593
14594 if (!(count % cq->notify_interval)) {
14595 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14596 LPFC_QUEUE_NOARM);
14597 consumed = 0;
14598 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14599 }
14600
14601 if (count == LPFC_NVMET_CQ_NOTIFY)
14602 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14603
14604 cqe = lpfc_sli4_cq_get(cq);
14605 }
14606 if (count >= phba->cfg_cq_poll_threshold) {
14607 *delay = 1;
14608 arm = false;
14609 }
14610
14611
14612 if (poll_mode == LPFC_IRQ_POLL)
14613 irq_poll_complete(&cq->iop);
14614
14615
14616 if (count > cq->CQ_max_cqe)
14617 cq->CQ_max_cqe = count;
14618
14619 cq->assoc_qp->EQ_cqe_cnt += count;
14620
14621
14622 if (unlikely(count == 0))
14623 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14624 "0369 No entry from completion queue "
14625 "qid=%d\n", cq->queue_id);
14626
14627 xchg(&cq->queue_claimed, 0);
14628
14629 rearm_and_exit:
14630 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14631 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14632
14633 return workposted;
14634 }
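/*
 * Every CQ consumer in this file funnels through the routine above; only
 * the per-CQE handler differs. For example, the slow-path caller below
 * invokes it as:
 *
 *	workposted = __lpfc_sli4_process_cq(phba, cq,
 *					    lpfc_sli4_sp_handle_cqe,
 *					    &delay, LPFC_QUEUE_WORK);
 *
 * The cmpxchg() on cq->queue_claimed guarantees a single active processor
 * per CQ: a second caller that loses the race simply rearms the CQ
 * doorbell and exits, leaving the remaining entries to the current owner.
 */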
14635
14636
14637 /**
14638  * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
14639  * @cq: Pointer to CQ to process.
14640  *
14641  * This routine calls the CQ processing routine with a handler specific
14642  * to the type of queue bound to it.
14643  *
14644  * The CQ routine returns two values: the first indicates whether work was
14645  * queued to the background discovery thread; if true, this routine wakes
14646  * up the worker thread. The second is the delay parameter: if non-zero,
14647  * rather than rearming the CQ and taking yet another interrupt, the CQ
14648  * handler is queued to be rescheduled after the delay.
14649  **/
14650
14651 static void
14652 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14653 {
14654 struct lpfc_hba *phba = cq->phba;
14655 unsigned long delay;
14656 bool workposted = false;
14657 int ret = 0;
14658
14659
14660 switch (cq->type) {
14661 case LPFC_MCQ:
14662 workposted |= __lpfc_sli4_process_cq(phba, cq,
14663 lpfc_sli4_sp_handle_mcqe,
14664 &delay, LPFC_QUEUE_WORK);
14665 break;
14666 case LPFC_WCQ:
14667 if (cq->subtype == LPFC_IO)
14668 workposted |= __lpfc_sli4_process_cq(phba, cq,
14669 lpfc_sli4_fp_handle_cqe,
14670 &delay, LPFC_QUEUE_WORK);
14671 else
14672 workposted |= __lpfc_sli4_process_cq(phba, cq,
14673 lpfc_sli4_sp_handle_cqe,
14674 &delay, LPFC_QUEUE_WORK);
14675 break;
14676 default:
14677 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14678 "0370 Invalid completion queue type (%d)\n",
14679 cq->type);
14680 return;
14681 }
14682
14683 if (delay) {
14684 if (is_kdump_kernel())
14685 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14686 delay);
14687 else
14688 ret = queue_delayed_work_on(cq->chann, phba->wq,
14689 &cq->sched_spwork, delay);
14690 if (!ret)
14691 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14692 "0394 Cannot schedule queue work "
14693 "for cqid=%d on CPU %d\n",
14694 cq->queue_id, cq->chann);
14695 }
14696
14697
14698 if (workposted)
14699 lpfc_worker_wake_up(phba);
14700 }
14701
14702
14703 /**
14704  * lpfc_sli4_sp_process_cq - slow-path work handler started by worker thread
14705  * @work: pointer to work element
14706  *
14707  * Translates from the work handler and calls the slow-path handler.
14708  **/
14709 static void
14710 lpfc_sli4_sp_process_cq(struct work_struct *work)
14711 {
14712 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14713
14714 __lpfc_sli4_sp_process_cq(cq);
14715 }
14716
14717 /**
14718  * lpfc_sli4_dly_sp_process_cq - slow-path work handler started by timer
14719  * @work: pointer to work element
14720  *
14721  * Translates from the work handler and calls the slow-path handler.
14722  **/
14723 static void
14724 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14725 {
14726 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14727 struct lpfc_queue, sched_spwork);
14728
14729 __lpfc_sli4_sp_process_cq(cq);
14730 }
14731
14732
14733 /**
14734  * lpfc_sli4_fp_handle_fcp_wcqe - Process a fast-path work queue completion
14735  * @phba: Pointer to HBA context object.
14736  * @cq: Pointer to associated CQ.
14737  * @wcqe: Pointer to work-queue completion queue entry.
14738  *
14739  * Processes a fast-path WQ completion entry for an FCP command response.
14740  **/
14741 static void
14742 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14743 struct lpfc_wcqe_complete *wcqe)
14744 {
14745 struct lpfc_sli_ring *pring = cq->pring;
14746 struct lpfc_iocbq *cmdiocbq;
14747 unsigned long iflags;
14748
14749
14750 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14751
14752
14753
14754 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
14755 IOSTAT_LOCAL_REJECT)) &&
14756 ((wcqe->parameter & IOERR_PARAM_MASK) ==
14757 IOERR_NO_RESOURCES))
14758 phba->lpfc_rampdown_queue_depth(phba);
14759
14760
14761 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14762 "0373 FCP CQE cmpl: status=x%x: "
14763 "CQE: %08x %08x %08x %08x\n",
14764 bf_get(lpfc_wcqe_c_status, wcqe),
14765 wcqe->word0, wcqe->total_data_placed,
14766 wcqe->parameter, wcqe->word3);
14767 }
14768
14769
14770 spin_lock_irqsave(&pring->ring_lock, iflags);
14771 pring->stats.iocb_event++;
14772 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14773 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14774 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14775 if (unlikely(!cmdiocbq)) {
14776 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14777 "0374 FCP complete with no corresponding "
14778 "cmdiocb: iotag (%d)\n",
14779 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14780 return;
14781 }
14782 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14783 cmdiocbq->isr_timestamp = cq->isr_timestamp;
14784 #endif
14785 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14786 spin_lock_irqsave(&phba->hbalock, iflags);
14787 cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14788 spin_unlock_irqrestore(&phba->hbalock, iflags);
14789 }
14790
14791 if (cmdiocbq->cmd_cmpl) {
14792
14793 if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
14794 cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
14795 spin_lock_irqsave(&phba->hbalock, iflags);
14796 cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
14797 spin_unlock_irqrestore(&phba->hbalock, iflags);
14798 }
14799
14800
14801 memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
14802 sizeof(struct lpfc_wcqe_complete));
14803 cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
14804 } else {
14805 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14806 "0375 FCP cmdiocb not callback function "
14807 "iotag: (%d)\n",
14808 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14809 }
14810 }
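/*
 * cmd_cmpl callbacks receive the command iocbq twice (as both cmdiocb
 * and rspiocb) because the raw WCQE has already been copied into
 * cmdiocbq->wcqe_cmpl above. A minimal sketch of a conforming callback
 * (example_cmd_cmpl is a hypothetical name, not taken from this file):
 *
 *	static void example_cmd_cmpl(struct lpfc_hba *phba,
 *				     struct lpfc_iocbq *cmdiocbq,
 *				     struct lpfc_iocbq *rspiocbq)
 *	{
 *		struct lpfc_wcqe_complete *wcqe = &cmdiocbq->wcqe_cmpl;
 *
 *		if (bf_get(lpfc_wcqe_c_status, wcqe))
 *			(handle the error status)
 *		(release or recycle cmdiocbq)
 *	}
 */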
14811
14812
14813 /**
14814  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
14815  * @phba: Pointer to HBA context object.
14816  * @cq: Pointer to completion queue.
14817  * @wcqe: Pointer to work-queue completion queue entry.
14818  *
14819  * Releases the consumed entry back to the matching fast-path work queue.
14820  **/
14821 static void
14822 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14823 struct lpfc_wcqe_release *wcqe)
14824 {
14825 struct lpfc_queue *childwq;
14826 bool wqid_matched = false;
14827 uint16_t hba_wqid;
14828
14829
14830 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14831 list_for_each_entry(childwq, &cq->child_list, list) {
14832 if (childwq->queue_id == hba_wqid) {
14833 lpfc_sli4_wq_release(childwq,
14834 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14835 if (childwq->q_flag & HBA_NVMET_WQFULL)
14836 lpfc_nvmet_wqfull_process(phba, childwq);
14837 wqid_matched = true;
14838 break;
14839 }
14840 }
14841
14842 if (!wqid_matched)
14843 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14844 "2580 Fast-path wqe consume event carries "
14845 "mismatched qid: wcqe-qid=x%x\n", hba_wqid);
14846 }
14847
14848
14849 /**
14850  * lpfc_sli4_nvmet_handle_rcqe - Process an NVMET receive-queue completion
14851  * @phba: Pointer to HBA context object.
14852  * @cq: Pointer to completion queue.
14853  * @rcqe: Pointer to receive-queue completion queue entry.
14854  *
14855  * Processes a receive-queue completion posted to an NVMET multi-receive
14856  * queue. Return: true if work posted to worker thread, otherwise false.
14857  **/
14858 static bool
14859 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14860 struct lpfc_rcqe *rcqe)
14861 {
14862 bool workposted = false;
14863 struct lpfc_queue *hrq;
14864 struct lpfc_queue *drq;
14865 struct rqb_dmabuf *dma_buf;
14866 struct fc_frame_header *fc_hdr;
14867 struct lpfc_nvmet_tgtport *tgtp;
14868 uint32_t status, rq_id;
14869 unsigned long iflags;
14870 uint32_t fctl, idx;
14871
14872 if ((phba->nvmet_support == 0) ||
14873 (phba->sli4_hba.nvmet_cqset == NULL))
14874 return workposted;
14875
14876 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14877 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14878 drq = phba->sli4_hba.nvmet_mrq_data[idx];
14879
14880
14881 if (unlikely(!hrq) || unlikely(!drq))
14882 return workposted;
14883
14884 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14885 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14886 else
14887 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14888
14889 if ((phba->nvmet_support == 0) ||
14890 (rq_id != hrq->queue_id))
14891 return workposted;
14892
14893 status = bf_get(lpfc_rcqe_status, rcqe);
14894 switch (status) {
14895 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14896 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14897 "6126 Receive Frame Truncated!!\n");
14898 fallthrough;
14899 case FC_STATUS_RQ_SUCCESS:
14900 spin_lock_irqsave(&phba->hbalock, iflags);
14901 lpfc_sli4_rq_release(hrq, drq);
14902 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14903 if (!dma_buf) {
14904 hrq->RQ_no_buf_found++;
14905 spin_unlock_irqrestore(&phba->hbalock, iflags);
14906 goto out;
14907 }
14908 spin_unlock_irqrestore(&phba->hbalock, iflags);
14909 hrq->RQ_rcv_buf++;
14910 hrq->RQ_buf_posted--;
14911 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14912
14913
14914 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14915 fc_hdr->fh_f_ctl[1] << 8 |
14916 fc_hdr->fh_f_ctl[2]);
14917 if (((fctl &
14918 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14919 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14920 (fc_hdr->fh_seq_cnt != 0))
14921 goto drop;
14922
14923 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14924 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14925 lpfc_nvmet_unsol_fcp_event(
14926 phba, idx, dma_buf, cq->isr_timestamp,
14927 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14928 return false;
14929 }
14930 drop:
14931 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14932 break;
14933 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14934 if (phba->nvmet_support) {
14935 tgtp = phba->targetport->private;
14936 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14937 "6401 RQE Error x%x, posted %d err_cnt "
14938 "%d: %x %x %x\n",
14939 status, hrq->RQ_buf_posted,
14940 hrq->RQ_no_posted_buf,
14941 atomic_read(&tgtp->rcv_fcp_cmd_in),
14942 atomic_read(&tgtp->rcv_fcp_cmd_out),
14943 atomic_read(&tgtp->xmt_fcp_release));
14944 }
14945 fallthrough;
14946
14947 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14948 hrq->RQ_no_posted_buf++;
14949
14950 break;
14951 }
14952 out:
14953 return workposted;
14954 }
14955
14956
14957 /**
14958  * lpfc_sli4_fp_handle_cqe - Process a fast-path completion queue entry
14959  * @phba: adapter with the CQ.
14960  * @cq: Pointer to the completion queue.
14961  * @cqe: Pointer to fast-path completion queue entry.
14962  *
14963  * This routine dispatches a fast-path completion queue entry to the
14964  * proper WQ-completion, release, abort, or receive handler.
14965  * Return: true if work posted to worker thread, otherwise false.
14966  **/
14967 static bool
14968 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14969 struct lpfc_cqe *cqe)
14970 {
14971 struct lpfc_wcqe_release wcqe;
14972 bool workposted = false;
14973
14974
14975 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14976
14977
14978 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14979 case CQE_CODE_COMPL_WQE:
14980 case CQE_CODE_NVME_ERSP:
14981 cq->CQ_wq++;
14982
14983 phba->last_completion_time = jiffies;
14984 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14985 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14986 (struct lpfc_wcqe_complete *)&wcqe);
14987 break;
14988 case CQE_CODE_RELEASE_WQE:
14989 cq->CQ_release_wqe++;
14990
14991 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14992 (struct lpfc_wcqe_release *)&wcqe);
14993 break;
14994 case CQE_CODE_XRI_ABORTED:
14995 cq->CQ_xri_aborted++;
14996
14997 phba->last_completion_time = jiffies;
14998 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14999 (struct sli4_wcqe_xri_aborted *)&wcqe);
15000 break;
15001 case CQE_CODE_RECEIVE_V1:
15002 case CQE_CODE_RECEIVE:
15003 phba->last_completion_time = jiffies;
15004 if (cq->subtype == LPFC_NVMET) {
15005 workposted = lpfc_sli4_nvmet_handle_rcqe(
15006 phba, cq, (struct lpfc_rcqe *)&wcqe);
15007 }
15008 break;
15009 default:
15010 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15011 "0144 Not a valid CQE code: x%x\n",
15012 bf_get(lpfc_wcqe_c_code, &wcqe));
15013 break;
15014 }
15015 return workposted;
15016 }
15017
15018
15019 /**
15020  * lpfc_sli4_sched_cq_work - Schedule CQ work
15021  * @phba: Pointer to HBA context object.
15022  * @cq: Pointer to CQ.
15023  * @cqid: CQ ID.
15024  *
15025  * This routine checks the poll mode of the CQ corresponding to the
15026  * current EQ. It is called with interrupts disabled. If the poll mode
15027  * is enabled (and congestion management is off), irq_poll_sched() is
15028  * invoked; otherwise the interrupt work is queued to the worker thread.
15029  **/
15030
15031
15032 static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
15033 struct lpfc_queue *cq, uint16_t cqid)
15034 {
15035 int ret = 0;
15036
15037 switch (cq->poll_mode) {
15038 case LPFC_IRQ_POLL:
15039
15040 if (phba->cmf_active_mode == LPFC_CFG_OFF) {
15041 irq_poll_sched(&cq->iop);
15042 break;
15043 }
15044 fallthrough;
15045 case LPFC_QUEUE_WORK:
15046 default:
15047 if (is_kdump_kernel())
15048 ret = queue_work(phba->wq, &cq->irqwork);
15049 else
15050 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15051 if (!ret)
15052 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15053 "0383 Cannot schedule queue work "
15054 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15055 cqid, cq->queue_id,
15056 raw_smp_processor_id());
15057 }
15058 }
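/*
 * For reference, the LPFC_IRQ_POLL arm above relies on the irq_poll
 * instance initialized at CQ-create time (see lpfc_cq_create() below):
 *
 *	irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_handler);
 *
 * irq_poll_sched() defers CQE processing to softirq context, and
 * __lpfc_sli4_process_cq() calls irq_poll_complete() once it has drained
 * the CQ, re-enabling scheduling of the poll instance.
 */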
15059
15060
15061 /**
15062  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15063  * @phba: Pointer to HBA context object.
15064  * @eq: Pointer to the event queue.
15065  * @eqe: Pointer to fast-path event queue entry.
15066  *
15067  * This routine processes an event queue entry from the fast-path event
15068  * queue. It checks the MajorCode to determine this is a completion event
15069  * on a completion queue; if not, an error is logged and the entry is
15070  * dropped. Otherwise it finds the corresponding completion queue and
15071  * schedules the work to process and rearm it.
15072  **/
15073 static void
15074 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15075 struct lpfc_eqe *eqe)
15076 {
15077 struct lpfc_queue *cq = NULL;
15078 uint32_t qidx = eq->hdwq;
15079 uint16_t cqid, id;
15080
15081 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15082 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15083 "0366 Not a valid completion "
15084 "event: majorcode=x%x, minorcode=x%x\n",
15085 bf_get_le32(lpfc_eqe_major_code, eqe),
15086 bf_get_le32(lpfc_eqe_minor_code, eqe));
15087 return;
15088 }
15089
15090
15091 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15092
15093
15094 if (cqid <= phba->sli4_hba.cq_max) {
15095 cq = phba->sli4_hba.cq_lookup[cqid];
15096 if (cq)
15097 goto work_cq;
15098 }
15099
15100
15101 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15102 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15103 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15104
15105 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15106 goto process_cq;
15107 }
15108 }
15109
15110 if (phba->sli4_hba.nvmels_cq &&
15111 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15112
15113 cq = phba->sli4_hba.nvmels_cq;
15114 }
15115
15116
15117 if (cq == NULL) {
15118 lpfc_sli4_sp_handle_eqe(phba, eqe,
15119 phba->sli4_hba.hdwq[qidx].hba_eq);
15120 return;
15121 }
15122
15123 process_cq:
15124 if (unlikely(cqid != cq->queue_id)) {
15125 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15126 "0368 Miss-matched fast-path completion "
15127 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
15128 cqid, cq->queue_id);
15129 return;
15130 }
15131
15132 work_cq:
15133 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15134 if (phba->ktime_on)
15135 cq->isr_timestamp = ktime_get_ns();
15136 else
15137 cq->isr_timestamp = 0;
15138 #endif
15139 lpfc_sli4_sched_cq_work(phba, cq, cqid);
15140 }
15141
15142
15143 /**
15144  * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
15145  * @cq: Pointer to CQ to be processed.
15146  * @poll_mode: Enum lpfc_poll_mode indicating how we were invoked.
15147  *
15148  * This routine calls the CQ processing routine with the handler for
15149  * fast path CQEs.
15150  *
15151  * The CQ routine returns two values: the first indicates whether work was
15152  * queued to the background discovery thread; if true, this routine wakes
15153  * up the worker thread. The second is the delay parameter: if non-zero,
15154  * rather than rearming the CQ and taking yet another interrupt, the CQ
15155  * handler is queued to be rescheduled after the delay.
15156  **/
15157
15158 static void
15159 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
15160 enum lpfc_poll_mode poll_mode)
15161 {
15162 struct lpfc_hba *phba = cq->phba;
15163 unsigned long delay;
15164 bool workposted = false;
15165 int ret = 0;
15166
15167
15168 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15169 &delay, poll_mode);
15170
15171 if (delay) {
15172 if (is_kdump_kernel())
15173 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15174 delay);
15175 else
15176 ret = queue_delayed_work_on(cq->chann, phba->wq,
15177 &cq->sched_irqwork, delay);
15178 if (!ret)
15179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15180 "0367 Cannot schedule queue work "
15181 "for cqid=%d on CPU %d\n",
15182 cq->queue_id, cq->chann);
15183 }
15184
15185
15186 if (workposted)
15187 lpfc_worker_wake_up(phba);
15188 }
15189
15190
15191 /**
15192  * lpfc_sli4_hba_process_cq - fast-path work handler started by worker thread
15193  * @work: pointer to work element
15194  *
15195  * Translates from the work handler and calls the fast-path handler.
15196  **/
15197 static void
15198 lpfc_sli4_hba_process_cq(struct work_struct *work)
15199 {
15200 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15201
15202 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15203 }
15204
15205 /**
15206  * lpfc_sli4_dly_hba_process_cq - fast-path work handler started by timer
15207  * @work: pointer to work element
15208  *
15209  * Translates from the work handler and calls the fast-path handler.
15210  **/
15211 static void
15212 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15213 {
15214 struct lpfc_queue *cq = container_of(to_delayed_work(work),
15215 struct lpfc_queue, sched_irqwork);
15216
15217 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
15218 }
15219
15220
15221 /**
15222  * lpfc_sli4_hba_intr_handler - HBA interrupt handler for SLI-4 devices
15223  * @irq: Interrupt number.
15224  * @dev_id: The device context pointer.
15225  *
15226  * This function is directly called from the PCI layer as an interrupt
15227  * service routine when a device with the SLI-4 interface spec is enabled
15228  * with MSI-X multi-message interrupt mode and there is a fast-path event
15229  * queue event in the HBA. However, when the device is enabled with either
15230  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
15231  * device-level interrupt handler. When the PCI slot is in error recovery
15232  * or the HBA is undergoing initialization, the interrupt handler will not
15233  * process the interrupt. This function is called without any lock held;
15234  * fast-path events are handled entirely in interrupt context.
15235  *
15236  * Returns IRQ_HANDLED when the interrupt is handled, else IRQ_NONE.
15237  **/
15238
15239
15240
15241
15242
15243
15244
15245
15246 irqreturn_t
15247 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15248 {
15249 struct lpfc_hba *phba;
15250 struct lpfc_hba_eq_hdl *hba_eq_hdl;
15251 struct lpfc_queue *fpeq;
15252 unsigned long iflag;
15253 int ecount = 0;
15254 int hba_eqidx;
15255 struct lpfc_eq_intr_info *eqi;
15256
15257
15258 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15259 phba = hba_eq_hdl->phba;
15260 hba_eqidx = hba_eq_hdl->idx;
15261
15262 if (unlikely(!phba))
15263 return IRQ_NONE;
15264 if (unlikely(!phba->sli4_hba.hdwq))
15265 return IRQ_NONE;
15266
15267
15268 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15269 if (unlikely(!fpeq))
15270 return IRQ_NONE;
15271
15272
15273 if (unlikely(lpfc_intr_state_check(phba))) {
15274
15275 spin_lock_irqsave(&phba->hbalock, iflag);
15276 if (phba->link_state < LPFC_LINK_DOWN)
15277
15278 lpfc_sli4_eqcq_flush(phba, fpeq);
15279 spin_unlock_irqrestore(&phba->hbalock, iflag);
15280 return IRQ_NONE;
15281 }
15282
15283 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15284 eqi->icnt++;
15285
15286 fpeq->last_cpu = raw_smp_processor_id();
15287
15288 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15289 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15290 phba->cfg_auto_imax &&
15291 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15292 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15293 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
15294
15295
15296 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
15297
15298 if (unlikely(ecount == 0)) {
15299 fpeq->EQ_no_entry++;
15300 if (phba->intr_type == MSIX)
15301
15302 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15303 "0358 MSI-X interrupt with no EQE\n");
15304 else
15305
15306 return IRQ_NONE;
15307 }
15308
15309 return IRQ_HANDLED;
15310 }
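/*
 * One instance of this handler is registered per EQ vector, with the
 * per-vector lpfc_hba_eq_hdl as dev_id (that is what the cast at the top
 * of the routine recovers). A hedged sketch of the registration,
 * assuming MSI-X vectors were already allocated with
 * pci_alloc_irq_vectors() (the "lpfc-eq" name is illustrative):
 *
 *	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
 *		rc = request_irq(pci_irq_vector(phba->pcidev, idx),
 *				 lpfc_sli4_hba_intr_handler, 0,
 *				 "lpfc-eq",
 *				 &phba->sli4_hba.hba_eq_hdl[idx]);
 *		if (rc)
 *			goto unwind;	(unwind elided)
 *	}
 */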
15311
15312
15313 /**
15314  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 devices
15315  * @irq: Interrupt number.
15316  * @dev_id: The device context pointer.
15317  *
15318  * This function is the device-level interrupt handler for a device with
15319  * the SLI-4 interface spec, called from the PCI layer when either MSI or
15320  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
15321  * requires driver attention. It invokes each HBA EQ interrupt handler in
15322  * turn to process any pending events. This function is called without any
15323  * lock held.
15324  *
15325  * Returns IRQ_HANDLED when an interrupt is handled, else IRQ_NONE.
15326  **/
15327
15328
15329 irqreturn_t
15330 lpfc_sli4_intr_handler(int irq, void *dev_id)
15331 {
15332 struct lpfc_hba *phba;
15333 irqreturn_t hba_irq_rc;
15334 bool hba_handled = false;
15335 int qidx;
15336
15337
15338 phba = (struct lpfc_hba *)dev_id;
15339
15340 if (unlikely(!phba))
15341 return IRQ_NONE;
15342
15343
15344
15345
15346 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15347 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15348 &phba->sli4_hba.hba_eq_hdl[qidx]);
15349 if (hba_irq_rc == IRQ_HANDLED)
15350 hba_handled = true;
15351 }
15352
15353 return hba_handled ? IRQ_HANDLED : IRQ_NONE;
15354 }
15355
15356 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15357 {
15358 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15359 struct lpfc_queue *eq;
15360 int i = 0;
15361
15362 rcu_read_lock();
15363
15364 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15365 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
15366 if (!list_empty(&phba->poll_list))
15367 mod_timer(&phba->cpuhp_poll_timer,
15368 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15369
15370 rcu_read_unlock();
15371 }
15372
15373 inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
15374 {
15375 struct lpfc_hba *phba = eq->phba;
15376 int i = 0;
15377
15378
15379
15380
15381
15382
15383
15384
15385 smp_rmb();
15386
15387 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
15388
15389
15390
15391
15392
15393
15394
15395 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
15396
15397 return i;
15398 }
15399
15400 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15401 {
15402 struct lpfc_hba *phba = eq->phba;
15403
15404
15405 if (list_empty(&phba->poll_list))
15406 mod_timer(&phba->cpuhp_poll_timer,
15407 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15408
15409 list_add_rcu(&eq->_poll_list, &phba->poll_list);
15410 synchronize_rcu();
15411 }
15412
15413 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15414 {
15415 struct lpfc_hba *phba = eq->phba;
15416
15417
15418
15419
15420 list_del_rcu(&eq->_poll_list);
15421 synchronize_rcu();
15422
15423 if (list_empty(&phba->poll_list))
15424 del_timer_sync(&phba->cpuhp_poll_timer);
15425 }
15426
15427 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15428 {
15429 struct lpfc_queue *eq, *next;
15430
15431 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15432 list_del(&eq->_poll_list);
15433
15434 INIT_LIST_HEAD(&phba->poll_list);
15435 synchronize_rcu();
15436 }
15437
15438 static inline void
15439 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15440 {
15441 if (mode == eq->mode)
15442 return;
15443
15444
15445
15446
15447
15448
15449
15450
15451
15452
15453
15454 WRITE_ONCE(eq->mode, mode);
15455
15456 smp_wmb();
15457
15458
15459
15460
15461
15462
15463
15464
15465
15466
15467
15468
15469
15470 mode ? lpfc_sli4_add_to_poll_list(eq) :
15471 lpfc_sli4_remove_from_poll_list(eq);
15472 }
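/*
 * The ordering contract between the mode switch above and the poller is
 * a classic publish/consume barrier pairing:
 *
 *	writer (__lpfc_sli4_switch_eqmode)	reader (lpfc_sli4_poll_eq)
 *	WRITE_ONCE(eq->mode, mode);		smp_rmb();
 *	smp_wmb();				if (READ_ONCE(eq->mode) ==
 *	add to / remove from poll_list		    LPFC_EQ_POLL)
 *							process the EQ
 *
 * The smp_wmb()/smp_rmb() pair ensures a poller that finds the EQ on
 * phba->poll_list never acts on a stale eq->mode.
 */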
15473
15474 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15475 {
15476 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15477 }
15478
15479 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15480 {
15481 struct lpfc_hba *phba = eq->phba;
15482
15483 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15484
15485 /* Kick-start any pending I/O in hardware. Once we switch back to
15486  * interrupt processing, the I/O path only rearms an EQ when a
15487  * completion arrives; a disarmed EQ never delivers one, which would
15488  * deadlock. Rearming here breaks that cycle.
15489  */
15490
15491
15492 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15493 }
15494
15495 /**
15496  * lpfc_sli4_queue_free - free a queue structure and associated memory
15497  * @queue: The queue structure to free.
15498  *
15499  * This function frees a queue structure and the DMAable memory used for
15500  * the host resident queue. It must be called after the queue has been
15501  * destroyed on the HBA.
15502  **/
15503 void
15504 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15505 {
15506 struct lpfc_dmabuf *dmabuf;
15507
15508 if (!queue)
15509 return;
15510
15511 if (!list_empty(&queue->wq_list))
15512 list_del(&queue->wq_list);
15513
15514 while (!list_empty(&queue->page_list)) {
15515 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15516 list);
15517 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15518 dmabuf->virt, dmabuf->phys);
15519 kfree(dmabuf);
15520 }
15521 if (queue->rqbp) {
15522 lpfc_free_rq_buffer(queue->phba, queue);
15523 kfree(queue->rqbp);
15524 }
15525
15526 if (!list_empty(&queue->cpu_list))
15527 list_del(&queue->cpu_list);
15528
15529 kfree(queue);
15530 return;
15531 }
15532
15533
15534 /**
15535  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15536  * @phba: The HBA that this queue is being created on.
15537  * @page_size: The size of a queue page.
15538  * @entry_size: The size of each queue entry for this queue.
15539  * @entry_count: The number of entries that this queue will handle.
15540  * @cpu: The cpu that will primarily utilize this queue.
15541  *
15542  * This function allocates a queue structure and the DMAable memory used
15543  * for the host resident queue; call it before creating the queue on HBA.
15544  **/
15545 struct lpfc_queue *
15546 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15547 uint32_t entry_size, uint32_t entry_count, int cpu)
15548 {
15549 struct lpfc_queue *queue;
15550 struct lpfc_dmabuf *dmabuf;
15551 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15552 uint16_t x, pgcnt;
15553
15554 if (!phba->sli4_hba.pc_sli4_params.supported)
15555 hw_page_size = page_size;
15556
15557 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15558
15559
15560 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15561 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15562
15563 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15564 GFP_KERNEL, cpu_to_node(cpu));
15565 if (!queue)
15566 return NULL;
15567
15568 INIT_LIST_HEAD(&queue->list);
15569 INIT_LIST_HEAD(&queue->_poll_list);
15570 INIT_LIST_HEAD(&queue->wq_list);
15571 INIT_LIST_HEAD(&queue->wqfull_list);
15572 INIT_LIST_HEAD(&queue->page_list);
15573 INIT_LIST_HEAD(&queue->child_list);
15574 INIT_LIST_HEAD(&queue->cpu_list);
15575
15576
15577
15578
15579 queue->page_count = pgcnt;
15580 queue->q_pgs = (void **)&queue[1];
15581 queue->entry_cnt_per_pg = hw_page_size / entry_size;
15582 queue->entry_size = entry_size;
15583 queue->entry_count = entry_count;
15584 queue->page_size = hw_page_size;
15585 queue->phba = phba;
15586
15587 for (x = 0; x < queue->page_count; x++) {
15588 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15589 dev_to_node(&phba->pcidev->dev));
15590 if (!dmabuf)
15591 goto out_fail;
15592 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15593 hw_page_size, &dmabuf->phys,
15594 GFP_KERNEL);
15595 if (!dmabuf->virt) {
15596 kfree(dmabuf);
15597 goto out_fail;
15598 }
15599 dmabuf->buffer_tag = x;
15600 list_add_tail(&dmabuf->list, &queue->page_list);
15601
15602 queue->q_pgs[x] = dmabuf->virt;
15603 }
15604 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15605 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15606 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15607 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15608
15609
15610
15611 return queue;
15612 out_fail:
15613 lpfc_sli4_queue_free(queue);
15614 return NULL;
15615 }
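/*
 * A worked example of the page math above. With entry_size = 64,
 * entry_count = 1024 and hw_page_size = SLI4_PAGE_SIZE (4096):
 *
 *	pgcnt = ALIGN(64 * 1024, 4096) / 4096 = 65536 / 4096 = 16
 *
 * so 16 DMA-coherent pages are allocated, each holding
 * entry_cnt_per_pg = 4096 / 64 = 64 queue entries, and q_pgs[] (which
 * lives in the same allocation, immediately after the struct) records
 * the 16 virtual page addresses for entry lookup.
 */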
15616
15617
15618 /**
15619  * lpfc_dual_chute_pci_bar_map - Map a PCI BAR set to a host memory address
15620  * @phba: HBA structure that indicates port to create a queue on.
15621  * @pci_barset: PCI BAR set flag.
15622  *
15623  * This function returns the kernel virtual address of the iomapped PCI
15624  * BAR indicated by @pci_barset, or NULL if the BAR set is not recognized.
15625  **/
15626 static void __iomem *
15627 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15628 {
15629 if (!phba->pcidev)
15630 return NULL;
15631
15632 switch (pci_barset) {
15633 case WQ_PCI_BAR_0_AND_1:
15634 return phba->pci_bar0_memmap_p;
15635 case WQ_PCI_BAR_2_AND_3:
15636 return phba->pci_bar2_memmap_p;
15637 case WQ_PCI_BAR_4_AND_5:
15638 return phba->pci_bar4_memmap_p;
15639 default:
15640 break;
15641 }
15642 return NULL;
15643 }
15644
15645
15646 /**
15647  * lpfc_modify_hba_eq_delay - Modify the delay multiplier on EQs
15648  * @phba: HBA structure the EQs are on.
15649  * @startq: The starting EQ index to modify.
15650  * @numq: The number of EQs (consecutive indexes) to modify.
15651  * @usdelay: Amount of delay, in microseconds.
15652  *
15653  * This function revises the EQ delay on one or more EQs. The EQ delay
15654  * is set either by writing the EQ delay register directly (if the SLI
15655  * Port supports EQDR) or by a MODIFY_EQ_DELAY mailbox command, which
15656  * allows several EQs to be updated at once.
15657  *
15658  * The requested delay is capped at 0xFFFF microseconds. Any mailbox
15659  * failure is logged; the function itself returns no status.
15660  **/
15661
15662
15663
15664
15665
15666
15667
15668 void
15669 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15670 uint32_t numq, uint32_t usdelay)
15671 {
15672 struct lpfc_mbx_modify_eq_delay *eq_delay;
15673 LPFC_MBOXQ_t *mbox;
15674 struct lpfc_queue *eq;
15675 int cnt = 0, rc, length;
15676 uint32_t shdr_status, shdr_add_status;
15677 uint32_t dmult;
15678 int qidx;
15679 union lpfc_sli4_cfg_shdr *shdr;
15680
15681 if (startq >= phba->cfg_irq_chann)
15682 return;
15683
15684 if (usdelay > 0xFFFF) {
15685 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15686 "6429 usdelay %d too large. Scaled down to "
15687 "0xFFFF.\n", usdelay);
15688 usdelay = 0xFFFF;
15689 }
15690
15691
15692 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15693 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15694 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15695 if (!eq)
15696 continue;
15697
15698 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15699
15700 if (++cnt >= numq)
15701 break;
15702 }
15703 return;
15704 }
15705
15706
15707
15708 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15709 if (!mbox) {
15710 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15711 "6428 Failed allocating mailbox cmd buffer."
15712 " EQ delay was not set.\n");
15713 return;
15714 }
15715 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15716 sizeof(struct lpfc_sli4_cfg_mhdr));
15717 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15718 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15719 length, LPFC_SLI4_MBX_EMBED);
15720 eq_delay = &mbox->u.mqe.un.eq_delay;
15721
15722
15723 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15724 if (dmult)
15725 dmult--;
15726 if (dmult > LPFC_DMULT_MAX)
15727 dmult = LPFC_DMULT_MAX;
15728
15729 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15730 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15731 if (!eq)
15732 continue;
15733 eq->q_mode = usdelay;
15734 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15735 eq_delay->u.request.eq[cnt].phase = 0;
15736 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15737
15738 if (++cnt >= numq)
15739 break;
15740 }
15741 eq_delay->u.request.num_eq = cnt;
15742
15743 mbox->vport = phba->pport;
15744 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15745 mbox->ctx_ndlp = NULL;
15746 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15747 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15748 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15749 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15750 if (shdr_status || shdr_add_status || rc) {
15751 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15752 "2512 MODIFY_EQ_DELAY mailbox failed with "
15753 "status x%x add_status x%x, mbx status x%x\n",
15754 shdr_status, shdr_add_status, rc);
15755 }
15756 mempool_free(mbox, phba->mbox_mem_pool);
15757 return;
15758 }
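/*
 * A brief worked sketch of the delay-multiplier conversion above,
 * written symbolically since only the ratio of the constants matters:
 *
 *	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
 *	if (dmult)
 *		dmult--;			(hardware encoding is 0-based)
 *	if (dmult > LPFC_DMULT_MAX)
 *		dmult = LPFC_DMULT_MAX;		(clamp to the port maximum)
 *
 * Every EQ touched by one mailbox gets the same dmult, while eq->q_mode
 * caches the requested usdelay so later reads report the value asked
 * for; usdelay = 0 yields dmult = 0, disabling coalescing delay.
 */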
15759
15760
15761 /**
15762  * lpfc_eq_create - Create an Event Queue on the HBA
15763  * @phba: HBA structure that indicates port to create a queue on.
15764  * @eq: The queue structure to use to create the event queue.
15765  * @imax: The maximum interrupt per second limit.
15766  *
15767  * This function creates an event queue, as detailed in @eq, on a port,
15768  * described by @phba, by sending an EQ_CREATE mailbox command to the HBA.
15769  *
15770  * The @eq struct is used to get the entry count and entry size that are
15771  * necessary to determine the number of pages used for this queue. The
15772  * EQ_CREATE mailbox command is issued in polled mode, so this function
15773  * waits for the command to finish before continuing.
15774  *
15775  * On success this function returns zero. If unable to allocate enough
15776  * memory it returns -ENOMEM. If the queue create mailbox command fails
15777  * it returns -ENXIO.
15778  **/
15779
15780 int
15781 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
15782 {
15783 struct lpfc_mbx_eq_create *eq_create;
15784 LPFC_MBOXQ_t *mbox;
15785 int rc, length, status = 0;
15786 struct lpfc_dmabuf *dmabuf;
15787 uint32_t shdr_status, shdr_add_status;
15788 union lpfc_sli4_cfg_shdr *shdr;
15789 uint16_t dmult;
15790 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15791
15792
15793 if (!eq)
15794 return -ENODEV;
15795 if (!phba->sli4_hba.pc_sli4_params.supported)
15796 hw_page_size = SLI4_PAGE_SIZE;
15797
15798 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15799 if (!mbox)
15800 return -ENOMEM;
15801 length = (sizeof(struct lpfc_mbx_eq_create) -
15802 sizeof(struct lpfc_sli4_cfg_mhdr));
15803 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15804 LPFC_MBOX_OPCODE_EQ_CREATE,
15805 length, LPFC_SLI4_MBX_EMBED);
15806 eq_create = &mbox->u.mqe.un.eq_create;
15807 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
15808 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15809 eq->page_count);
15810 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15811 LPFC_EQE_SIZE);
15812 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
15813
15814
15815 if (phba->sli4_hba.pc_sli4_params.eqav) {
15816 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15817 LPFC_Q_CREATE_VERSION_2);
15818 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15819 phba->sli4_hba.pc_sli4_params.eqav);
15820 }
15821
15822
15823 dmult = 0;
15824 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15825 dmult);
15826 switch (eq->entry_count) {
15827 default:
15828 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15829 "0360 Unsupported EQ count. (%d)\n",
15830 eq->entry_count);
15831 if (eq->entry_count < 256) {
15832 status = -EINVAL;
15833 goto out;
15834 }
15835 fallthrough;
15836 case 256:
15837 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15838 LPFC_EQ_CNT_256);
15839 break;
15840 case 512:
15841 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15842 LPFC_EQ_CNT_512);
15843 break;
15844 case 1024:
15845 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15846 LPFC_EQ_CNT_1024);
15847 break;
15848 case 2048:
15849 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15850 LPFC_EQ_CNT_2048);
15851 break;
15852 case 4096:
15853 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15854 LPFC_EQ_CNT_4096);
15855 break;
15856 }
15857 list_for_each_entry(dmabuf, &eq->page_list, list) {
15858 memset(dmabuf->virt, 0, hw_page_size);
15859 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15860 putPaddrLow(dmabuf->phys);
15861 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15862 putPaddrHigh(dmabuf->phys);
15863 }
15864 mbox->vport = phba->pport;
15865 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15866 mbox->ctx_buf = NULL;
15867 mbox->ctx_ndlp = NULL;
15868 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15869 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15870 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15871 if (shdr_status || shdr_add_status || rc) {
15872 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15873 "2500 EQ_CREATE mailbox failed with "
15874 "status x%x add_status x%x, mbx status x%x\n",
15875 shdr_status, shdr_add_status, rc);
15876 status = -ENXIO;
15877 }
15878 eq->type = LPFC_EQ;
15879 eq->subtype = LPFC_NONE;
15880 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15881 if (eq->queue_id == 0xFFFF)
15882 status = -ENXIO;
15883 eq->host_index = 0;
15884 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15885 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15886 out:
15887 mempool_free(mbox, phba->mbox_mem_pool);
15888 return status;
15889 }
15890
15891 static int lpfc_cq_poll_handler(struct irq_poll *iop, int budget)
15892 {
15893 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15894
15895 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
15896
15897 return 1;
15898 }
15899
15900
15901 /**
15902  * lpfc_cq_create - Create a Completion Queue on the HBA
15903  * @phba: HBA structure that indicates port to create a queue on.
15904  * @cq: The queue structure to use to create the completion queue.
15905  * @eq: The event queue to bind this completion queue to.
15906  * @type: Type of queue (MCQ, WCQ, etc).
15907  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
15908  *
15909  * This function creates a completion queue, as detailed in @cq, on a
15910  * port, described by @phba, by sending a CQ_CREATE mailbox command to
15911  * the HBA.
15912  *
15913  * The @cq struct is used to get the entry count and entry size that are
15914  * necessary to determine the number of pages used for this queue, and
15915  * @eq indicates which event queue to bind this completion queue to. The
15916  * CQ_CREATE mailbox is issued in polled mode.
15917  *
15918  * On success this function returns zero. If unable to allocate enough
15919  * memory it returns -ENOMEM. If the queue create mailbox command fails
15920  * it returns -ENXIO.
15921  **/
15922
15923 int
15924 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15925 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15926 {
15927 struct lpfc_mbx_cq_create *cq_create;
15928 struct lpfc_dmabuf *dmabuf;
15929 LPFC_MBOXQ_t *mbox;
15930 int rc, length, status = 0;
15931 uint32_t shdr_status, shdr_add_status;
15932 union lpfc_sli4_cfg_shdr *shdr;
15933
15934
15935 if (!cq || !eq)
15936 return -ENODEV;
15937
15938 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15939 if (!mbox)
15940 return -ENOMEM;
15941 length = (sizeof(struct lpfc_mbx_cq_create) -
15942 sizeof(struct lpfc_sli4_cfg_mhdr));
15943 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15944 LPFC_MBOX_OPCODE_CQ_CREATE,
15945 length, LPFC_SLI4_MBX_EMBED);
15946 cq_create = &mbox->u.mqe.un.cq_create;
15947 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15948 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15949 cq->page_count);
15950 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15951 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15952 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15953 phba->sli4_hba.pc_sli4_params.cqv);
15954 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15955 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15956 (cq->page_size / SLI4_PAGE_SIZE));
15957 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15958 eq->queue_id);
15959 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15960 phba->sli4_hba.pc_sli4_params.cqav);
15961 } else {
15962 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15963 eq->queue_id);
15964 }
15965 switch (cq->entry_count) {
15966 case 2048:
15967 case 4096:
15968 if (phba->sli4_hba.pc_sli4_params.cqv ==
15969 LPFC_Q_CREATE_VERSION_2) {
15970 cq_create->u.request.context.lpfc_cq_context_count =
15971 cq->entry_count;
15972 bf_set(lpfc_cq_context_count,
15973 &cq_create->u.request.context,
15974 LPFC_CQ_CNT_WORD7);
15975 break;
15976 }
15977 fallthrough;
15978 default:
15979 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15980 "0361 Unsupported CQ count: "
15981 "entry cnt %d sz %d pg cnt %d\n",
15982 cq->entry_count, cq->entry_size,
15983 cq->page_count);
15984 if (cq->entry_count < 256) {
15985 status = -EINVAL;
15986 goto out;
15987 }
15988 fallthrough;
15989 case 256:
15990 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15991 LPFC_CQ_CNT_256);
15992 break;
15993 case 512:
15994 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15995 LPFC_CQ_CNT_512);
15996 break;
15997 case 1024:
15998 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15999 LPFC_CQ_CNT_1024);
16000 break;
16001 }
16002 list_for_each_entry(dmabuf, &cq->page_list, list) {
16003 memset(dmabuf->virt, 0, cq->page_size);
16004 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16005 putPaddrLow(dmabuf->phys);
16006 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16007 putPaddrHigh(dmabuf->phys);
16008 }
16009 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16010
16011
16012 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16013 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16014 if (shdr_status || shdr_add_status || rc) {
16015 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16016 "2501 CQ_CREATE mailbox failed with "
16017 "status x%x add_status x%x, mbx status x%x\n",
16018 shdr_status, shdr_add_status, rc);
16019 status = -ENXIO;
16020 goto out;
16021 }
16022 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16023 if (cq->queue_id == 0xFFFF) {
16024 status = -ENXIO;
16025 goto out;
16026 }
16027
16028 list_add_tail(&cq->list, &eq->child_list);
16029
16030 cq->type = type;
16031 cq->subtype = subtype;
16032 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16033 cq->assoc_qid = eq->queue_id;
16034 cq->assoc_qp = eq;
16035 cq->host_index = 0;
16036 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16037 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16038
16039 if (cq->queue_id > phba->sli4_hba.cq_max)
16040 phba->sli4_hba.cq_max = cq->queue_id;
16041
16042 irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_handler);
16043 out:
16044 mempool_free(mbox, phba->mbox_mem_pool);
16045 return status;
16046 }
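/*
 * A hedged end-to-end sketch of bringing up one EQ/CQ pair with the
 * helpers in this file (the 4-byte EQE / 16-byte CQE entry sizes, the
 * 1024 entry counts, and LPFC_MAX_IMAX are assumptions for illustration;
 * the driver derives the real values from the port's SLI4 parameters at
 * init time):
 *
 *	struct lpfc_queue *eq, *cq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, 4, 1024, cpu);
 *	if (!eq || lpfc_eq_create(phba, eq, LPFC_MAX_IMAX))
 *		goto fail;			(cleanup elided)
 *	cq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, 16, 1024, cpu);
 *	if (!cq || lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_IO))
 *		goto fail;
 */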
16047
16048
16049 /**
16050  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16051  * @phba: HBA structure that indicates port to create a queue on.
16052  * @cqp: The queue structure array to use to create the completion queues.
16053  * @hdwq: The hardware queue array with the EQs to bind the CQs to.
16054  * @type: Type of queues (MCQ, WCQ, etc).
16055  * @subtype: Functional purpose of the queues (MBOX, IO, ELS, NVMET, etc).
16056  *
16057  * This function creates a set of completion queues, one per entry in
16058  * @cqp, on a port described by @phba, by sending a CQ_CREATE_SET mailbox
16059  * command to the HBA. Each CQ is bound to the EQ of the hardware queue
16060  * with the same index.
16061  *
16062  * On success this function returns zero. If unable to allocate enough
16063  * memory it returns -ENOMEM. If the queue create mailbox command fails
16064  * it returns -ENXIO.
16065  **/
16066
16067
16068
16069
16070
16071
16072 int
16073 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16074 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16075 uint32_t subtype)
16076 {
16077 struct lpfc_queue *cq;
16078 struct lpfc_queue *eq;
16079 struct lpfc_mbx_cq_create_set *cq_set;
16080 struct lpfc_dmabuf *dmabuf;
16081 LPFC_MBOXQ_t *mbox;
16082 int rc, length, alloclen, status = 0;
16083 int cnt, idx, numcq, page_idx = 0;
16084 uint32_t shdr_status, shdr_add_status;
16085 union lpfc_sli4_cfg_shdr *shdr;
16086 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16087
16088
16089 numcq = phba->cfg_nvmet_mrq;
16090 if (!cqp || !hdwq || !numcq)
16091 return -ENODEV;
16092
16093 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16094 if (!mbox)
16095 return -ENOMEM;
16096
16097 length = sizeof(struct lpfc_mbx_cq_create_set);
16098 length += ((numcq * cqp[0]->page_count) *
16099 sizeof(struct dma_address));
16100 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16101 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16102 LPFC_SLI4_MBX_NEMBED);
16103 if (alloclen < length) {
16104 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16105 "3098 Allocated DMA memory size (%d) is "
16106 "less than the requested DMA memory size "
16107 "(%d)\n", alloclen, length);
16108 status = -ENOMEM;
16109 goto out;
16110 }
16111 cq_set = mbox->sge_array->addr[0];
16112 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16113 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16114
16115 for (idx = 0; idx < numcq; idx++) {
16116 cq = cqp[idx];
16117 eq = hdwq[idx].hba_eq;
16118 if (!cq || !eq) {
16119 status = -ENOMEM;
16120 goto out;
16121 }
16122 if (!phba->sli4_hba.pc_sli4_params.supported)
16123 hw_page_size = cq->page_size;
16124
16125 switch (idx) {
16126 case 0:
16127 bf_set(lpfc_mbx_cq_create_set_page_size,
16128 &cq_set->u.request,
16129 (hw_page_size / SLI4_PAGE_SIZE));
16130 bf_set(lpfc_mbx_cq_create_set_num_pages,
16131 &cq_set->u.request, cq->page_count);
16132 bf_set(lpfc_mbx_cq_create_set_evt,
16133 &cq_set->u.request, 1);
16134 bf_set(lpfc_mbx_cq_create_set_valid,
16135 &cq_set->u.request, 1);
16136 bf_set(lpfc_mbx_cq_create_set_cqe_size,
16137 &cq_set->u.request, 0);
16138 bf_set(lpfc_mbx_cq_create_set_num_cq,
16139 &cq_set->u.request, numcq);
16140 bf_set(lpfc_mbx_cq_create_set_autovalid,
16141 &cq_set->u.request,
16142 phba->sli4_hba.pc_sli4_params.cqav);
16143 switch (cq->entry_count) {
16144 case 2048:
16145 case 4096:
16146 if (phba->sli4_hba.pc_sli4_params.cqv ==
16147 LPFC_Q_CREATE_VERSION_2) {
16148 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16149 &cq_set->u.request,
16150 cq->entry_count);
16151 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16152 &cq_set->u.request,
16153 LPFC_CQ_CNT_WORD7);
16154 break;
16155 }
16156 fallthrough;
16157 default:
16158 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16159 "3118 Bad CQ count. (%d)\n",
16160 cq->entry_count);
16161 if (cq->entry_count < 256) {
16162 status = -EINVAL;
16163 goto out;
16164 }
16165 fallthrough;
16166 case 256:
16167 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16168 &cq_set->u.request, LPFC_CQ_CNT_256);
16169 break;
16170 case 512:
16171 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16172 &cq_set->u.request, LPFC_CQ_CNT_512);
16173 break;
16174 case 1024:
16175 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16176 &cq_set->u.request, LPFC_CQ_CNT_1024);
16177 break;
16178 }
16179 bf_set(lpfc_mbx_cq_create_set_eq_id0,
16180 &cq_set->u.request, eq->queue_id);
16181 break;
16182 case 1:
16183 bf_set(lpfc_mbx_cq_create_set_eq_id1,
16184 &cq_set->u.request, eq->queue_id);
16185 break;
16186 case 2:
16187 bf_set(lpfc_mbx_cq_create_set_eq_id2,
16188 &cq_set->u.request, eq->queue_id);
16189 break;
16190 case 3:
16191 bf_set(lpfc_mbx_cq_create_set_eq_id3,
16192 &cq_set->u.request, eq->queue_id);
16193 break;
16194 case 4:
16195 bf_set(lpfc_mbx_cq_create_set_eq_id4,
16196 &cq_set->u.request, eq->queue_id);
16197 break;
16198 case 5:
16199 bf_set(lpfc_mbx_cq_create_set_eq_id5,
16200 &cq_set->u.request, eq->queue_id);
16201 break;
16202 case 6:
16203 bf_set(lpfc_mbx_cq_create_set_eq_id6,
16204 &cq_set->u.request, eq->queue_id);
16205 break;
16206 case 7:
16207 bf_set(lpfc_mbx_cq_create_set_eq_id7,
16208 &cq_set->u.request, eq->queue_id);
16209 break;
16210 case 8:
16211 bf_set(lpfc_mbx_cq_create_set_eq_id8,
16212 &cq_set->u.request, eq->queue_id);
16213 break;
16214 case 9:
16215 bf_set(lpfc_mbx_cq_create_set_eq_id9,
16216 &cq_set->u.request, eq->queue_id);
16217 break;
16218 case 10:
16219 bf_set(lpfc_mbx_cq_create_set_eq_id10,
16220 &cq_set->u.request, eq->queue_id);
16221 break;
16222 case 11:
16223 bf_set(lpfc_mbx_cq_create_set_eq_id11,
16224 &cq_set->u.request, eq->queue_id);
16225 break;
16226 case 12:
16227 bf_set(lpfc_mbx_cq_create_set_eq_id12,
16228 &cq_set->u.request, eq->queue_id);
16229 break;
16230 case 13:
16231 bf_set(lpfc_mbx_cq_create_set_eq_id13,
16232 &cq_set->u.request, eq->queue_id);
16233 break;
16234 case 14:
16235 bf_set(lpfc_mbx_cq_create_set_eq_id14,
16236 &cq_set->u.request, eq->queue_id);
16237 break;
16238 case 15:
16239 bf_set(lpfc_mbx_cq_create_set_eq_id15,
16240 &cq_set->u.request, eq->queue_id);
16241 break;
16242 }
16243
16244
16245 list_add_tail(&cq->list, &eq->child_list);
16246
16247 cq->type = type;
16248 cq->subtype = subtype;
16249 cq->assoc_qid = eq->queue_id;
16250 cq->assoc_qp = eq;
16251 cq->host_index = 0;
16252 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16253 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16254 cq->entry_count);
16255 cq->chann = idx;
16256
16257 rc = 0;
16258 list_for_each_entry(dmabuf, &cq->page_list, list) {
16259 memset(dmabuf->virt, 0, hw_page_size);
16260 cnt = page_idx + dmabuf->buffer_tag;
16261 cq_set->u.request.page[cnt].addr_lo =
16262 putPaddrLow(dmabuf->phys);
16263 cq_set->u.request.page[cnt].addr_hi =
16264 putPaddrHigh(dmabuf->phys);
16265 rc++;
16266 }
16267 page_idx += rc;
16268 }
16269
16270 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16271
16272
16273 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16274 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16275 if (shdr_status || shdr_add_status || rc) {
16276 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16277 "3119 CQ_CREATE_SET mailbox failed with "
16278 "status x%x add_status x%x, mbx status x%x\n",
16279 shdr_status, shdr_add_status, rc);
16280 status = -ENXIO;
16281 goto out;
16282 }
16283 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16284 if (rc == 0xFFFF) {
16285 status = -ENXIO;
16286 goto out;
16287 }
16288
16289 for (idx = 0; idx < numcq; idx++) {
16290 cq = cqp[idx];
16291 cq->queue_id = rc + idx;
16292 if (cq->queue_id > phba->sli4_hba.cq_max)
16293 phba->sli4_hba.cq_max = cq->queue_id;
16294 }
16295
16296 out:
16297 lpfc_sli4_mbox_cmd_free(phba, mbox);
16298 return status;
16299 }
16300
16301
16302 /**
16303  * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
16304  * @phba: HBA structure that indicates port to create a queue on.
16305  * @mq: The queue structure to use to create the mailbox queue.
16306  * @mbox: An allocated pointer to type LPFC_MBOXQ_t.
16307  * @cq: The completion queue to associate with this mq.
16308  *
16309  * This function provides fallback functionality when MQ_CREATE_EXT fails
16310  * on older firmware generations; its purpose is otherwise identical to
16311  * MQ_CREATE_EXT. It cannot fail, as all attributes were previously
16312  * accessed and initialized for MQ_CREATE_EXT.
16313  **/
16314
16315 static void
16316 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16317 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16318 {
16319 struct lpfc_mbx_mq_create *mq_create;
16320 struct lpfc_dmabuf *dmabuf;
16321 int length;
16322
16323 length = (sizeof(struct lpfc_mbx_mq_create) -
16324 sizeof(struct lpfc_sli4_cfg_mhdr));
16325 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16326 LPFC_MBOX_OPCODE_MQ_CREATE,
16327 length, LPFC_SLI4_MBX_EMBED);
16328 mq_create = &mbox->u.mqe.un.mq_create;
16329 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16330 mq->page_count);
16331 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16332 cq->queue_id);
16333 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16334 switch (mq->entry_count) {
16335 case 16:
16336 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16337 LPFC_MQ_RING_SIZE_16);
16338 break;
16339 case 32:
16340 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16341 LPFC_MQ_RING_SIZE_32);
16342 break;
16343 case 64:
16344 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16345 LPFC_MQ_RING_SIZE_64);
16346 break;
16347 case 128:
16348 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16349 LPFC_MQ_RING_SIZE_128);
16350 break;
16351 }
16352 list_for_each_entry(dmabuf, &mq->page_list, list) {
16353 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16354 putPaddrLow(dmabuf->phys);
16355 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16356 putPaddrHigh(dmabuf->phys);
16357 }
16358 }
16359
16360
16361 /**
16362  * lpfc_mq_create - Create a mailbox Queue on the HBA
16363  * @phba: HBA structure that indicates port to create a queue on.
16364  * @mq: The queue structure to use to create the mailbox queue.
16365  * @cq: The completion queue to associate with this mq.
16366  * @subtype: The queue's subtype.
16367  *
16368  * This function creates a mailbox queue, as detailed in @mq, on a port,
16369  * described by @phba, by sending an MQ_CREATE_EXT mailbox command to the
16370  * HBA, falling back to plain MQ_CREATE on older firmware.
16371  *
16372  * The @mq struct is used to get the entry count and entry size that are
16373  * necessary to determine the number of pages used for this queue. The @cq
16374  * indicates which completion queue to bind this mailbox queue to.
16375  *
16376  * On success this function returns zero. If unable to allocate enough
16377  * memory it returns -ENOMEM. If the queue create mailbox command fails
16378  * it returns -ENXIO.
16379  **/
16380
16381 int32_t
16382 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16383 struct lpfc_queue *cq, uint32_t subtype)
16384 {
16385 struct lpfc_mbx_mq_create *mq_create;
16386 struct lpfc_mbx_mq_create_ext *mq_create_ext;
16387 struct lpfc_dmabuf *dmabuf;
16388 LPFC_MBOXQ_t *mbox;
16389 int rc, length, status = 0;
16390 uint32_t shdr_status, shdr_add_status;
16391 union lpfc_sli4_cfg_shdr *shdr;
16392 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16393
16394
16395 if (!mq || !cq)
16396 return -ENODEV;
16397 if (!phba->sli4_hba.pc_sli4_params.supported)
16398 hw_page_size = SLI4_PAGE_SIZE;
16399
16400 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16401 if (!mbox)
16402 return -ENOMEM;
16403 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16404 sizeof(struct lpfc_sli4_cfg_mhdr));
16405 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16406 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16407 length, LPFC_SLI4_MBX_EMBED);
16408
16409 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16410 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16411 bf_set(lpfc_mbx_mq_create_ext_num_pages,
16412 &mq_create_ext->u.request, mq->page_count);
16413 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16414 &mq_create_ext->u.request, 1);
16415 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16416 &mq_create_ext->u.request, 1);
16417 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16418 &mq_create_ext->u.request, 1);
16419 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16420 &mq_create_ext->u.request, 1);
16421 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16422 &mq_create_ext->u.request, 1);
16423 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16424 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16425 phba->sli4_hba.pc_sli4_params.mqv);
16426 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16427 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16428 cq->queue_id);
16429 else
16430 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16431 cq->queue_id);
16432 switch (mq->entry_count) {
16433 default:
16434 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16435 "0362 Unsupported MQ count. (%d)\n",
16436 mq->entry_count);
16437 if (mq->entry_count < 16) {
16438 status = -EINVAL;
16439 goto out;
16440 }
16441 fallthrough;
16442 case 16:
16443 bf_set(lpfc_mq_context_ring_size,
16444 &mq_create_ext->u.request.context,
16445 LPFC_MQ_RING_SIZE_16);
16446 break;
16447 case 32:
16448 bf_set(lpfc_mq_context_ring_size,
16449 &mq_create_ext->u.request.context,
16450 LPFC_MQ_RING_SIZE_32);
16451 break;
16452 case 64:
16453 bf_set(lpfc_mq_context_ring_size,
16454 &mq_create_ext->u.request.context,
16455 LPFC_MQ_RING_SIZE_64);
16456 break;
16457 case 128:
16458 bf_set(lpfc_mq_context_ring_size,
16459 &mq_create_ext->u.request.context,
16460 LPFC_MQ_RING_SIZE_128);
16461 break;
16462 }
16463 list_for_each_entry(dmabuf, &mq->page_list, list) {
16464 memset(dmabuf->virt, 0, hw_page_size);
16465 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16466 putPaddrLow(dmabuf->phys);
16467 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16468 putPaddrHigh(dmabuf->phys);
16469 }
16470 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16471 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16472 &mq_create_ext->u.response);
16473 if (rc != MBX_SUCCESS) {
16474 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16475 "2795 MQ_CREATE_EXT failed with "
16476 "status x%x. Failback to MQ_CREATE.\n",
16477 rc);
16478 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16479 mq_create = &mbox->u.mqe.un.mq_create;
16480 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16481 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16482 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16483 &mq_create->u.response);
16484 }
16485
16486
16487 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16488 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16489 if (shdr_status || shdr_add_status || rc) {
16490 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16491 "2502 MQ_CREATE mailbox failed with "
16492 "status x%x add_status x%x, mbx status x%x\n",
16493 shdr_status, shdr_add_status, rc);
16494 status = -ENXIO;
16495 goto out;
16496 }
16497 if (mq->queue_id == 0xFFFF) {
16498 status = -ENXIO;
16499 goto out;
16500 }
16501 mq->type = LPFC_MQ;
16502 mq->assoc_qid = cq->queue_id;
16503 mq->subtype = subtype;
16504 mq->host_index = 0;
16505 mq->hba_index = 0;
16506
16507
16508 list_add_tail(&mq->list, &cq->child_list);
16509 out:
16510 mempool_free(mbox, phba->mbox_mem_pool);
16511 return status;
16512 }
16513
16514
16515 /**
16516  * lpfc_wq_create - Create a Work Queue on the HBA
16517  * @phba: HBA structure that indicates port to create a queue on.
16518  * @wq: The queue structure to use to create the work queue.
16519  * @cq: The completion queue to bind this work queue to.
16520  * @subtype: The subtype of the work queue indicating its functionality.
16521  *
16522  * This function creates a work queue, as detailed in @wq, on a port,
16523  * described by @phba, by sending a WQ_CREATE mailbox command to the HBA.
16524  *
16525  * The @wq struct is used to get the entry count and entry size that are
16526  * necessary to determine the number of pages to allocate and use for this
16527  * queue. The @cq is used to indicate which completion queue to bind this
16528  * work queue to.
16529  *
16530  * On success this function returns zero. If unable to allocate enough
16531  * memory it returns -ENOMEM. If the queue create mailbox command fails
16532  * it returns -ENXIO.
16533  **/
16534
16535
16536 int
16537 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16538 struct lpfc_queue *cq, uint32_t subtype)
16539 {
16540 struct lpfc_mbx_wq_create *wq_create;
16541 struct lpfc_dmabuf *dmabuf;
16542 LPFC_MBOXQ_t *mbox;
16543 int rc, length, status = 0;
16544 uint32_t shdr_status, shdr_add_status;
16545 union lpfc_sli4_cfg_shdr *shdr;
16546 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16547 struct dma_address *page;
16548 void __iomem *bar_memmap_p;
16549 uint32_t db_offset;
16550 uint16_t pci_barset;
16551 uint8_t dpp_barset;
16552 uint32_t dpp_offset;
16553 uint8_t wq_create_version;
16554 #ifdef CONFIG_X86
16555 unsigned long pg_addr;
16556 #endif
16557
16558
16559 if (!wq || !cq)
16560 return -ENODEV;
16561 if (!phba->sli4_hba.pc_sli4_params.supported)
16562 hw_page_size = wq->page_size;
16563
16564 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16565 if (!mbox)
16566 return -ENOMEM;
16567 length = (sizeof(struct lpfc_mbx_wq_create) -
16568 sizeof(struct lpfc_sli4_cfg_mhdr));
16569 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16570 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16571 length, LPFC_SLI4_MBX_EMBED);
16572 wq_create = &mbox->u.mqe.un.wq_create;
16573 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16574 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16575 wq->page_count);
16576 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16577 cq->queue_id);
16578
16579
16580 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16581 phba->sli4_hba.pc_sli4_params.wqv);
16582
16583 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16584 (wq->page_size > SLI4_PAGE_SIZE))
16585 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16586 else
16587 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16588
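/* Version 1 of WQ_CREATE also carries the entry count and size, the
 * queue page size, and the DPP (direct packet push) request.
 */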
16589 switch (wq_create_version) {
16590 case LPFC_Q_CREATE_VERSION_1:
16591 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16592 wq->entry_count);
16593 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16594 LPFC_Q_CREATE_VERSION_1);
16595
16596 switch (wq->entry_size) {
16597 default:
16598 case 64:
16599 bf_set(lpfc_mbx_wq_create_wqe_size,
16600 &wq_create->u.request_1,
16601 LPFC_WQ_WQE_SIZE_64);
16602 break;
16603 case 128:
16604 bf_set(lpfc_mbx_wq_create_wqe_size,
16605 &wq_create->u.request_1,
16606 LPFC_WQ_WQE_SIZE_128);
16607 break;
16608 }
16609
16610 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16611 bf_set(lpfc_mbx_wq_create_page_size,
16612 &wq_create->u.request_1,
16613 (wq->page_size / SLI4_PAGE_SIZE));
16614 page = wq_create->u.request_1.page;
16615 break;
16616 default:
16617 page = wq_create->u.request.page;
16618 break;
16619 }
16620
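/* Zero each WQ page and record its physical address in the request */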
16621 list_for_each_entry(dmabuf, &wq->page_list, list) {
16622 memset(dmabuf->virt, 0, hw_page_size);
16623 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16624 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16625 }
16626
16627 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16628 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16629
16630 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16631
16632 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16633 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16634 if (shdr_status || shdr_add_status || rc) {
16635 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16636 "2503 WQ_CREATE mailbox failed with "
16637 "status x%x add_status x%x, mbx status x%x\n",
16638 shdr_status, shdr_add_status, rc);
16639 status = -ENXIO;
16640 goto out;
16641 }
16642
16643 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16644 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16645 &wq_create->u.response);
16646 else
16647 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16648 &wq_create->u.response_1);
16649
16650 if (wq->queue_id == 0xFFFF) {
16651 status = -ENXIO;
16652 goto out;
16653 }
16654
16655 wq->db_format = LPFC_DB_LIST_FORMAT;
16656 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16657 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16658 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16659 &wq_create->u.response);
16660 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16661 (wq->db_format != LPFC_DB_RING_FORMAT)) {
16662 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16663 "3265 WQ[%d] doorbell format "
16664 "not supported: x%x\n",
16665 wq->queue_id, wq->db_format);
16666 status = -EINVAL;
16667 goto out;
16668 }
16669 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16670 &wq_create->u.response);
16671 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16672 pci_barset);
16673 if (!bar_memmap_p) {
16674 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16675 "3263 WQ[%d] failed to memmap "
16676 "pci barset:x%x\n",
16677 wq->queue_id, pci_barset);
16678 status = -ENOMEM;
16679 goto out;
16680 }
16681 db_offset = wq_create->u.response.doorbell_offset;
16682 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16683 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16684 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16685 "3252 WQ[%d] doorbell offset "
16686 "not supported: x%x\n",
16687 wq->queue_id, db_offset);
16688 status = -EINVAL;
16689 goto out;
16690 }
16691 wq->db_regaddr = bar_memmap_p + db_offset;
16692 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16693 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16694 "format:x%x\n", wq->queue_id,
16695 pci_barset, db_offset, wq->db_format);
16696 } else
16697 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16698 } else {
16699
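/* Check if DPP was honored by the firmware */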
16700 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16701 &wq_create->u.response_1);
16702 if (wq->dpp_enable) {
16703 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16704 &wq_create->u.response_1);
16705 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16706 pci_barset);
16707 if (!bar_memmap_p) {
16708 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16709 "3267 WQ[%d] failed to memmap "
16710 "pci barset:x%x\n",
16711 wq->queue_id, pci_barset);
16712 status = -ENOMEM;
16713 goto out;
16714 }
16715 db_offset = wq_create->u.response_1.doorbell_offset;
16716 wq->db_regaddr = bar_memmap_p + db_offset;
16717 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16718 &wq_create->u.response_1);
16719 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16720 &wq_create->u.response_1);
16721 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16722 dpp_barset);
16723 if (!bar_memmap_p) {
16724 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16725 "3268 WQ[%d] failed to memmap "
16726 "pci barset:x%x\n",
16727 wq->queue_id, dpp_barset);
16728 status = -ENOMEM;
16729 goto out;
16730 }
16731 dpp_offset = wq_create->u.response_1.dpp_offset;
16732 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16733 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16734 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16735 "dpp_id:x%x dpp_barset:x%x "
16736 "dpp_offset:x%x\n",
16737 wq->queue_id, pci_barset, db_offset,
16738 wq->dpp_id, dpp_barset, dpp_offset);
16739
16740 #ifdef CONFIG_X86
16741
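/* Enable write combining for the DPP aperture */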
16742 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16743 rc = set_memory_wc(pg_addr, 1);
16744 if (rc) {
16745 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16746 "3272 Cannot setup Combined "
16747 "Write on WQ[%d] - disable DPP\n",
16748 wq->queue_id);
16749 phba->cfg_enable_dpp = 0;
16750 }
16751 #else
16752 phba->cfg_enable_dpp = 0;
16753 #endif
16754 } else
16755 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16756 }
16757 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16758 if (wq->pring == NULL) {
16759 status = -ENOMEM;
16760 goto out;
16761 }
16762 wq->type = LPFC_WQ;
16763 wq->assoc_qid = cq->queue_id;
16764 wq->subtype = subtype;
16765 wq->host_index = 0;
16766 wq->hba_index = 0;
16767 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16768
16769
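/* link the wq onto the parent cq child list */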
16770 list_add_tail(&wq->list, &cq->child_list);
16771 out:
16772 mempool_free(mbox, phba->mbox_mem_pool);
16773 return status;
16774 }

/**
 * lpfc_rq_create - Create a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this pair of receive queues to.
 * @subtype: The subtype of the queues indicating their functionality.
 *
 * This function creates a header receive queue and a data receive queue,
 * as detailed in @hrq and @drq, on a port described by @phba, by sending
 * two RQ_CREATE mailbox commands to the HBA. The two queues must have the
 * same entry count and are both bound to the completion queue @cq.
 *
 * Returns 0 on success, -ENOMEM if no memory is available, -EINVAL if the
 * queue pair is mis-sized, and -ENXIO if a mailbox command fails.
 **/
16800 int
16801 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16802 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
16803 {
16804 struct lpfc_mbx_rq_create *rq_create;
16805 struct lpfc_dmabuf *dmabuf;
16806 LPFC_MBOXQ_t *mbox;
16807 int rc, length, status = 0;
16808 uint32_t shdr_status, shdr_add_status;
16809 union lpfc_sli4_cfg_shdr *shdr;
16810 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16811 void __iomem *bar_memmap_p;
16812 uint32_t db_offset;
16813 uint16_t pci_barset;
16814
16815
16816 if (!hrq || !drq || !cq)
16817 return -ENODEV;
16818 if (!phba->sli4_hba.pc_sli4_params.supported)
16819 hw_page_size = SLI4_PAGE_SIZE;
16820
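/* The header and data RQs form a pair and must be sized identically */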
16821 if (hrq->entry_count != drq->entry_count)
16822 return -EINVAL;
16823 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16824 if (!mbox)
16825 return -ENOMEM;
16826 length = (sizeof(struct lpfc_mbx_rq_create) -
16827 sizeof(struct lpfc_sli4_cfg_mhdr));
16828 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16829 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16830 length, LPFC_SLI4_MBX_EMBED);
16831 rq_create = &mbox->u.mqe.un.rq_create;
16832 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16833 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16834 phba->sli4_hba.pc_sli4_params.rqv);
16835 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16836 bf_set(lpfc_rq_context_rqe_count_1,
16837 &rq_create->u.request.context,
16838 hrq->entry_count);
16839 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
16840 bf_set(lpfc_rq_context_rqe_size,
16841 &rq_create->u.request.context,
16842 LPFC_RQE_SIZE_8);
16843 bf_set(lpfc_rq_context_page_size,
16844 &rq_create->u.request.context,
16845 LPFC_RQ_PAGE_SIZE_4096);
16846 } else {
16847 switch (hrq->entry_count) {
16848 default:
16849 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16850 "2535 Unsupported RQ count. (%d)\n",
16851 hrq->entry_count);
16852 if (hrq->entry_count < 512) {
16853 status = -EINVAL;
16854 goto out;
16855 }
16856 fallthrough;
16857 case 512:
16858 bf_set(lpfc_rq_context_rqe_count,
16859 &rq_create->u.request.context,
16860 LPFC_RQ_RING_SIZE_512);
16861 break;
16862 case 1024:
16863 bf_set(lpfc_rq_context_rqe_count,
16864 &rq_create->u.request.context,
16865 LPFC_RQ_RING_SIZE_1024);
16866 break;
16867 case 2048:
16868 bf_set(lpfc_rq_context_rqe_count,
16869 &rq_create->u.request.context,
16870 LPFC_RQ_RING_SIZE_2048);
16871 break;
16872 case 4096:
16873 bf_set(lpfc_rq_context_rqe_count,
16874 &rq_create->u.request.context,
16875 LPFC_RQ_RING_SIZE_4096);
16876 break;
16877 }
16878 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16879 LPFC_HDR_BUF_SIZE);
16880 }
16881 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16882 cq->queue_id);
16883 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16884 hrq->page_count);
16885 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16886 memset(dmabuf->virt, 0, hw_page_size);
16887 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16888 putPaddrLow(dmabuf->phys);
16889 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16890 putPaddrHigh(dmabuf->phys);
16891 }
16892 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16893 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16894
16895 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16896
16897 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16898 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16899 if (shdr_status || shdr_add_status || rc) {
16900 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16901 "2504 RQ_CREATE mailbox failed with "
16902 "status x%x add_status x%x, mbx status x%x\n",
16903 shdr_status, shdr_add_status, rc);
16904 status = -ENXIO;
16905 goto out;
16906 }
16907 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16908 if (hrq->queue_id == 0xFFFF) {
16909 status = -ENXIO;
16910 goto out;
16911 }
16912
16913 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16914 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16915 &rq_create->u.response);
16916 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16917 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16918 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16919 "3262 RQ [%d] doorbell format not "
16920 "supported: x%x\n", hrq->queue_id,
16921 hrq->db_format);
16922 status = -EINVAL;
16923 goto out;
16924 }
16925
16926 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16927 &rq_create->u.response);
16928 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16929 if (!bar_memmap_p) {
16930 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16931 "3269 RQ[%d] failed to memmap pci "
16932 "barset:x%x\n", hrq->queue_id,
16933 pci_barset);
16934 status = -ENOMEM;
16935 goto out;
16936 }
16937
16938 db_offset = rq_create->u.response.doorbell_offset;
16939 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16940 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16941 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16942 "3270 RQ[%d] doorbell offset not "
16943 "supported: x%x\n", hrq->queue_id,
16944 db_offset);
16945 status = -EINVAL;
16946 goto out;
16947 }
16948 hrq->db_regaddr = bar_memmap_p + db_offset;
16949 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16950 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16951 "format:x%x\n", hrq->queue_id, pci_barset,
16952 db_offset, hrq->db_format);
16953 } else {
16954 hrq->db_format = LPFC_DB_RING_FORMAT;
16955 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16956 }
16957 hrq->type = LPFC_HRQ;
16958 hrq->assoc_qid = cq->queue_id;
16959 hrq->subtype = subtype;
16960 hrq->host_index = 0;
16961 hrq->hba_index = 0;
16962 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16963
16964
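/* now create the data receive queue */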
16965 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16966 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16967 length, LPFC_SLI4_MBX_EMBED);
16968 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16969 phba->sli4_hba.pc_sli4_params.rqv);
16970 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16971 bf_set(lpfc_rq_context_rqe_count_1,
16972 &rq_create->u.request.context, hrq->entry_count);
16973 if (subtype == LPFC_NVMET)
16974 rq_create->u.request.context.buffer_size =
16975 LPFC_NVMET_DATA_BUF_SIZE;
16976 else
16977 rq_create->u.request.context.buffer_size =
16978 LPFC_DATA_BUF_SIZE;
16979 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16980 LPFC_RQE_SIZE_8);
16981 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16982 (PAGE_SIZE/SLI4_PAGE_SIZE));
16983 } else {
16984 switch (drq->entry_count) {
16985 default:
16986 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16987 "2536 Unsupported RQ count. (%d)\n",
16988 drq->entry_count);
16989 if (drq->entry_count < 512) {
16990 status = -EINVAL;
16991 goto out;
16992 }
16993 fallthrough;
16994 case 512:
16995 bf_set(lpfc_rq_context_rqe_count,
16996 &rq_create->u.request.context,
16997 LPFC_RQ_RING_SIZE_512);
16998 break;
16999 case 1024:
17000 bf_set(lpfc_rq_context_rqe_count,
17001 &rq_create->u.request.context,
17002 LPFC_RQ_RING_SIZE_1024);
17003 break;
17004 case 2048:
17005 bf_set(lpfc_rq_context_rqe_count,
17006 &rq_create->u.request.context,
17007 LPFC_RQ_RING_SIZE_2048);
17008 break;
17009 case 4096:
17010 bf_set(lpfc_rq_context_rqe_count,
17011 &rq_create->u.request.context,
17012 LPFC_RQ_RING_SIZE_4096);
17013 break;
17014 }
17015 if (subtype == LPFC_NVMET)
17016 bf_set(lpfc_rq_context_buf_size,
17017 &rq_create->u.request.context,
17018 LPFC_NVMET_DATA_BUF_SIZE);
17019 else
17020 bf_set(lpfc_rq_context_buf_size,
17021 &rq_create->u.request.context,
17022 LPFC_DATA_BUF_SIZE);
17023 }
17024 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17025 cq->queue_id);
17026 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17027 drq->page_count);
17028 list_for_each_entry(dmabuf, &drq->page_list, list) {
17029 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17030 putPaddrLow(dmabuf->phys);
17031 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17032 putPaddrHigh(dmabuf->phys);
17033 }
17034 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17035 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17036 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17037
17038 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17039 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17040 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17041 if (shdr_status || shdr_add_status || rc) {
17042 status = -ENXIO;
17043 goto out;
17044 }
17045 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17046 if (drq->queue_id == 0xFFFF) {
17047 status = -ENXIO;
17048 goto out;
17049 }
17050 drq->type = LPFC_DRQ;
17051 drq->assoc_qid = cq->queue_id;
17052 drq->subtype = subtype;
17053 drq->host_index = 0;
17054 drq->hba_index = 0;
17055 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17056
17057
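/* link the header and data RQs onto the parent cq child list */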
17058 list_add_tail(&hrq->list, &cq->child_list);
17059 list_add_tail(&drq->list, &cq->child_list);
17060
17061 out:
17062 mempool_free(mbox, phba->mbox_mem_pool);
17063 return status;
17064 }

/**
 * lpfc_mrq_create - Create NVMET Multi-Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create the queues on.
 * @hrqp: Array of header receive queues to create.
 * @drqp: Array of data receive queues to create.
 * @cqp: Array of completion queues the receive queue pairs bind to.
 * @subtype: The subtype of the queues indicating their functionality.
 *
 * This function creates phba->cfg_nvmet_mrq header/data receive queue
 * pairs with a single non-embedded, version 2 RQ_CREATE mailbox command.
 * On success the firmware returns a base queue id and assigns
 * consecutive ids, alternating header and data queues, to each pair.
 *
 * Returns 0 on success, -ENODEV for missing queues, -EINVAL for
 * mis-sized queue pairs, -ENOMEM on allocation failure, and -ENXIO if
 * the mailbox command fails.
 **/
17090 int
17091 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17092 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17093 uint32_t subtype)
17094 {
17095 struct lpfc_queue *hrq, *drq, *cq;
17096 struct lpfc_mbx_rq_create_v2 *rq_create;
17097 struct lpfc_dmabuf *dmabuf;
17098 LPFC_MBOXQ_t *mbox;
17099 int rc, length, alloclen, status = 0;
17100 int cnt, idx, numrq, page_idx = 0;
17101 uint32_t shdr_status, shdr_add_status;
17102 union lpfc_sli4_cfg_shdr *shdr;
17103 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17104
17105 numrq = phba->cfg_nvmet_mrq;
17106
17107 if (!hrqp || !drqp || !cqp || !numrq)
17108 return -ENODEV;
17109 if (!phba->sli4_hba.pc_sli4_params.supported)
17110 hw_page_size = SLI4_PAGE_SIZE;
17111
17112 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17113 if (!mbox)
17114 return -ENOMEM;
17115
17116 length = sizeof(struct lpfc_mbx_rq_create_v2);
17117 length += ((2 * numrq * hrqp[0]->page_count) *
17118 sizeof(struct dma_address));
17119
17120 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17121 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17122 LPFC_SLI4_MBX_NEMBED);
17123 if (alloclen < length) {
17124 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17125 "3099 Allocated DMA memory size (%d) is "
17126 "less than the requested DMA memory size "
17127 "(%d)\n", alloclen, length);
17128 status = -ENOMEM;
17129 goto out;
17130 }
17131
17132
17133
17134 rq_create = mbox->sge_array->addr[0];
17135 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17136
17137 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17138 cnt = 0;
17139
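/* Build one version 2 RQ_CREATE request covering every RQ pair */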
17140 for (idx = 0; idx < numrq; idx++) {
17141 hrq = hrqp[idx];
17142 drq = drqp[idx];
17143 cq = cqp[idx];
17144
17145
17146 if (!hrq || !drq || !cq) {
17147 status = -ENODEV;
17148 goto out;
17149 }
17150
17151 if (hrq->entry_count != drq->entry_count) {
17152 status = -EINVAL;
17153 goto out;
17154 }
17155
17156 if (idx == 0) {
17157 bf_set(lpfc_mbx_rq_create_num_pages,
17158 &rq_create->u.request,
17159 hrq->page_count);
17160 bf_set(lpfc_mbx_rq_create_rq_cnt,
17161 &rq_create->u.request, (numrq * 2));
17162 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17163 1);
17164 bf_set(lpfc_rq_context_base_cq,
17165 &rq_create->u.request.context,
17166 cq->queue_id);
17167 bf_set(lpfc_rq_context_data_size,
17168 &rq_create->u.request.context,
17169 LPFC_NVMET_DATA_BUF_SIZE);
17170 bf_set(lpfc_rq_context_hdr_size,
17171 &rq_create->u.request.context,
17172 LPFC_HDR_BUF_SIZE);
17173 bf_set(lpfc_rq_context_rqe_count_1,
17174 &rq_create->u.request.context,
17175 hrq->entry_count);
17176 bf_set(lpfc_rq_context_rqe_size,
17177 &rq_create->u.request.context,
17178 LPFC_RQE_SIZE_8);
17179 bf_set(lpfc_rq_context_page_size,
17180 &rq_create->u.request.context,
17181 (PAGE_SIZE/SLI4_PAGE_SIZE));
17182 }
17183 rc = 0;
17184 list_for_each_entry(dmabuf, &hrq->page_list, list) {
17185 memset(dmabuf->virt, 0, hw_page_size);
17186 cnt = page_idx + dmabuf->buffer_tag;
17187 rq_create->u.request.page[cnt].addr_lo =
17188 putPaddrLow(dmabuf->phys);
17189 rq_create->u.request.page[cnt].addr_hi =
17190 putPaddrHigh(dmabuf->phys);
17191 rc++;
17192 }
17193 page_idx += rc;
17194
17195 rc = 0;
17196 list_for_each_entry(dmabuf, &drq->page_list, list) {
17197 memset(dmabuf->virt, 0, hw_page_size);
17198 cnt = page_idx + dmabuf->buffer_tag;
17199 rq_create->u.request.page[cnt].addr_lo =
17200 putPaddrLow(dmabuf->phys);
17201 rq_create->u.request.page[cnt].addr_hi =
17202 putPaddrHigh(dmabuf->phys);
17203 rc++;
17204 }
17205 page_idx += rc;
17206
17207 hrq->db_format = LPFC_DB_RING_FORMAT;
17208 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17209 hrq->type = LPFC_HRQ;
17210 hrq->assoc_qid = cq->queue_id;
17211 hrq->subtype = subtype;
17212 hrq->host_index = 0;
17213 hrq->hba_index = 0;
17214 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17215
17216 drq->db_format = LPFC_DB_RING_FORMAT;
17217 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17218 drq->type = LPFC_DRQ;
17219 drq->assoc_qid = cq->queue_id;
17220 drq->subtype = subtype;
17221 drq->host_index = 0;
17222 drq->hba_index = 0;
17223 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17224
17225 list_add_tail(&hrq->list, &cq->child_list);
17226 list_add_tail(&drq->list, &cq->child_list);
17227 }
17228
17229 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17230
17231 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17232 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17233 if (shdr_status || shdr_add_status || rc) {
17234 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17235 "3120 RQ_CREATE mailbox failed with "
17236 "status x%x add_status x%x, mbx status x%x\n",
17237 shdr_status, shdr_add_status, rc);
17238 status = -ENXIO;
17239 goto out;
17240 }
17241 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17242 if (rc == 0xFFFF) {
17243 status = -ENXIO;
17244 goto out;
17245 }
17246
17247
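/* The firmware assigns consecutive queue ids, header then data, to each pair */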
17248 for (idx = 0; idx < numrq; idx++) {
17249 hrq = hrqp[idx];
17250 hrq->queue_id = rc + (2 * idx);
17251 drq = drqp[idx];
17252 drq->queue_id = rc + (2 * idx) + 1;
17253 }
17254
17255 out:
17256 lpfc_sli4_mbox_cmd_free(phba, mbox);
17257 return status;
17258 }

/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending an
 * EQ_DESTROY mailbox command to the HBA; the queue id is taken from @eq.
 *
 * Returns 0 on success and -ENXIO if the mailbox command fails.
 **/
17273 int
17274 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17275 {
17276 LPFC_MBOXQ_t *mbox;
17277 int rc, length, status = 0;
17278 uint32_t shdr_status, shdr_add_status;
17279 union lpfc_sli4_cfg_shdr *shdr;
17280
17281
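/* sanity check on queue memory */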
17282 if (!eq)
17283 return -ENODEV;
17284
17285 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17286 if (!mbox)
17287 return -ENOMEM;
17288 length = (sizeof(struct lpfc_mbx_eq_destroy) -
17289 sizeof(struct lpfc_sli4_cfg_mhdr));
17290 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17291 LPFC_MBOX_OPCODE_EQ_DESTROY,
17292 length, LPFC_SLI4_MBX_EMBED);
17293 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17294 eq->queue_id);
17295 mbox->vport = eq->phba->pport;
17296 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17297
17298 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17299
17300 shdr = (union lpfc_sli4_cfg_shdr *)
17301 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17302 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17303 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17304 if (shdr_status || shdr_add_status || rc) {
17305 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17306 "2505 EQ_DESTROY mailbox failed with "
17307 "status x%x add_status x%x, mbx status x%x\n",
17308 shdr_status, shdr_add_status, rc);
17309 status = -ENXIO;
17310 }
17311
17312
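/* Remove eq from any list */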
17313 list_del_init(&eq->list);
17314 mempool_free(mbox, eq->phba->mbox_mem_pool);
17315 return status;
17316 }

/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a
 * CQ_DESTROY mailbox command to the HBA; the queue id is taken from @cq.
 *
 * Returns 0 on success and -ENXIO if the mailbox command fails.
 **/
17331 int
17332 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17333 {
17334 LPFC_MBOXQ_t *mbox;
17335 int rc, length, status = 0;
17336 uint32_t shdr_status, shdr_add_status;
17337 union lpfc_sli4_cfg_shdr *shdr;
17338
17339
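/* sanity check on queue memory */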
17340 if (!cq)
17341 return -ENODEV;
17342 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17343 if (!mbox)
17344 return -ENOMEM;
17345 length = (sizeof(struct lpfc_mbx_cq_destroy) -
17346 sizeof(struct lpfc_sli4_cfg_mhdr));
17347 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17348 LPFC_MBOX_OPCODE_CQ_DESTROY,
17349 length, LPFC_SLI4_MBX_EMBED);
17350 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17351 cq->queue_id);
17352 mbox->vport = cq->phba->pport;
17353 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17354 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17355
17356 shdr = (union lpfc_sli4_cfg_shdr *)
17357 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
17358 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17359 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17360 if (shdr_status || shdr_add_status || rc) {
17361 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17362 "2506 CQ_DESTROY mailbox failed with "
17363 "status x%x add_status x%x, mbx status x%x\n",
17364 shdr_status, shdr_add_status, rc);
17365 status = -ENXIO;
17366 }
17367
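/* Remove cq from any list */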
17368 list_del_init(&cq->list);
17369 mempool_free(mbox, cq->phba->mbox_mem_pool);
17370 return status;
17371 }

/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending an
 * MQ_DESTROY mailbox command to the HBA; the queue id is taken from @mq.
 *
 * Returns 0 on success and -ENXIO if the mailbox command fails.
 **/
17386 int
17387 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17388 {
17389 LPFC_MBOXQ_t *mbox;
17390 int rc, length, status = 0;
17391 uint32_t shdr_status, shdr_add_status;
17392 union lpfc_sli4_cfg_shdr *shdr;
17393
17394
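/* sanity check on queue memory */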
17395 if (!mq)
17396 return -ENODEV;
17397 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17398 if (!mbox)
17399 return -ENOMEM;
17400 length = (sizeof(struct lpfc_mbx_mq_destroy) -
17401 sizeof(struct lpfc_sli4_cfg_mhdr));
17402 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17403 LPFC_MBOX_OPCODE_MQ_DESTROY,
17404 length, LPFC_SLI4_MBX_EMBED);
17405 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17406 mq->queue_id);
17407 mbox->vport = mq->phba->pport;
17408 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17409 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17410
17411 shdr = (union lpfc_sli4_cfg_shdr *)
17412 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17413 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17414 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17415 if (shdr_status || shdr_add_status || rc) {
17416 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17417 "2507 MQ_DESTROY mailbox failed with "
17418 "status x%x add_status x%x, mbx status x%x\n",
17419 shdr_status, shdr_add_status, rc);
17420 status = -ENXIO;
17421 }
17422
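/* Remove mq from any list */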
17423 list_del_init(&mq->list);
17424 mempool_free(mbox, mq->phba->mbox_mem_pool);
17425 return status;
17426 }

/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a
 * WQ_DESTROY mailbox command to the HBA; the queue id is taken from @wq.
 *
 * Returns 0 on success and -ENXIO if the mailbox command fails.
 **/
17441 int
17442 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17443 {
17444 LPFC_MBOXQ_t *mbox;
17445 int rc, length, status = 0;
17446 uint32_t shdr_status, shdr_add_status;
17447 union lpfc_sli4_cfg_shdr *shdr;
17448
17449
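/* sanity check on queue memory */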
17450 if (!wq)
17451 return -ENODEV;
17452 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17453 if (!mbox)
17454 return -ENOMEM;
17455 length = (sizeof(struct lpfc_mbx_wq_destroy) -
17456 sizeof(struct lpfc_sli4_cfg_mhdr));
17457 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17458 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17459 length, LPFC_SLI4_MBX_EMBED);
17460 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17461 wq->queue_id);
17462 mbox->vport = wq->phba->pport;
17463 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17464 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17465 shdr = (union lpfc_sli4_cfg_shdr *)
17466 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17467 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17468 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17469 if (shdr_status || shdr_add_status || rc) {
17470 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17471 "2508 WQ_DESTROY mailbox failed with "
17472 "status x%x add_status x%x, mbx status x%x\n",
17473 shdr_status, shdr_add_status, rc);
17474 status = -ENXIO;
17475 }
17476
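/* Remove wq from any list */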
17477 list_del_init(&wq->list);
17478 kfree(wq->pring);
17479 wq->pring = NULL;
17480 mempool_free(mbox, wq->phba->mbox_mem_pool);
17481 return status;
17482 }

/**
 * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to destroy the queues on.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys the header and data receive queues, as detailed
 * in @hrq and @drq, by sending an RQ_DESTROY mailbox command for each.
 *
 * Returns 0 on success and -ENXIO if either mailbox command fails.
 **/
17498 int
17499 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17500 struct lpfc_queue *drq)
17501 {
17502 LPFC_MBOXQ_t *mbox;
17503 int rc, length, status = 0;
17504 uint32_t shdr_status, shdr_add_status;
17505 union lpfc_sli4_cfg_shdr *shdr;
17506
17507
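/* sanity check on queue memory */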
17508 if (!hrq || !drq)
17509 return -ENODEV;
17510 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17511 if (!mbox)
17512 return -ENOMEM;
17513 length = (sizeof(struct lpfc_mbx_rq_destroy) -
17514 sizeof(struct lpfc_sli4_cfg_mhdr));
17515 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17516 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17517 length, LPFC_SLI4_MBX_EMBED);
17518 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17519 hrq->queue_id);
17520 mbox->vport = hrq->phba->pport;
17521 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17522 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17523
17524 shdr = (union lpfc_sli4_cfg_shdr *)
17525 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17526 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17527 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17528 if (shdr_status || shdr_add_status || rc) {
17529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17530 "2509 RQ_DESTROY mailbox failed with "
17531 "status x%x add_status x%x, mbx status x%x\n",
17532 shdr_status, shdr_add_status, rc);
17533 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17534 return -ENXIO;
17535 }
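/* Reuse the mailbox to destroy the paired data receive queue */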
17536 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17537 drq->queue_id);
17538 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17539 shdr = (union lpfc_sli4_cfg_shdr *)
17540 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17541 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17542 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17543 if (shdr_status || shdr_add_status || rc) {
17544 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17545 "2510 RQ_DESTROY mailbox failed with "
17546 "status x%x add_status x%x, mbx status x%x\n",
17547 shdr_status, shdr_add_status, rc);
17548 status = -ENXIO;
17549 }
17550 list_del_init(&hrq->list);
17551 list_del_init(&drq->list);
17552 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17553 return status;
17554 }

/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine posts the sgl pages for the IO that has the xritag that
 * is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded. If the
 * caller has fewer than 256 scatter gather segments to map,
 * @pdma_phys_addr1 should be 0.
 *
 * Returns 0 on success, -EINVAL for an invalid xritag, -ENOMEM if no
 * mailbox memory is available, and -ENXIO if the mailbox command fails.
 **/
17578 int
17579 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17580 dma_addr_t pdma_phys_addr0,
17581 dma_addr_t pdma_phys_addr1,
17582 uint16_t xritag)
17583 {
17584 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17585 LPFC_MBOXQ_t *mbox;
17586 int rc;
17587 uint32_t shdr_status, shdr_add_status;
17588 uint32_t mbox_tmo;
17589 union lpfc_sli4_cfg_shdr *shdr;
17590
17591 if (xritag == NO_XRI) {
17592 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17593 "0364 Invalid param:\n");
17594 return -EINVAL;
17595 }
17596
17597 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17598 if (!mbox)
17599 return -ENOMEM;
17600
17601 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17602 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17603 sizeof(struct lpfc_mbx_post_sgl_pages) -
17604 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17605
17606 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17607 &mbox->u.mqe.un.post_sgl_pages;
17608 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17609 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17610
17611 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17612 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17613 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17614 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17615
17616 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17617 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17618 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17619 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
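/* Poll if interrupts are not enabled; otherwise wait for completion */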
17620 if (!phba->sli4_hba.intr_enable)
17621 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17622 else {
17623 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17624 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17625 }
17626
17627 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17628 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17629 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17630 if (!phba->sli4_hba.intr_enable)
17631 mempool_free(mbox, phba->mbox_mem_pool);
17632 else if (rc != MBX_TIMEOUT)
17633 mempool_free(mbox, phba->mbox_mem_pool);
17634 if (shdr_status || shdr_add_status || rc) {
17635 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17636 "2511 POST_SGL mailbox failed with "
17637 "status x%x add_status x%x, mbx status x%x\n",
17638 shdr_status, shdr_add_status, rc);
return -ENXIO;
17639 }
17640 return 0;
17641 }

/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine finds the first unused bit in the driver's XRI bitmask,
 * marks it in use, and returns that xri. All updates are made under the
 * hbalock.
 *
 * Returns the allocated xri, or NO_XRI if the range is exhausted.
 **/
17656 static uint16_t
17657 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17658 {
17659 unsigned long xri;
17660
/*
 * Fetch the next available xri. The max xri count is the maximum
 * number of XRIs the firmware supports.
 */
17665 spin_lock_irq(&phba->hbalock);
17666 xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
17667 phba->sli4_hba.max_cfg_param.max_xri);
17668 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17669 spin_unlock_irq(&phba->hbalock);
17670 return NO_XRI;
17671 } else {
17672 set_bit(xri, phba->sli4_hba.xri_bmask);
17673 phba->sli4_hba.max_cfg_param.xri_used++;
17674 }
17675 spin_unlock_irq(&phba->hbalock);
17676 return xri;
17677 }

/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine clears the xri in the XRI bitmask and decrements the
 * in-use count. The hbalock must be held by the caller.
 **/
17687 static void
17688 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17689 {
17690 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17691 phba->sli4_hba.max_cfg_param.xri_used--;
17692 }
17693 }

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine takes the hbalock and releases the xri back to the XRI
 * bitmask via __lpfc_sli4_free_xri().
 **/
17703 void
17704 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17705 {
17706 spin_lock_irq(&phba->hbalock);
17707 __lpfc_sli4_free_xri(phba, xri);
17708 spin_unlock_irq(&phba->hbalock);
17709 }

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused
 * xritag it logs a warning and returns NO_XRI; otherwise it returns
 * the allocated xritag.
 **/
17721 uint16_t
17722 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17723 {
17724 uint16_t xri_index;
17725
17726 xri_index = lpfc_sli4_alloc_xri(phba);
17727 if (xri_index == NO_XRI)
17728 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17729 "2004 Failed to allocate XRI.last XRITAG is %d"
17730 " Max XRI is %d, Used XRI is %d\n",
17731 xri_index,
17732 phba->sli4_hba.max_cfg_param.max_xri,
17733 phba->sli4_hba.max_cfg_param.xri_used);
17734 return xri_index;
17735 }

/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine posts a block of the driver's sgl pages to the HBA using
 * a non-embedded mailbox command. No lock is held. It is only called
 * when the driver is loading and after all IO has been stopped.
 **/
17748 static int
17749 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17750 struct list_head *post_sgl_list,
17751 int post_cnt)
17752 {
17753 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17754 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17755 struct sgl_page_pairs *sgl_pg_pairs;
17756 void *viraddr;
17757 LPFC_MBOXQ_t *mbox;
17758 uint32_t reqlen, alloclen, pg_pairs;
17759 uint32_t mbox_tmo;
17760 uint16_t xritag_start = 0;
17761 int rc = 0;
17762 uint32_t shdr_status, shdr_add_status;
17763 union lpfc_sli4_cfg_shdr *shdr;
17764
17765 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17766 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17767 if (reqlen > SLI4_PAGE_SIZE) {
17768 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17769 "2559 Block sgl registration required DMA "
17770 "size (%d) great than a page\n", reqlen);
17771 return -ENOMEM;
17772 }
17773
17774 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17775 if (!mbox)
17776 return -ENOMEM;
17777
17778
17779 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17780 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17781 LPFC_SLI4_MBX_NEMBED);
17782
17783 if (alloclen < reqlen) {
17784 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17785 "0285 Allocated DMA memory size (%d) is "
17786 "less than the requested DMA memory "
17787 "size (%d)\n", alloclen, reqlen);
17788 lpfc_sli4_mbox_cmd_free(phba, mbox);
17789 return -ENOMEM;
17790 }
17791
17792 viraddr = mbox->sge_array->addr[0];
17793 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17794 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17795
17796 pg_pairs = 0;
17797 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17798
17799 sgl_pg_pairs->sgl_pg0_addr_lo =
17800 cpu_to_le32(putPaddrLow(sglq_entry->phys));
17801 sgl_pg_pairs->sgl_pg0_addr_hi =
17802 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17803 sgl_pg_pairs->sgl_pg1_addr_lo =
17804 cpu_to_le32(putPaddrLow(0));
17805 sgl_pg_pairs->sgl_pg1_addr_hi =
17806 cpu_to_le32(putPaddrHigh(0));
17807
17808
17809 if (pg_pairs == 0)
17810 xritag_start = sglq_entry->sli4_xritag;
17811 sgl_pg_pairs++;
17812 pg_pairs++;
17813 }
17814
17815
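/* Complete initialization and perform endian conversion */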
17816 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17817 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17818 sgl->word0 = cpu_to_le32(sgl->word0);
17819
17820 if (!phba->sli4_hba.intr_enable)
17821 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17822 else {
17823 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17824 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17825 }
17826 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17827 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17828 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17829 if (!phba->sli4_hba.intr_enable)
17830 lpfc_sli4_mbox_cmd_free(phba, mbox);
17831 else if (rc != MBX_TIMEOUT)
17832 lpfc_sli4_mbox_cmd_free(phba, mbox);
17833 if (shdr_status || shdr_add_status || rc) {
17834 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17835 "2513 POST_SGL_BLOCK mailbox command failed "
17836 "status x%x add_status x%x mbx status x%x\n",
17837 shdr_status, shdr_add_status, rc);
17838 rc = -ENXIO;
17839 }
17840 return rc;
17841 }

/**
 * lpfc_sli4_post_io_sgl_block - post a block of IO buffer sgls to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to the IO buffer list.
 * @count: number of IO buffers on the list.
 *
 * This routine posts a block of @count sgl pages from the IO buffer
 * list @nblist to the HBA using a non-embedded mailbox command. No lock
 * is held.
 **/
17854 static int
17855 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17856 int count)
17857 {
17858 struct lpfc_io_buf *lpfc_ncmd;
17859 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17860 struct sgl_page_pairs *sgl_pg_pairs;
17861 void *viraddr;
17862 LPFC_MBOXQ_t *mbox;
17863 uint32_t reqlen, alloclen, pg_pairs;
17864 uint32_t mbox_tmo;
17865 uint16_t xritag_start = 0;
17866 int rc = 0;
17867 uint32_t shdr_status, shdr_add_status;
17868 dma_addr_t pdma_phys_bpl1;
17869 union lpfc_sli4_cfg_shdr *shdr;
17870
17871
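/* Calculate the requested length of the dma memory */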
17872 reqlen = count * sizeof(struct sgl_page_pairs) +
17873 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17874 if (reqlen > SLI4_PAGE_SIZE) {
17875 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
17876 "6118 Block sgl registration required DMA "
17877 "size (%d) great than a page\n", reqlen);
17878 return -ENOMEM;
17879 }
17880 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17881 if (!mbox) {
17882 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17883 "6119 Failed to allocate mbox cmd memory\n");
17884 return -ENOMEM;
17885 }
17886
17887
17888 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17889 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17890 reqlen, LPFC_SLI4_MBX_NEMBED);
17891
17892 if (alloclen < reqlen) {
17893 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17894 "6120 Allocated DMA memory size (%d) is "
17895 "less than the requested DMA memory "
17896 "size (%d)\n", alloclen, reqlen);
17897 lpfc_sli4_mbox_cmd_free(phba, mbox);
17898 return -ENOMEM;
17899 }
17900
17901
17902 viraddr = mbox->sge_array->addr[0];
17903
17904
17905 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17906 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17907
17908 pg_pairs = 0;
17909 list_for_each_entry(lpfc_ncmd, nblist, list) {
17910
17911 sgl_pg_pairs->sgl_pg0_addr_lo =
17912 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17913 sgl_pg_pairs->sgl_pg0_addr_hi =
17914 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17915 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17916 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17917 SGL_PAGE_SIZE;
17918 else
17919 pdma_phys_bpl1 = 0;
17920 sgl_pg_pairs->sgl_pg1_addr_lo =
17921 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17922 sgl_pg_pairs->sgl_pg1_addr_hi =
17923 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17924
17925 if (pg_pairs == 0)
17926 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17927 sgl_pg_pairs++;
17928 pg_pairs++;
17929 }
17930 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17931 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17932
17933 sgl->word0 = cpu_to_le32(sgl->word0);
17934
17935 if (!phba->sli4_hba.intr_enable) {
17936 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17937 } else {
17938 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17939 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17940 }
17941 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17942 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17943 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17944 if (!phba->sli4_hba.intr_enable)
17945 lpfc_sli4_mbox_cmd_free(phba, mbox);
17946 else if (rc != MBX_TIMEOUT)
17947 lpfc_sli4_mbox_cmd_free(phba, mbox);
17948 if (shdr_status || shdr_add_status || rc) {
17949 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17950 "6125 POST_SGL_BLOCK mailbox command failed "
17951 "status x%x add_status x%x mbx status x%x\n",
17952 shdr_status, shdr_add_status, rc);
17953 rc = -ENXIO;
17954 }
17955 return rc;
17956 }

/**
 * lpfc_sli4_post_io_sgl_list - Post blocks of IO buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the IO buffer list.
 * @sb_count: number of IO buffers on the list.
 *
 * This routine walks the list of IO buffers passed in, builds blocks of
 * buffers with contiguous xritags, and posts each block with the
 * non-embedded SGL block post mailbox command. A single buffer whose
 * xritag is not contiguous with its neighbors is posted with the
 * embedded SGL post mailbox command instead.
 *
 * Returns the number of buffers successfully posted; 0 indicates
 * failure.
 **/
17973 int
17974 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17975 struct list_head *post_nblist, int sb_count)
17976 {
17977 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17978 int status, sgl_size;
17979 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17980 dma_addr_t pdma_phys_sgl1;
17981 int last_xritag = NO_XRI;
17982 int cur_xritag;
17983 LIST_HEAD(prep_nblist);
17984 LIST_HEAD(blck_nblist);
17985 LIST_HEAD(nvme_nblist);
17986
17987
17988 if (sb_count <= 0)
17989 return -EINVAL;
17990
17991 sgl_size = phba->cfg_sg_dma_buf_size;
17992 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17993 list_del_init(&lpfc_ncmd->list);
17994 block_cnt++;
17995 if ((last_xritag != NO_XRI) &&
17996 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17997
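/* XRIs in a block post must be contiguous; post the batch gathered so far */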
17998 list_splice_init(&prep_nblist, &blck_nblist);
17999 post_cnt = block_cnt - 1;
18000
18001 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18002 block_cnt = 1;
18003 } else {
18004
18005 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18006
18007 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
18008 list_splice_init(&prep_nblist, &blck_nblist);
18009 post_cnt = block_cnt;
18010 block_cnt = 0;
18011 }
18012 }
18013 num_posting++;
18014 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18015
18016
18017 if (num_posting == sb_count) {
18018 if (post_cnt == 0) {
18019
18020 list_splice_init(&prep_nblist, &blck_nblist);
18021 post_cnt = block_cnt;
18022 } else if (block_cnt == 1) {
18023
18024 if (sgl_size > SGL_PAGE_SIZE)
18025 pdma_phys_sgl1 =
18026 lpfc_ncmd->dma_phys_sgl +
18027 SGL_PAGE_SIZE;
18028 else
18029 pdma_phys_sgl1 = 0;
18030 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18031 status = lpfc_sli4_post_sgl(
18032 phba, lpfc_ncmd->dma_phys_sgl,
18033 pdma_phys_sgl1, cur_xritag);
18034 if (status) {
18035
18036 lpfc_ncmd->flags |=
18037 LPFC_SBUF_NOT_POSTED;
18038 } else {
18039
18040 lpfc_ncmd->flags &=
18041 ~LPFC_SBUF_NOT_POSTED;
18042 lpfc_ncmd->status = IOSTAT_SUCCESS;
18043 num_posted++;
18044 }
18045
18046 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18047 }
18048 }
18049
18050
18051 if (post_cnt == 0)
18052 continue;
18053
18054
18055 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18056 post_cnt);
18057
18058
18059 if (block_cnt == 0)
18060 last_xritag = NO_XRI;
18061
18062
18063 post_cnt = 0;
18064
18065
18066 while (!list_empty(&blck_nblist)) {
18067 list_remove_head(&blck_nblist, lpfc_ncmd,
18068 struct lpfc_io_buf, list);
18069 if (status) {
18070
18071 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18072 } else {
18073
18074 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18075 lpfc_ncmd->status = IOSTAT_SUCCESS;
18076 num_posted++;
18077 }
18078 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18079 }
18080 }
18081
18082 lpfc_io_buf_replenish(phba, &nvme_nblist);
18083
18084 return num_posted;
18085 }

/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to a lpfc hba data structure.
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the R_CTL and TYPE fields in @fc_hdr to see if
 * the FC frame is one the driver handles. Frames carried inside a VF
 * tagging header are checked recursively against the inner header.
 *
 * Returns 0 if the frame is valid and 1 if it is dropped.
 **/
18097 static int
18098 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18099 {
18100
18101 struct fc_vft_header *fc_vft_hdr;
18102 uint32_t *header = (uint32_t *) fc_hdr;
18103
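/* R_CTL value used by transport MDS diagnostic (loopback) frames */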
18104 #define FC_RCTL_MDS_DIAGS 0xF4
18105
18106 switch (fc_hdr->fh_r_ctl) {
18107 case FC_RCTL_DD_UNCAT:
18108 case FC_RCTL_DD_SOL_DATA:
18109 case FC_RCTL_DD_UNSOL_CTL:
18110 case FC_RCTL_DD_SOL_CTL:
18111 case FC_RCTL_DD_UNSOL_DATA:
18112 case FC_RCTL_DD_DATA_DESC:
18113 case FC_RCTL_DD_UNSOL_CMD:
18114 case FC_RCTL_DD_CMD_STATUS:
18115 case FC_RCTL_ELS_REQ:
18116 case FC_RCTL_ELS_REP:
18117 case FC_RCTL_ELS4_REQ:
18118 case FC_RCTL_ELS4_REP:
18119 case FC_RCTL_BA_ABTS:
18120 case FC_RCTL_BA_RMC:
18121 case FC_RCTL_BA_ACC:
18122 case FC_RCTL_BA_RJT:
18123 case FC_RCTL_BA_PRMT:
18124 case FC_RCTL_ACK_1:
18125 case FC_RCTL_ACK_0:
18126 case FC_RCTL_P_RJT:
18127 case FC_RCTL_F_RJT:
18128 case FC_RCTL_P_BSY:
18129 case FC_RCTL_F_BSY:
18130 case FC_RCTL_F_BSYL:
18131 case FC_RCTL_LCR:
18132 case FC_RCTL_MDS_DIAGS:
18133 case FC_RCTL_END:
18134 break;
18135 case FC_RCTL_VFTH:
18136 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18137 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18138 return lpfc_fc_frame_check(phba, fc_hdr);
18139 case FC_RCTL_BA_NOP:
18140 default:
18141 goto drop;
18142 }
18143
18144 switch (fc_hdr->fh_type) {
18145 case FC_TYPE_BLS:
18146 case FC_TYPE_ELS:
18147 case FC_TYPE_FCP:
18148 case FC_TYPE_CT:
18149 case FC_TYPE_NVME:
18150 break;
18151 case FC_TYPE_IP:
18152 case FC_TYPE_ILS:
18153 default:
18154 goto drop;
18155 }
18156
18157 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18158 "2538 Received frame rctl:x%x, type:x%x, "
18159 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18160 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18161 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18162 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18163 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18164 be32_to_cpu(header[6]));
18165 return 0;
18166 drop:
18167 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18168 "2539 Dropped frame rctl:x%x type:x%x\n",
18169 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18170 return 1;
18171 }

/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function returns the VF id from the VF tagging header, or zero
 * if the frame does not carry a VF tagging header.
 **/
18181 static uint32_t
18182 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18183 {
18184 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18185
18186 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18187 return 0;
18188 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
18189 }

/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 * @did: Destination ID of the frame
 *
 * This function searches @phba for a vport that matches the FCFI, VFI,
 * and DID of the frame described by @fc_hdr. Fabric-addressed frames,
 * and point-to-point frames received before the link is fully up, are
 * handed to the physical port.
 *
 * Returns the matching vport, or NULL if no match is found.
 **/
18204 static struct lpfc_vport *
18205 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18206 uint16_t fcfi, uint32_t did)
18207 {
18208 struct lpfc_vport **vports;
18209 struct lpfc_vport *vport = NULL;
18210 int i;
18211
18212 if (did == Fabric_DID)
18213 return phba->pport;
18214 if ((phba->pport->fc_flag & FC_PT2PT) &&
18215 !(phba->link_state == LPFC_HBA_READY))
18216 return phba->pport;
18217
18218 vports = lpfc_create_vport_work_array(phba);
18219 if (vports != NULL) {
18220 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18221 if (phba->fcf.fcfi == fcfi &&
18222 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18223 vports[i]->fc_myDID == did) {
18224 vport = vports[i];
18225 break;
18226 }
18227 }
18228 }
18229 lpfc_destroy_vport_work_array(phba, vports);
18230 return vport;
18231 }

/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport.
 * The time stamp indicates when the last frame of the sequence that has
 * been idle for the longest amount of time was received. The driver
 * uses it to decide whether any received sequences have timed out.
 **/
18243 static void
18244 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18245 {
18246 struct lpfc_dmabuf *h_buf;
18247 struct hbq_dmabuf *dmabuf = NULL;
18248
18249
18250 h_buf = list_get_first(&vport->rcv_buffer_list,
18251 struct lpfc_dmabuf, list);
18252 if (!h_buf)
18253 return;
18254 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18255 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18256 }

/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function frees every outstanding received sequence on the vport.
 * It is called when a link event or user action invalidates all the
 * received sequences.
 **/
18266 void
18267 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18268 {
18269 struct lpfc_dmabuf *h_buf, *hnext;
18270 struct lpfc_dmabuf *d_buf, *dnext;
18271 struct hbq_dmabuf *dmabuf = NULL;
18272
18273
18274 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18275 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18276 list_del_init(&dmabuf->hbuf.list);
18277 list_for_each_entry_safe(d_buf, dnext,
18278 &dmabuf->dbuf.list, list) {
18279 list_del_init(&d_buf->list);
18280 lpfc_in_buf_free(vport->phba, d_buf);
18281 }
18282 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18283 }
18284 }

/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function checks the vport's rcv_buffer_time_stamp to determine
 * whether any received sequences have exceeded E_D_TOV. If so, it walks
 * the sequences from most inactive to most active and frees the
 * resources of each timed out sequence without sending an abort.
 **/
18298 void
18299 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18300 {
18301 struct lpfc_dmabuf *h_buf, *hnext;
18302 struct lpfc_dmabuf *d_buf, *dnext;
18303 struct hbq_dmabuf *dmabuf = NULL;
18304 unsigned long timeout;
18305 int abort_count = 0;
18306
18307 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18308 vport->rcv_buffer_time_stamp);
18309 if (list_empty(&vport->rcv_buffer_list) ||
18310 time_before(jiffies, timeout))
18311 return;
18312
18313 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18314 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18315 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18316 dmabuf->time_stamp);
18317 if (time_before(jiffies, timeout))
18318 break;
18319 abort_count++;
18320 list_del_init(&dmabuf->hbuf.list);
18321 list_for_each_entry_safe(d_buf, dnext,
18322 &dmabuf->dbuf.list, list) {
18323 list_del_init(&d_buf->list);
18324 lpfc_in_buf_free(vport->phba, d_buf);
18325 }
18326 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18327 }
18328 if (abort_count)
18329 lpfc_update_rcv_time_stamp(vport);
18330 }

/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport to work on.
 * @dmabuf: The buffer containing the newly received frame
 *
 * This function searches the incomplete sequences already received by
 * this @vport. If the frame matches one, the frame is added to that
 * sequence in SEQ_CNT order; otherwise the frame starts a new sequence
 * on the vport's rcv_buffer_list.
 *
 * Returns a pointer to the first frame of the sequence, or NULL if the
 * frame could not be placed.
 **/
18345 static struct hbq_dmabuf *
18346 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18347 {
18348 struct fc_frame_header *new_hdr;
18349 struct fc_frame_header *temp_hdr;
18350 struct lpfc_dmabuf *d_buf;
18351 struct lpfc_dmabuf *h_buf;
18352 struct hbq_dmabuf *seq_dmabuf = NULL;
18353 struct hbq_dmabuf *temp_dmabuf = NULL;
18354 uint8_t found = 0;
18355
18356 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18357 dmabuf->time_stamp = jiffies;
18358 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18359
18360
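/* Use the hdr_buf to find the sequence that this frame belongs to */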
18361 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18362 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18363 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18364 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18365 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18366 continue;
18367
18368 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18369 break;
18370 }
18371 if (!seq_dmabuf) {
/*
 * This indicates the first frame received for this sequence.
 * Queue the buffer on the vport's rcv_buffer_list.
 */
18376 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18377 lpfc_update_rcv_time_stamp(vport);
18378 return dmabuf;
18379 }
18380 temp_hdr = seq_dmabuf->hbuf.virt;
18381 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18382 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18383 list_del_init(&seq_dmabuf->hbuf.list);
18384 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18385 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18386 lpfc_update_rcv_time_stamp(vport);
18387 return dmabuf;
18388 }
18389
18390 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18391 seq_dmabuf->time_stamp = jiffies;
18392 lpfc_update_rcv_time_stamp(vport);
18393 if (list_empty(&seq_dmabuf->dbuf.list)) {
18394 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18395 return seq_dmabuf;
18396 }
18397
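/* find the correct place in the sequence to insert this frame */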
18398 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18399 while (!found) {
18400 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18401 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
/*
 * If the frame's sequence count is greater than the frame on
 * the list then insert the frame right after this frame
 */
18406 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18407 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18408 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18409 found = 1;
18410 break;
18411 }
18412
18413 if (&d_buf->list == &seq_dmabuf->dbuf.list)
18414 break;
18415 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18416 }
18417
18418 if (found)
18419 return seq_dmabuf;
18420 return NULL;
18421 }

/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort a partially assembled sequence,
 * identified by the basic abort described in @dmabuf. If the driver
 * holds such a sequence, all of its frames are freed.
 *
 * Returns true if a matching partially assembled sequence was found and
 * freed; false if no matching sequence is present.
 **/
18439 static bool
18440 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18441 struct hbq_dmabuf *dmabuf)
18442 {
18443 struct fc_frame_header *new_hdr;
18444 struct fc_frame_header *temp_hdr;
18445 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18446 struct hbq_dmabuf *seq_dmabuf = NULL;
18447
/* Use the hdr_buf to find the sequence that matches this frame */
18449 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18450 INIT_LIST_HEAD(&dmabuf->hbuf.list);
18451 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18452 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18453 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18454 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18455 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18456 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18457 continue;
18458
18459 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18460 break;
18461 }
18462
/* Free up all the frames from the partially assembled sequence */
18464 if (seq_dmabuf) {
18465 list_for_each_entry_safe(d_buf, n_buf,
18466 &seq_dmabuf->dbuf.list, list) {
18467 list_del_init(&d_buf->list);
18468 lpfc_in_buf_free(vport->phba, d_buf);
18469 }
18470 return true;
18471 }
18472 return false;
18473 }

/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort a sequence that has already been
 * assembled and passed to the upper layer protocol, as identified by
 * the basic abort in @dmabuf. If a matching pending context exists at
 * the ULP, it is cleaned up.
 *
 * Returns true if a matching pending context was cleaned at the ulp;
 * false otherwise.
 **/
18491 static bool
18492 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18493 {
18494 struct lpfc_hba *phba = vport->phba;
18495 int handled;
18496
18497
18498 if (phba->sli_rev < LPFC_SLI_REV4)
18499 return false;
18500
18501
18502 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18503 if (handled)
18504 return true;
18505
18506 return false;
18507 }

/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response completion event:
 * it releases the node reference and the memory allocated for the BLS
 * response iocb, and logs a failed delivery.
 **/
18519 static void
18520 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18521 struct lpfc_iocbq *cmd_iocbq,
18522 struct lpfc_iocbq *rsp_iocbq)
18523 {
18524 if (cmd_iocbq) {
18525 lpfc_nlp_put(cmd_iocbq->ndlp);
18526 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18527 }
18528
/* Failure means BLS ABORT RSP did not get delivered to remote node */
18530 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18531 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18532 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
18533 get_job_ulpstatus(phba, rsp_iocbq),
18534 get_job_word4(phba, rsp_iocbq));
18535 }

/**
 * lpfc_sli4_xri_inrange - check whether an xri is owned by the driver
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function checks whether @xri maps into the range of XRIs
 * allocated to and used by the driver. Returns the local index of the
 * xri, or NO_XRI if it is not in range.
 **/
18545 uint16_t
18546 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18547 uint16_t xri)
18548 {
18549 uint16_t i;
18550
18551 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18552 if (xri == phba->sli4_hba.xri_ids[i])
18553 return i;
18554 }
18555 return NO_XRI;
18556 }

/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: pointer to a virtual port.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: whether the partially assembled receive sequence was aborted
 *
 * This function sends a basic response (BA_ACC or BA_RJT) to a previous
 * unsolicited sequence abort event after the sequence handling has
 * completed.
 **/
18567 void
18568 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18569 struct fc_frame_header *fc_hdr, bool aborted)
18570 {
18571 struct lpfc_hba *phba = vport->phba;
18572 struct lpfc_iocbq *ctiocb = NULL;
18573 struct lpfc_nodelist *ndlp;
18574 uint16_t oxid, rxid, xri, lxri;
18575 uint32_t sid, fctl;
18576 union lpfc_wqe128 *icmd;
18577 int rc;
18578
18579 if (!lpfc_is_link_up(phba))
18580 return;
18581
18582 sid = sli4_sid_from_fc_hdr(fc_hdr);
18583 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18584 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18585
18586 ndlp = lpfc_findnode_did(vport, sid);
18587 if (!ndlp) {
18588 ndlp = lpfc_nlp_init(vport, sid);
18589 if (!ndlp) {
18590 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18591 "1268 Failed to allocate ndlp for "
18592 "oxid:x%x SID:x%x\n", oxid, sid);
18593 return;
18594 }
18595
18596 lpfc_enqueue_node(vport, ndlp);
18597 }
18598
18599
18600 ctiocb = lpfc_sli_get_iocbq(phba);
18601 if (!ctiocb)
18602 return;
18603
18604 icmd = &ctiocb->wqe;
18605
18606
18607 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18608
18609 ctiocb->ndlp = lpfc_nlp_get(ndlp);
18610 if (!ctiocb->ndlp) {
18611 lpfc_sli_release_iocbq(phba, ctiocb);
18612 return;
18613 }
18614
18615 ctiocb->vport = phba->pport;
18616 ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18617 ctiocb->sli4_lxritag = NO_XRI;
18618 ctiocb->sli4_xritag = NO_XRI;
18619 ctiocb->abort_rctl = FC_RCTL_BA_ACC;
18620
18621 if (fctl & FC_FC_EX_CTX)
/* Exchange responder sent the abort so we
 * own the oxid.
 */
18625 xri = oxid;
18626 else
18627 xri = rxid;
18628 lxri = lpfc_sli4_xri_inrange(phba, xri);
18629 if (lxri != NO_XRI)
18630 lpfc_set_rrq_active(phba, ndlp, lxri,
18631 (xri == oxid) ? rxid : oxid, 0);
18632
	/* For BA_ABTS from exchange responder, if the logical xri with
	 * the oxid maps to the FCP XRI range, the port no longer has
	 * that exchange context, send a BLS_RJT. Override the IOCB for
	 * a BA_RJT.
	 */
18637 if ((fctl & FC_FC_EX_CTX) &&
18638 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18639 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18640 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18641 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18642 FC_BA_RJT_INV_XID);
18643 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18644 FC_BA_RJT_UNABLE);
18645 }
18646
	/* If BA_ABTS failed to abort a partially assembled receive sequence,
	 * the driver no longer has that exchange, send a BLS_RJT. Override
	 * the IOCB for a BA_RJT.
	 */
18651 if (aborted == false) {
18652 ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18653 bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18654 bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18655 FC_BA_RJT_INV_XID);
18656 bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18657 FC_BA_RJT_UNABLE);
18658 }
18659
18660 if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, the
		 * RX_ID field will be filled with the newly
		 * allocated responder XRI.
		 */
18665 ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
18666 bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
18667 } else {
		/* ABTS sent by initiator to CT exchange, the
		 * RX_ID field will be filled with the responder
		 * RX_ID from ABTS.
		 */
18672 ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
18673 }
18674
	/* OX_ID is invariable to who sent ABTS to CT exchange */
	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
	bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
18678
	/* Use CT=VPI */
18680 bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
18681 ndlp->nlp_DID);
18682 bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
18683 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
18684 bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
18685
	/* Xmit CT abts response on exchange <xid> */
18687 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18688 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18689 ctiocb->abort_rctl, oxid, phba->link_state);
18690
18691 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18692 if (rc == IOCB_ERROR) {
18693 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18694 "2925 Failed to issue CT ABTS RSP x%x on "
18695 "xri x%x, Data x%x\n",
18696 ctiocb->abort_rctl, oxid,
18697 phba->link_state);
18698 lpfc_nlp_put(ndlp);
18699 ctiocb->ndlp = NULL;
18700 lpfc_sli_release_iocbq(phba, ctiocb);
18701 }
18702 }
18703
/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to vport object.
 * @dmabuf: Pointer to the Hardware Buffer Queue entry that holds the frame.
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it marks the per-oxid status for the
 * unsolicited sequence as aborted. After that, it will send a basic
 * accept (or reject) in response to the abort.
 **/
18717 static void
18718 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18719 struct hbq_dmabuf *dmabuf)
18720 {
18721 struct lpfc_hba *phba = vport->phba;
18722 struct fc_frame_header fc_hdr;
18723 uint32_t fctl;
18724 bool aborted;
18725
	/* Make a copy of fc_hdr before the dmabuf being released */
18727 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18728 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18729
18730 if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
18732 aborted = true;
18733 } else {
		/* ABTS by initiator to exchange, need to do cleanup */
18735 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18736 if (aborted == false)
18737 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18738 }
18739 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18740
18741 if (phba->nvmet_support) {
18742 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18743 return;
18744 }
18745
	/* Respond with BA_ACC or BA_RJT accordingly */
18747 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18748 }
18749
/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are
 * present. The frames associated with this sequence are linked to the
 * @dmabuf using the dbuf list. It verifies that the first frame has a
 * sequence count of zero, that a frame with the end-of-sequence bit set
 * is present, and that there are no holes in the sequence count. The
 * function returns 1 when the sequence is complete, otherwise it returns 0.
 **/
18762 static int
18763 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18764 {
18765 struct fc_frame_header *hdr;
18766 struct lpfc_dmabuf *d_buf;
18767 struct hbq_dmabuf *seq_dmabuf;
18768 uint32_t fctl;
18769 int seq_count = 0;
18770
18771 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
18773 if (hdr->fh_seq_cnt != seq_count)
18774 return 0;
18775 fctl = (hdr->fh_f_ctl[0] << 16 |
18776 hdr->fh_f_ctl[1] << 8 |
18777 hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
18779 if (fctl & FC_FC_END_SEQ)
18780 return 1;
18781 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18782 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18783 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
18785 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18786 return 0;
18787 fctl = (hdr->fh_f_ctl[0] << 16 |
18788 hdr->fh_f_ctl[1] << 8 |
18789 hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
18791 if (fctl & FC_FC_END_SEQ)
18792 return 1;
18793 }
18794 return 0;
18795 }
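/*
 * Illustrative sketch (not driver code): F_CTL is a 24-bit field carried as
 * three bytes in the FC frame header; lpfc_seq_complete() above folds the
 * bytes together before testing FC_FC_END_SEQ. The guard macro is
 * hypothetical and never defined in any build.
 */
#ifdef LPFC_DOC_EXAMPLE
static bool example_frame_ends_sequence(struct fc_frame_header *hdr)
{
	uint32_t fctl = (hdr->fh_f_ctl[0] << 16 |
			 hdr->fh_f_ctl[1] << 8 |
			 hdr->fh_f_ctl[2]);

	return (fctl & FC_FC_END_SEQ) != 0;
}
#endif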
18796
/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will
 * be used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is
 * unable to allocate an iocbq then it throws out the received frames that
 * could not be described and returns a pointer to the first iocbq. If
 * unable to allocate any iocbqs (including the first) this function will
 * return NULL.
 **/
18810 static struct lpfc_iocbq *
18811 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18812 {
18813 struct hbq_dmabuf *hbq_buf;
18814 struct lpfc_dmabuf *d_buf, *n_buf;
18815 struct lpfc_iocbq *first_iocbq, *iocbq;
18816 struct fc_frame_header *fc_hdr;
18817 uint32_t sid;
18818 uint32_t len, tot_len;
18819
18820 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18821
18822 list_del_init(&seq_dmabuf->hbuf.list);
18823 lpfc_update_rcv_time_stamp(vport);
18824
18825 sid = sli4_sid_from_fc_hdr(fc_hdr);
18826 tot_len = 0;
18827
18828 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18829 if (first_iocbq) {
18830
18831 first_iocbq->wcqe_cmpl.total_data_placed = 0;
18832 bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
18833 IOSTAT_SUCCESS);
18834 first_iocbq->vport = vport;
18835
18836
18837 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18838 bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
18839 sli4_did_from_fc_hdr(fc_hdr));
18840 }
18841
18842 bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
18843 NO_XRI);
18844 bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
18845 be16_to_cpu(fc_hdr->fh_ox_id));
18846
18847
18848 tot_len = bf_get(lpfc_rcqe_length,
18849 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18850
18851 first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
18852 first_iocbq->bpl_dmabuf = NULL;
18853
18854 first_iocbq->wcqe_cmpl.word3 = 1;
18855
18856 if (tot_len > LPFC_DATA_BUF_SIZE)
18857 first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
18858 LPFC_DATA_BUF_SIZE;
18859 else
18860 first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
18861
18862 first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
18863 bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
18864 sid);
18865 }
18866 iocbq = first_iocbq;
18867
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
18871 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18872 if (!iocbq) {
18873 lpfc_in_buf_free(vport->phba, d_buf);
18874 continue;
18875 }
18876 if (!iocbq->bpl_dmabuf) {
18877 iocbq->bpl_dmabuf = d_buf;
18878 iocbq->wcqe_cmpl.word3++;
			/* We need to get the size out of the right CQE */
18880 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18881 len = bf_get(lpfc_rcqe_length,
18882 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18883 iocbq->unsol_rcv_len = len;
18884 iocbq->wcqe_cmpl.total_data_placed += len;
18885 tot_len += len;
18886 } else {
18887 iocbq = lpfc_sli_get_iocbq(vport->phba);
18888 if (!iocbq) {
18889 if (first_iocbq) {
18890 bf_set(lpfc_wcqe_c_status,
18891 &first_iocbq->wcqe_cmpl,
18892 IOSTAT_SUCCESS);
18893 first_iocbq->wcqe_cmpl.parameter =
18894 IOERR_NO_RESOURCES;
18895 }
18896 lpfc_in_buf_free(vport->phba, d_buf);
18897 continue;
18898 }
			/* We need to get the size out of the right CQE */
18900 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18901 len = bf_get(lpfc_rcqe_length,
18902 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18903 iocbq->cmd_dmabuf = d_buf;
18904 iocbq->bpl_dmabuf = NULL;
18905 iocbq->wcqe_cmpl.word3 = 1;
18906
18907 if (len > LPFC_DATA_BUF_SIZE)
18908 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
18909 LPFC_DATA_BUF_SIZE;
18910 else
18911 iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
18912 len;
18913
18914 tot_len += len;
18915 iocbq->wcqe_cmpl.total_data_placed = tot_len;
18916 bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
18917 sid);
18918 list_add_tail(&iocbq->list, &first_iocbq->list);
18919 }
18920 }
18921
18922 if (!first_iocbq)
18923 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18924
18925 return first_iocbq;
18926 }
18927
18928 static void
18929 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18930 struct hbq_dmabuf *seq_dmabuf)
18931 {
18932 struct fc_frame_header *fc_hdr;
18933 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18934 struct lpfc_hba *phba = vport->phba;
18935
18936 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18937 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18938 if (!iocbq) {
18939 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18940 "2707 Ring %d handler: Failed to allocate "
18941 "iocb Rctl x%x Type x%x received\n",
18942 LPFC_ELS_RING,
18943 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18944 return;
18945 }
18946 if (!lpfc_complete_unsol_iocb(phba,
18947 phba->sli4_hba.els_wq->pring,
18948 iocbq, fc_hdr->fh_r_ctl,
18949 fc_hdr->fh_type)) {
18950 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18951 "2540 Ring %d handler: unexpected Rctl "
18952 "x%x Type x%x received\n",
18953 LPFC_ELS_RING,
18954 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18955 lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
18956 }
18957
18958
18959 list_for_each_entry_safe(curr_iocb, next_iocb,
18960 &iocbq->list, list) {
18961 list_del_init(&curr_iocb->list);
18962 lpfc_sli_release_iocbq(phba, curr_iocb);
18963 }
18964 lpfc_sli_release_iocbq(phba, iocbq);
18965 }
18966
18967 static void
18968 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18969 struct lpfc_iocbq *rspiocb)
18970 {
18971 struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
18972
18973 if (pcmd && pcmd->virt)
18974 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18975 kfree(pcmd);
18976 lpfc_sli_release_iocbq(phba, cmdiocb);
18977 lpfc_drain_txq(phba);
18978 }
18979
18980 static void
18981 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18982 struct hbq_dmabuf *dmabuf)
18983 {
18984 struct fc_frame_header *fc_hdr;
18985 struct lpfc_hba *phba = vport->phba;
18986 struct lpfc_iocbq *iocbq = NULL;
18987 union lpfc_wqe128 *pwqe;
18988 struct lpfc_dmabuf *pcmd = NULL;
18989 uint32_t frame_len;
18990 int rc;
18991 unsigned long iflags;
18992
18993 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18994 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18995
	/* Send the received frame back */
18997 iocbq = lpfc_sli_get_iocbq(phba);
18998 if (!iocbq) {
		/* Queue cq event and wakeup worker thread to process it */
19000 spin_lock_irqsave(&phba->hbalock, iflags);
19001 list_add_tail(&dmabuf->cq_event.list,
19002 &phba->sli4_hba.sp_queue_event);
19003 phba->hba_flag |= HBA_SP_QUEUE_EVT;
19004 spin_unlock_irqrestore(&phba->hbalock, iflags);
19005 lpfc_worker_wake_up(phba);
19006 return;
19007 }
19008
	/* Allocate buffer for command payload */
19010 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19011 if (pcmd)
19012 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19013 &pcmd->phys);
19014 if (!pcmd || !pcmd->virt)
19015 goto exit;
19016
19017 INIT_LIST_HEAD(&pcmd->list);
19018
	/* copyin the payload */
19020 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19021
19022 iocbq->cmd_dmabuf = pcmd;
19023 iocbq->vport = vport;
19024 iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19025 iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19026 iocbq->num_bdes = 0;
19027
19028 pwqe = &iocbq->wqe;
19029
19030 pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
19031 pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
19032 pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
19033 pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
19034
19035 pwqe->send_frame.frame_len = frame_len;
19036 pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
19037 pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
19038 pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
19039 pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
19040 pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
19041 pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
19042
19043 pwqe->generic.wqe_com.word7 = 0;
19044 pwqe->generic.wqe_com.word10 = 0;
19045
19046 bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
19047 bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E);
19048 bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41);
19049 bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
19050 bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
19051 bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
19052 bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
19053 bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
19054 bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
19055 bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
19056 bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
19057 bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
19058 pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
19059
19060 iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19061
19062 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19063 if (rc == IOCB_ERROR)
19064 goto exit;
19065
19066 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19067 return;
19068
19069 exit:
19070 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19071 "2023 Unable to process MDS loopback frame\n");
19072 if (pcmd && pcmd->virt)
19073 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19074 kfree(pcmd);
19075 if (iocbq)
19076 lpfc_sli_release_iocbq(phba, iocbq);
19077 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19078 }
19079
/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
 *
 * This function is called with no lock held. It processes a received
 * buffer: the frame is validated, the receiving vport resolved, and the
 * buffer is either handled directly (MDS loopback, unsolicited abort) or
 * linked into the pending sequence. When the buffer completes a sequence,
 * the assembled sequence is handed to the upper layer protocol.
 **/
19092 void
19093 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19094 struct hbq_dmabuf *dmabuf)
19095 {
19096 struct hbq_dmabuf *seq_dmabuf;
19097 struct fc_frame_header *fc_hdr;
19098 struct lpfc_vport *vport;
19099 uint32_t fcfi;
19100 uint32_t did;
19101
	/* Process each received buffer */
19103 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19104
19105 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19106 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19107 vport = phba->pport;
		/* Handle MDS Loopback frames */
19109 if (!(phba->pport->load_flag & FC_UNLOADING))
19110 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19111 else
19112 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19113 return;
19114 }
19115
	/* check to see if this a valid type of frame */
19117 if (lpfc_fc_frame_check(phba, fc_hdr)) {
19118 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19119 return;
19120 }
19121
19122 if ((bf_get(lpfc_cqe_code,
19123 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
19124 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19125 &dmabuf->cq_event.cqe.rcqe_cmpl);
19126 else
19127 fcfi = bf_get(lpfc_rcqe_fcf_id,
19128 &dmabuf->cq_event.cqe.rcqe_cmpl);
19129
19130 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
19131 vport = phba->pport;
19132 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19133 "2023 MDS Loopback %d bytes\n",
19134 bf_get(lpfc_rcqe_length,
19135 &dmabuf->cq_event.cqe.rcqe_cmpl));
19136
19137 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19138 return;
19139 }
19140
19141
19142 did = sli4_did_from_fc_hdr(fc_hdr);
19143
19144 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19145 if (!vport) {
19146
19147 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19148 return;
19149 }
19150
19151
19152 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19153 (did != Fabric_DID)) {
		/*
		 * Throw out the frame if we are not pt2pt.
		 * The pt2pt protocol allows for discovery frames
		 * to be received without a registered VPI.
		 */
19159 if (!(vport->fc_flag & FC_PT2PT) ||
19160 (phba->link_state == LPFC_HBA_READY)) {
19161 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19162 return;
19163 }
19164 }
19165
	/* Handle the basic abort sequence (BA_ABTS) event */
19167 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19168 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19169 return;
19170 }
19171
	/* Link this frame */
19173 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19174 if (!seq_dmabuf) {
19175
19176 lpfc_in_buf_free(phba, &dmabuf->dbuf);
19177 return;
19178 }
19179
19180 if (!lpfc_seq_complete(seq_dmabuf))
19181 return;
19182
19183
19184 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19185 }
19186
/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery when the driver is
 * sequential.
 *
 * Return codes
 *	0 - successful
 *	-EIO - The mailbox failed to complete successfully.
 *	When this error occurs, the driver is not guaranteed
 *	to have any rpi regions posted to the device and
 *	must either attempt to repost the regions or take a
 *	fatal error.
 **/
19208 int
19209 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19210 {
19211 struct lpfc_rpi_hdr *rpi_page;
19212 uint32_t rc = 0;
19213 uint16_t lrpi = 0;
19214
19215
19216 if (!phba->sli4_hba.rpi_hdrs_in_use)
19217 goto exit;
19218 if (phba->sli4_hba.extents_in_use)
19219 return -EIO;
19220
19221 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		/*
		 * Assign the rpi headers a physical rpi only if the driver
		 * has not initialized those resources. A port reset only
		 * needs the headers posted.
		 */
19227 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19228 LPFC_RPI_RSRC_RDY)
19229 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19230
19231 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19232 if (rc != MBX_SUCCESS) {
19233 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19234 "2008 Error %d posting all rpi "
19235 "headers\n", rc);
19236 rc = -EIO;
19237 break;
19238 }
19239 }
19240
19241 exit:
19242 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19243 LPFC_RPI_RSRC_RDY);
19244 return rc;
19245 }
19246
/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the
 * HBA consistent with the SLI-4 interface spec. This memory region
 * maps up to 64 rpi context regions.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
19261 int
19262 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19263 {
19264 LPFC_MBOXQ_t *mboxq;
19265 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19266 uint32_t rc = 0;
19267 uint32_t shdr_status, shdr_add_status;
19268 union lpfc_sli4_cfg_shdr *shdr;
19269
19270
19271 if (!phba->sli4_hba.rpi_hdrs_in_use)
19272 return rc;
19273 if (phba->sli4_hba.extents_in_use)
19274 return -EIO;
19275
19276
19277 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19278 if (!mboxq) {
19279 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19280 "2001 Unable to allocate memory for issuing "
19281 "SLI_CONFIG_SPECIAL mailbox command\n");
19282 return -ENOMEM;
19283 }
19284
19285
19286 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19287 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19288 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19289 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19290 sizeof(struct lpfc_sli4_cfg_mhdr),
19291 LPFC_SLI4_MBX_EMBED);
19292
19293
19294
19295 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19296 rpi_page->start_rpi);
19297 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19298 hdr_tmpl, rpi_page->page_count);
19299
19300 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19301 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19302 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19303 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19304 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19305 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19306 mempool_free(mboxq, phba->mbox_mem_pool);
19307 if (shdr_status || shdr_add_status || rc) {
19308 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19309 "2514 POST_RPI_HDR mailbox failed with "
19310 "status x%x add_status x%x, mbx status x%x\n",
19311 shdr_status, shdr_add_status, rc);
19312 rc = -ENXIO;
19313 } else {
		/*
		 * The next_rpi stores the next logical module-64 rpi value
		 * used to post physical rpis in subsequent rpi postings.
		 */
19318 spin_lock_irq(&phba->hbalock);
19319 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19320 spin_unlock_irq(&phba->hbalock);
19321 }
19322 return rc;
19323 }
19324
/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate an rpi from the driver's rpi bitmask.
 * If the allocation brings the number of remaining rpis below the low water
 * mark while rpi headers are in use, it also attempts to grow the rpi count
 * by creating and posting another rpi header page.
 *
 * Returns
 *	An rpi in the range rpi_base <= rpi < max_rpi if successful
 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
19338 int
19339 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19340 {
19341 unsigned long rpi;
19342 uint16_t max_rpi, rpi_limit;
19343 uint16_t rpi_remaining, lrpi = 0;
19344 struct lpfc_rpi_hdr *rpi_hdr;
19345 unsigned long iflag;
19346
	/*
	 * Fetch the next logical rpi. Because this index is logical,
	 * the driver starts at 0 each time.
	 */
19351 spin_lock_irqsave(&phba->hbalock, iflag);
19352 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19353 rpi_limit = phba->sli4_hba.next_rpi;
19354
19355 rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
19356 if (rpi >= rpi_limit)
19357 rpi = LPFC_RPI_ALLOC_ERROR;
19358 else {
19359 set_bit(rpi, phba->sli4_hba.rpi_bmask);
19360 phba->sli4_hba.max_cfg_param.rpi_used++;
19361 phba->sli4_hba.rpi_count++;
19362 }
19363 lpfc_printf_log(phba, KERN_INFO,
19364 LOG_NODE | LOG_DISCOVERY,
19365 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19366 (int) rpi, max_rpi, rpi_limit);
19367
	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
19372 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19373 (phba->sli4_hba.rpi_count >= max_rpi)) {
19374 spin_unlock_irqrestore(&phba->hbalock, iflag);
19375 return rpi;
19376 }
19377
	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
19382 if (!phba->sli4_hba.rpi_hdrs_in_use) {
19383 spin_unlock_irqrestore(&phba->hbalock, iflag);
19384 return rpi;
19385 }
19386
	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now. Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
19393 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19394 spin_unlock_irqrestore(&phba->hbalock, iflag);
19395 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19396 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19397 if (!rpi_hdr) {
19398 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19399 "2002 Error Could not grow rpi "
19400 "count\n");
19401 } else {
19402 lrpi = rpi_hdr->start_rpi;
19403 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19404 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
19405 }
19406 }
19407
19408 return rpi;
19409 }
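/*
 * Illustrative sketch (not driver code): callers pair lpfc_sli4_alloc_rpi()
 * with lpfc_sli4_free_rpi() (defined below) around a remote-port login.
 * Error handling is reduced to the allocation check; the guard macro is
 * hypothetical and never defined in any build.
 */
#ifdef LPFC_DOC_EXAMPLE
static int example_rpi_round_trip(struct lpfc_hba *phba)
{
	int rpi = lpfc_sli4_alloc_rpi(phba);

	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return -ENOMEM;
	/* ... use the rpi in a REG_LOGIN mailbox ... */
	lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}
#endif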
19410
/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
19419 static void
19420 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19421 {
	/*
	 * If the rpi value indicates a prior unreg has already
	 * been done, skip the unreg.
	 */
19426 if (rpi == LPFC_RPI_ALLOC_ERROR)
19427 return;
19428
19429 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19430 phba->sli4_hba.rpi_count--;
19431 phba->sli4_hba.max_cfg_param.rpi_used--;
19432 } else {
19433 lpfc_printf_log(phba, KERN_INFO,
19434 LOG_NODE | LOG_DISCOVERY,
19435 "2016 rpi %x not inuse\n",
19436 rpi);
19437 }
19438 }
19439
/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver, under the hbalock.
 **/
19448 void
19449 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19450 {
19451 spin_lock_irq(&phba->hbalock);
19452 __lpfc_sli4_free_rpi(phba, rpi);
19453 spin_unlock_irq(&phba->hbalock);
19454 }
19455
/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpi via a bitmask.
 **/
19463 void
19464 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19465 {
19466 kfree(phba->sli4_hba.rpi_bmask);
19467 kfree(phba->sli4_hba.rpi_ids);
19468 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19469 }
19470
/**
 * lpfc_sli4_resume_rpi - Resume an rpi on a remote node
 * @ndlp: pointer to lpfc nodelist data structure.
 * @cmpl: completion call-back.
 * @arg: data to load as mbox 'caller buffer information'
 *
 * This routine issues a RESUME_RPI mailbox command for the rpi associated
 * with @ndlp, taking a node reference that is released on failure.
 **/
19480 int
19481 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19482 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19483 {
19484 LPFC_MBOXQ_t *mboxq;
19485 struct lpfc_hba *phba = ndlp->phba;
19486 int rc;
19487
19488
19489 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19490 if (!mboxq)
19491 return -ENOMEM;
19492
	/* If cmpl assigned, then this nlp_get pairs with
	 * lpfc_mbx_cmpl_resume_rpi.
	 *
	 * Else cmpl is NULL, then this nlp_get pairs with
	 * lpfc_sli_def_mbox_cmpl.
	 */
19499 if (!lpfc_nlp_get(ndlp)) {
19500 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19501 "2122 %s: Failed to get nlp ref\n",
19502 __func__);
19503 mempool_free(mboxq, phba->mbox_mem_pool);
19504 return -EIO;
19505 }
19506
19507
19508 lpfc_resume_rpi(mboxq, ndlp);
19509 if (cmpl) {
19510 mboxq->mbox_cmpl = cmpl;
19511 mboxq->ctx_buf = arg;
19512 } else
19513 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19514 mboxq->ctx_ndlp = ndlp;
19515 mboxq->vport = ndlp->vport;
19516 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19517 if (rc == MBX_NOT_FINISHED) {
19518 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19519 "2010 Resume RPI Mailbox failed "
19520 "status %d, mbxStatus x%x\n", rc,
19521 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19522 lpfc_nlp_put(ndlp);
19523 mempool_free(mboxq, phba->mbox_mem_pool);
19524 return -EIO;
19525 }
19526 return 0;
19527 }
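/*
 * Illustrative sketch (not driver code): the pattern above - take a node
 * reference with lpfc_nlp_get() before issuing an asynchronous mailbox and
 * drop it with lpfc_nlp_put() on any failure path - recurs throughout this
 * file. The guard macro is hypothetical and never defined in any build.
 */
#ifdef LPFC_DOC_EXAMPLE
static int example_hold_node_for_mbox(struct lpfc_nodelist *ndlp)
{
	if (!lpfc_nlp_get(ndlp))
		return -EIO;	/* node is already being torn down */
	/* ... issue the mailbox; on MBX_NOT_FINISHED: lpfc_nlp_put(ndlp) */
	return 0;
}
#endif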
19528
/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *    0 success
 *    -Evalue otherwise
 **/
19539 int
19540 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19541 {
19542 LPFC_MBOXQ_t *mboxq;
19543 int rc = 0;
19544 int retval = MBX_SUCCESS;
19545 uint32_t mbox_tmo;
19546 struct lpfc_hba *phba = vport->phba;
19547 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19548 if (!mboxq)
19549 return -ENOMEM;
19550 lpfc_init_vpi(phba, mboxq, vport->vpi);
19551 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19552 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19553 if (rc != MBX_SUCCESS) {
19554 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19555 "2022 INIT VPI Mailbox failed "
19556 "status %d, mbxStatus x%x\n", rc,
19557 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19558 retval = -EIO;
19559 }
19560 if (rc != MBX_TIMEOUT)
19561 mempool_free(mboxq, vport->phba->mbox_mem_pool);
19562
19563 return retval;
19564 }
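/*
 * Illustrative sketch (not driver code): lpfc_sli4_init_vpi() above shows
 * the synchronous mailbox convention used in this file - on MBX_TIMEOUT the
 * command still owns the LPFC_MBOXQ_t, so the caller must not free it. The
 * guard macro is hypothetical and never defined in any build.
 */
#ifdef LPFC_DOC_EXAMPLE
static int example_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
					  lpfc_mbox_tmo_val(phba, mboxq));

	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif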
19565
/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is the completion handler for the manual ADD_FCF_RECORD
 * mailbox command. It checks the mailbox status and logs an error unless
 * the FCF record is already in use, then frees the mailbox resources.
 **/
19575 static void
19576 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19577 {
19578 void *virt_addr;
19579 union lpfc_sli4_cfg_shdr *shdr;
19580 uint32_t shdr_status, shdr_add_status;
19581
19582 virt_addr = mboxq->sge_array->addr[0];
19583
19584 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19585 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19586 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19587
19588 if ((shdr_status || shdr_add_status) &&
19589 (shdr_status != STATUS_FCF_IN_USE))
19590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19591 "2558 ADD_FCF_RECORD mailbox failed with "
19592 "status x%x add_status x%x\n",
19593 shdr_status, shdr_add_status);
19594
19595 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19596 }
19597
/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
19607 int
19608 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19609 {
19610 int rc = 0;
19611 LPFC_MBOXQ_t *mboxq;
19612 uint8_t *bytep;
19613 void *virt_addr;
19614 struct lpfc_mbx_sge sge;
19615 uint32_t alloc_len, req_len;
19616 uint32_t fcfindex;
19617
19618 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19619 if (!mboxq) {
19620 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19621 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19622 return -ENOMEM;
19623 }
19624
19625 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19626 sizeof(uint32_t);
19627
19628
19629 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19630 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19631 req_len, LPFC_SLI4_MBX_NEMBED);
19632 if (alloc_len < req_len) {
19633 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19634 "2523 Allocated DMA memory size (x%x) is "
19635 "less than the requested DMA memory "
19636 "size (x%x)\n", alloc_len, req_len);
19637 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19638 return -ENOMEM;
19639 }
19640
19641
19642
19643
19644
19645 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19646 virt_addr = mboxq->sge_array->addr[0];
19647
19648
19649
19650
19651 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19652 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19653 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19654
19655
19656
19657
19658
19659
19660 bytep += sizeof(uint32_t);
19661 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19662 mboxq->vport = phba->pport;
19663 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19664 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19665 if (rc == MBX_NOT_FINISHED) {
19666 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19667 "2515 ADD_FCF_RECORD mailbox failed with "
19668 "status 0x%x\n", rc);
19669 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19670 rc = -EIO;
19671 } else
19672 rc = 0;
19673
19674 return rc;
19675 }
19676
/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 **/
19687 void
19688 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19689 struct fcf_record *fcf_record,
19690 uint16_t fcf_index)
19691 {
19692 memset(fcf_record, 0, sizeof(struct fcf_record));
19693 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19694 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19695 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19696 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19697 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19698 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19699 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19700 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19701 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19702 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19703 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19704 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19705 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19706 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19707 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19708 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19709 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19710
19711 if (phba->valid_vlan) {
19712 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19713 = 1 << (phba->vlan_id % 8);
19714 }
19715 }
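/*
 * Illustrative sketch (not driver code): a default FCF record built by the
 * routine above is handed to lpfc_sli4_add_fcf_record(), which wraps it in
 * a non-embedded ADD_FCF mailbox. The index value 0 is illustrative only;
 * the guard macro is hypothetical and never defined in any build.
 */
#ifdef LPFC_DOC_EXAMPLE
static int example_add_default_fcf(struct lpfc_hba *phba)
{
	struct fcf_record fcf_record;

	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0 /* fcf_index */);
	return lpfc_sli4_add_fcf_record(phba, &fcf_record);
}
#endif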
19716
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
19729 int
19730 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19731 {
19732 int rc = 0, error;
19733 LPFC_MBOXQ_t *mboxq;
19734
19735 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19736 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19737 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19738 if (!mboxq) {
19739 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19740 "2000 Failed to allocate mbox for "
19741 "READ_FCF cmd\n");
19742 error = -ENOMEM;
19743 goto fail_fcf_scan;
19744 }
19745
19746 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19747 if (rc) {
19748 error = -EINVAL;
19749 goto fail_fcf_scan;
19750 }
19751
19752 mboxq->vport = phba->pport;
19753 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19754
19755 spin_lock_irq(&phba->hbalock);
19756 phba->hba_flag |= FCF_TS_INPROG;
19757 spin_unlock_irq(&phba->hbalock);
19758
19759 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19760 if (rc == MBX_NOT_FINISHED)
19761 error = -EIO;
19762 else {
19763
19764 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19765 phba->fcf.eligible_fcf_cnt = 0;
19766 error = 0;
19767 }
19768 fail_fcf_scan:
19769 if (error) {
19770 if (mboxq)
19771 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19772
19773 spin_lock_irq(&phba->hbalock);
19774 phba->hba_flag &= ~FCF_TS_INPROG;
19775 spin_unlock_irq(&phba->hbalock);
19776 }
19777 return error;
19778 }
19779
/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
19791 int
19792 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19793 {
19794 int rc = 0, error;
19795 LPFC_MBOXQ_t *mboxq;
19796
19797 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19798 if (!mboxq) {
19799 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19800 "2763 Failed to allocate mbox for "
19801 "READ_FCF cmd\n");
19802 error = -ENOMEM;
19803 goto fail_fcf_read;
19804 }
19805
19806 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19807 if (rc) {
19808 error = -EINVAL;
19809 goto fail_fcf_read;
19810 }
19811
19812 mboxq->vport = phba->pport;
19813 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19814 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19815 if (rc == MBX_NOT_FINISHED)
19816 error = -EIO;
19817 else
19818 error = 0;
19819
19820 fail_fcf_read:
19821 if (error && mboxq)
19822 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19823 return error;
19824 }
19825
/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/
19837 int
19838 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19839 {
19840 int rc = 0, error;
19841 LPFC_MBOXQ_t *mboxq;
19842
19843 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19844 if (!mboxq) {
19845 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19846 "2758 Failed to allocate mbox for "
19847 "READ_FCF cmd\n");
19848 error = -ENOMEM;
19849 goto fail_fcf_read;
19850 }
19851
19852 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19853 if (rc) {
19854 error = -EINVAL;
19855 goto fail_fcf_read;
19856 }
19857
19858 mboxq->vport = phba->pport;
19859 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19860 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19861 if (rc == MBX_NOT_FINISHED)
19862 error = -EIO;
19863 else
19864 error = 0;
19865
19866 fail_fcf_read:
19867 if (error && mboxq)
19868 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19869 return error;
19870 }
19871
/**
 * lpfc_check_next_fcf_pri_level - Repopulate the rr_bmask at next priority
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from lpfc_sli4_fcf_rr_next_index_get when the
 * rr_bmask is empty. The FCF indices are put into the rr_bmask based on
 * their priority level, from the highest priority to the lowest. The most
 * likely FCF candidate will be in the highest priority group. When this
 * routine is called it searches the fcf_pri list for the next lowest
 * priority group and repopulates the rr_bmask with only those fcf indexes.
 *
 * Returns: 1 = success, 0 = failure.
 **/
19885 static int
19886 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19887 {
19888 uint16_t next_fcf_pri;
19889 uint16_t last_index;
19890 struct lpfc_fcf_pri *fcf_pri;
19891 int rc;
19892 int ret = 0;
19893
19894 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19895 LPFC_SLI4_FCF_TBL_INDX_MAX);
19896 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19897 "3060 Last IDX %d\n", last_index);
19898
19899
19900 spin_lock_irq(&phba->hbalock);
19901 if (list_empty(&phba->fcf.fcf_pri_list) ||
19902 list_is_singular(&phba->fcf.fcf_pri_list)) {
19903 spin_unlock_irq(&phba->hbalock);
19904 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19905 "3061 Last IDX %d\n", last_index);
19906 return 0;
19907 }
19908 spin_unlock_irq(&phba->hbalock);
19909
19910 next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
19915 memset(phba->fcf.fcf_rr_bmask, 0,
19916 sizeof(*phba->fcf.fcf_rr_bmask));
19917 spin_lock_irq(&phba->hbalock);
19918 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19919 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19920 continue;
		/*
		 * the 1st priority that has not FLOGI failed
		 * will be the highest.
		 */
19925 if (!next_fcf_pri)
19926 next_fcf_pri = fcf_pri->fcf_rec.priority;
19927 spin_unlock_irq(&phba->hbalock);
19928 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19929 rc = lpfc_sli4_fcf_rr_index_set(phba,
19930 fcf_pri->fcf_rec.fcf_index);
19931 if (rc)
19932 return 0;
19933 }
19934 spin_lock_irq(&phba->hbalock);
19935 }
19936
	/*
	 * if next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
19941 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19942 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19943 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * the 1st priority that has not FLOGI failed
			 * will be the highest.
			 */
19948 if (!next_fcf_pri)
19949 next_fcf_pri = fcf_pri->fcf_rec.priority;
19950 spin_unlock_irq(&phba->hbalock);
19951 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19952 rc = lpfc_sli4_fcf_rr_index_set(phba,
19953 fcf_pri->fcf_rec.fcf_index);
19954 if (rc)
19955 return 0;
19956 }
19957 spin_lock_irq(&phba->hbalock);
19958 }
19959 } else
19960 ret = 1;
19961 spin_unlock_irq(&phba->hbalock);
19962
19963 return ret;
19964 }
19965
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals the initial
 * roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) shall be
 * returned, otherwise, the next eligible FCF record's index shall be
 * returned.
 **/
19975 uint16_t
19976 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19977 {
19978 uint16_t next_fcf_index;
19979
19980 initial_priority:
	/* Search start from next bit of currently registered FCF index */
19982 next_fcf_index = phba->fcf.current_rec.fcf_indx;
19983
19984 next_priority:
	/* Determine the next fcf index to check */
19986 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19987 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19988 LPFC_SLI4_FCF_TBL_INDX_MAX,
19989 next_fcf_index);
19990
19991
19992 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
19998 next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19999 LPFC_SLI4_FCF_TBL_INDX_MAX);
20000 }
20001
20002
	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
20004 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20005 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
20012 if (lpfc_check_next_fcf_pri_level(phba))
20013 goto initial_priority;
20014 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20015 "2844 No roundrobin failover FCF available\n");
20016
20017 return LPFC_FCOE_FCF_NEXT_NONE;
20018 }
20019
20020 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20021 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20022 LPFC_FCF_FLOGI_FAILED) {
20023 if (list_is_singular(&phba->fcf.fcf_pri_list))
20024 return LPFC_FCOE_FCF_NEXT_NONE;
20025
20026 goto next_priority;
20027 }
20028
20029 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20030 "2845 Get next roundrobin failover FCF (x%x)\n",
20031 next_fcf_index);
20032
20033 return next_fcf_index;
20034 }
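/*
 * Illustrative sketch (not driver code): a roundrobin failover loop keeps
 * asking for the next eligible index until LPFC_FCOE_FCF_NEXT_NONE comes
 * back, clearing indexes that fail (see the set/clear helpers below). The
 * guard macro is hypothetical and never defined in any build.
 */
#ifdef LPFC_DOC_EXAMPLE
static void example_rr_failover_walk(struct lpfc_hba *phba)
{
	uint16_t fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);

	while (fcf_index != LPFC_FCOE_FCF_NEXT_NONE) {
		/* ... attempt FLOGI through this FCF; on failure: ... */
		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
	}
}
#endif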
20035
/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to 'set'
 *
 * This routine sets the FCF record index into the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit was successfully set, otherwise it returns
 * -EINVAL.
 **/
20049 int
20050 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20051 {
20052 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20053 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20054 "2610 FCF (x%x) reached driver's book "
20055 "keeping dimension:x%x\n",
20056 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20057 return -EINVAL;
20058 }
20059
20060 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20061
20062 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20063 "2790 Set FCF (x%x) to roundrobin FCF failover "
20064 "bmask\n", fcf_index);
20065
20066 return 0;
20067 }
20068
/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to 'clear'
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search and removes the matching entry from the
 * fcf_pri list. It checks to make sure that the index does not go beyond
 * the range of the driver allocated bmask dimension before clearing the bit.
 **/
20079 void
20080 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20081 {
20082 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20083 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20084 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20085 "2762 FCF (x%x) reached driver's book "
20086 "keeping dimension:x%x\n",
20087 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20088 return;
20089 }
20090
20091 spin_lock_irq(&phba->hbalock);
20092 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20093 list) {
20094 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20095 list_del_init(&fcf_pri->list);
20096 break;
20097 }
20098 }
20099 spin_unlock_irq(&phba->hbalock);
20100 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20101
20102 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20103 "2791 Clear FCF (x%x) from roundrobin failover "
20104 "bmask\n", fcf_index);
20105 }
20106
/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. On failure it falls back to pport discovery retry or the FCF
 * dead failthrough; on success it starts the FCF rediscover quiescent timer.
 **/
20116 static void
20117 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20118 {
20119 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20120 uint32_t shdr_status, shdr_add_status;
20121
20122 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20123
20124 shdr_status = bf_get(lpfc_mbox_hdr_status,
20125 &redisc_fcf->header.cfg_shdr.response);
20126 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20127 &redisc_fcf->header.cfg_shdr.response);
20128 if (shdr_status || shdr_add_status) {
20129 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20130 "2746 Requesting for FCF rediscovery failed "
20131 "status x%x add_status x%x\n",
20132 shdr_status, shdr_add_status);
20133 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20134 spin_lock_irq(&phba->hbalock);
20135 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20136 spin_unlock_irq(&phba->hbalock);
20137
20138
20139
20140
20141 lpfc_retry_pport_discovery(phba);
20142 } else {
20143 spin_lock_irq(&phba->hbalock);
20144 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20145 spin_unlock_irq(&phba->hbalock);
20146
20147
20148
20149
20150
20151 lpfc_sli4_fcf_dead_failthrough(phba);
20152 }
20153 } else {
20154 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20155 "2775 Start FCF rediscover quiescent timer\n");
20156
20157
20158
20159
20160 lpfc_fcf_redisc_wait_start_timer(phba);
20161 }
20162
20163 mempool_free(mbox, phba->mbox_mem_pool);
20164 }
20165
/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request rediscovery of the entire FCF table
 * by the port.
 **/
20173 int
20174 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20175 {
20176 LPFC_MBOXQ_t *mbox;
20177 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20178 int rc, length;
20179
20180
20181 lpfc_cancel_all_vport_retry_delay_timer(phba);
20182
20183 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20184 if (!mbox) {
20185 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20186 "2745 Failed to allocate mbox for "
20187 "requesting FCF rediscover.\n");
20188 return -ENOMEM;
20189 }
20190
20191 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20192 sizeof(struct lpfc_sli4_cfg_mhdr));
20193 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20194 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20195 length, LPFC_SLI4_MBX_EMBED);
20196
20197 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20198
20199 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20200
20201
20202 mbox->vport = phba->pport;
20203 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20204 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20205
20206 if (rc == MBX_NOT_FINISHED) {
20207 mempool_free(mbox, phba->mbox_mem_pool);
20208 return -EIO;
20209 }
20210 return 0;
20211 }
20212
/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when the driver failed to perform fast FCF failover.
 **/
20220 void
20221 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20222 {
20223 uint32_t link_state;
20224
	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
20230 link_state = phba->link_state;
20231 lpfc_linkdown(phba);
20232 phba->link_state = link_state;
20233
20234
20235 lpfc_unregister_unused_fcf(phba);
20236 }
20237
/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
20247 static uint32_t
20248 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20249 {
20250 LPFC_MBOXQ_t *pmb = NULL;
20251 MAILBOX_t *mb;
20252 uint32_t offset = 0;
20253 int rc;
20254
20255 if (!rgn23_data)
20256 return 0;
20257
20258 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20259 if (!pmb) {
20260 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20261 "2600 failed to allocate mailbox memory\n");
20262 return 0;
20263 }
20264 mb = &pmb->u.mb;
20265
20266 do {
20267 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20268 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20269
20270 if (rc != MBX_SUCCESS) {
20271 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20272 "2601 failed to read config "
20273 "region 23, rc 0x%x Status 0x%x\n",
20274 rc, mb->mbxStatus);
20275 mb->un.varDmp.word_cnt = 0;
20276 }
20277
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
20281 if (mb->un.varDmp.word_cnt == 0)
20282 break;
20283
20284 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20285 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20286
20287 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20288 rgn23_data + offset,
20289 mb->un.varDmp.word_cnt);
20290 offset += mb->un.varDmp.word_cnt;
20291 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20292
20293 mempool_free(pmb, phba->mbox_mem_pool);
20294 return offset;
20295 }
20296
/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
20306 static uint32_t
20307 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20308 {
20309 LPFC_MBOXQ_t *mboxq = NULL;
20310 struct lpfc_dmabuf *mp = NULL;
20311 struct lpfc_mqe *mqe;
20312 uint32_t data_length = 0;
20313 int rc;
20314
20315 if (!rgn23_data)
20316 return 0;
20317
20318 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20319 if (!mboxq) {
20320 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20321 "3105 failed to allocate mailbox memory\n");
20322 return 0;
20323 }
20324
20325 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20326 goto out;
20327 mqe = &mboxq->u.mqe;
20328 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
20329 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20330 if (rc)
20331 goto out;
20332 data_length = mqe->un.mb_words[5];
20333 if (data_length == 0)
20334 goto out;
20335 if (data_length > DMP_RGN23_SIZE) {
20336 data_length = 0;
20337 goto out;
20338 }
20339 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20340 out:
20341 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
20342 return data_length;
20343 }
20344
/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for the port state to
 * decide if the user disabled the port. If the TLV indicates the port is
 * disabled, the hba_flag is set accordingly.
 **/
20353 void
20354 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20355 {
20356 uint8_t *rgn23_data = NULL;
20357 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20358 uint32_t offset = 0;
20359
20360
20361 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20362 if (!rgn23_data)
20363 goto out;
20364
20365 if (phba->sli_rev < LPFC_SLI_REV4)
20366 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20367 else {
20368 if_type = bf_get(lpfc_sli_intf_if_type,
20369 &phba->sli4_hba.sli_intf);
20370 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20371 goto out;
20372 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20373 }
20374
20375 if (!data_size)
20376 goto out;
20377
20378
20379 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20380 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20381 "2619 Config region 23 has bad signature\n");
20382 goto out;
20383 }
20384 offset += 4;
20385
20386
20387 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20388 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20389 "2620 Config region 23 has bad version\n");
20390 goto out;
20391 }
20392 offset += 4;
20393
20394
20395 while (offset < data_size) {
20396 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20397 break;
		/*
		 * If the TLV is not driver specific TLV or driver id is
		 * not linux driver id, skip the record.
		 */
20402 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20403 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20404 (rgn23_data[offset + 3] != 0)) {
20405 offset += rgn23_data[offset + 1] * 4 + 4;
20406 continue;
20407 }
20408
20409
20410 sub_tlv_len = rgn23_data[offset + 1] * 4;
20411 offset += 4;
20412 tlv_offset = 0;
20413
		/*
		 * Search for configured port state sub-TLV.
		 */
20417 while ((offset < data_size) &&
20418 (tlv_offset < sub_tlv_len)) {
20419 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20420 offset += 4;
20421 tlv_offset += 4;
20422 break;
20423 }
20424 if (rgn23_data[offset] != PORT_STE_TYPE) {
20425 offset += rgn23_data[offset + 1] * 4 + 4;
20426 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20427 continue;
20428 }
20429
20430
20431 if (!rgn23_data[offset + 2])
20432 phba->hba_flag |= LINK_DISABLED;
20433
20434 goto out;
20435 }
20436 }
20437
20438 out:
20439 kfree(rgn23_data);
20440 return;
20441 }
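/*
 * Illustrative sketch (not driver code): region 23 records are TLVs whose
 * length byte counts 32-bit words, so one record occupies
 * rgn23_data[offset + 1] * 4 + 4 bytes including its 4-byte header - the
 * stride used by the parser above. The guard macro is hypothetical and
 * never defined in any build.
 */
#ifdef LPFC_DOC_EXAMPLE
static uint32_t example_skip_region23_tlv(uint8_t *rgn23_data,
					  uint32_t offset)
{
	return offset + rgn23_data[offset + 1] * 4 + 4;
}
#endif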
20442
/**
 * lpfc_log_fw_write_cmpl - logs firmware write completion status
 * @phba: hba structure pointer
 * @shdr_status: status from a firmware write's cfg_shdr response
 * @shdr_add_status: add_status from a firmware write's cfg_shdr response
 * @shdr_add_status_2: additional status from a firmware write's cfg_shdr
 *		       response
 * @shdr_change_status: change_status from a firmware write's response
 * @shdr_csf: csf bit from a firmware write's response
 *
 * Parses the values from a firmware write completion into human readable
 * logging values and prints them.
 **/
20456 static void
20457 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20458 u32 shdr_add_status, u32 shdr_add_status_2,
20459 u32 shdr_change_status, u32 shdr_csf)
20460 {
20461 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20462 "4198 %s: flash_id x%02x, asic_rev x%02x, "
20463 "status x%02x, add_status x%02x, add_status_2 x%02x, "
20464 "change_status x%02x, csf %01x\n", __func__,
20465 phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20466 shdr_status, shdr_add_status, shdr_add_status_2,
20467 shdr_change_status, shdr_csf);
20468
20469 if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20470 switch (shdr_add_status_2) {
20471 case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20472 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20473 "4199 Firmware write failed: "
20474 "image incompatible with flash x%02x\n",
20475 phba->sli4_hba.flash_id);
20476 break;
20477 case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20478 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20479 "4200 Firmware write failed: "
20480 "image incompatible with ASIC "
20481 "architecture x%02x\n",
20482 phba->sli4_hba.asic_rev);
20483 break;
20484 default:
20485 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20486 "4210 Firmware write failed: "
20487 "add_status_2 x%02x\n",
20488 shdr_add_status_2);
20489 break;
20490 }
20491 } else if (!shdr_status && !shdr_add_status) {
20492 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20493 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20494 if (shdr_csf)
20495 shdr_change_status =
20496 LPFC_CHANGE_STATUS_PCI_RESET;
20497 }
20498
20499 switch (shdr_change_status) {
20500 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20501 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20502 "3198 Firmware write complete: System "
20503 "reboot required to instantiate\n");
20504 break;
20505 case (LPFC_CHANGE_STATUS_FW_RESET):
20506 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20507 "3199 Firmware write complete: "
20508 "Firmware reset required to "
20509 "instantiate\n");
20510 break;
20511 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20512 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20513 "3200 Firmware write complete: Port "
20514 "Migration or PCI Reset required to "
20515 "instantiate\n");
20516 break;
20517 case (LPFC_CHANGE_STATUS_PCI_RESET):
20518 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20519 "3201 Firmware write complete: PCI "
20520 "Reset required to instantiate\n");
20521 break;
20522 default:
20523 break;
20524 }
20525 }
20526 }
20527
/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 if successful, non-zero otherwise.
 **/
20547 int
20548 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20549 uint32_t size, uint32_t *offset)
20550 {
20551 struct lpfc_mbx_wr_object *wr_object;
20552 LPFC_MBOXQ_t *mbox;
20553 int rc = 0, i = 0;
20554 uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20555 uint32_t shdr_change_status = 0, shdr_csf = 0;
20556 uint32_t mbox_tmo;
20557 struct lpfc_dmabuf *dmabuf;
20558 uint32_t written = 0;
20559 bool check_change_status = false;
20560
20561 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20562 if (!mbox)
20563 return -ENOMEM;
20564
20565 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20566 LPFC_MBOX_OPCODE_WRITE_OBJECT,
20567 sizeof(struct lpfc_mbx_wr_object) -
20568 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20569
20570 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20571 wr_object->u.request.write_offset = *offset;
20572 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20573 wr_object->u.request.object_name[0] =
20574 cpu_to_le32(wr_object->u.request.object_name[0]);
20575 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20576 list_for_each_entry(dmabuf, dmabuf_list, list) {
20577 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20578 break;
20579 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20580 wr_object->u.request.bde[i].addrHigh =
20581 putPaddrHigh(dmabuf->phys);
20582 if (written + SLI4_PAGE_SIZE >= size) {
20583 wr_object->u.request.bde[i].tus.f.bdeSize =
20584 (size - written);
20585 written += (size - written);
20586 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20587 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20588 check_change_status = true;
20589 } else {
20590 wr_object->u.request.bde[i].tus.f.bdeSize =
20591 SLI4_PAGE_SIZE;
20592 written += SLI4_PAGE_SIZE;
20593 }
20594 i++;
20595 }
20596 wr_object->u.request.bde_count = i;
20597 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20598 if (!phba->sli4_hba.intr_enable)
20599 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20600 else {
20601 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20602 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20603 }
20604
20605 shdr_status = bf_get(lpfc_mbox_hdr_status,
20606 &wr_object->header.cfg_shdr.response);
20607 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20608 &wr_object->header.cfg_shdr.response);
20609 shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20610 &wr_object->header.cfg_shdr.response);
20611 if (check_change_status) {
20612 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20613 &wr_object->u.response);
20614 shdr_csf = bf_get(lpfc_wr_object_csf,
20615 &wr_object->u.response);
20616 }
20617
20618 if (!phba->sli4_hba.intr_enable)
20619 mempool_free(mbox, phba->mbox_mem_pool);
20620 else if (rc != MBX_TIMEOUT)
20621 mempool_free(mbox, phba->mbox_mem_pool);
20622 if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20623 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20624 "3025 Write Object mailbox failed with "
20625 "status x%x add_status x%x, add_status_2 x%x, "
20626 "mbx status x%x\n",
20627 shdr_status, shdr_add_status, shdr_add_status_2,
20628 rc);
20629 rc = -ENXIO;
20630 *offset = shdr_add_status;
20631 } else {
20632 *offset += wr_object->u.response.actual_write_length;
20633 }
20634
20635 if (rc || check_change_status)
20636 lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20637 shdr_add_status_2, shdr_change_status,
20638 shdr_csf);
20639 return rc;
20640 }
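/*
 * Illustrative sketch (not driver code): a firmware download would drive
 * lpfc_wr_object() repeatedly, feeding the returned @offset back in until
 * the whole image of @size bytes has been written. Building @dmabuf_list
 * is elided, and the sketch assumes each successful call advances *offset.
 * The guard macro is hypothetical and never defined in any build.
 */
#ifdef LPFC_DOC_EXAMPLE
static int example_write_fw_image(struct lpfc_hba *phba,
				  struct list_head *dmabuf_list,
				  uint32_t size)
{
	uint32_t offset = 0;
	int rc = 0;

	while (!rc && offset < size)
		rc = lpfc_wr_object(phba, dmabuf_list, size, &offset);
	return rc;
}
#endif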
20641
/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
20651 void
20652 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20653 {
20654 struct lpfc_hba *phba = vport->phba;
20655 LPFC_MBOXQ_t *mb, *nextmb;
20656 struct lpfc_nodelist *ndlp;
20657 struct lpfc_nodelist *act_mbx_ndlp = NULL;
20658 LIST_HEAD(mbox_cmd_list);
20659 uint8_t restart_loop;
20660
20661
20662 spin_lock_irq(&phba->hbalock);
20663 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20664 if (mb->vport != vport)
20665 continue;
20666
20667 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20668 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20669 continue;
20670
20671 list_move_tail(&mb->list, &mbox_cmd_list);
20672 }
20673
20674 mb = phba->sli.mbox_active;
20675 if (mb && (mb->vport == vport)) {
20676 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20677 (mb->u.mb.mbxCommand == MBX_REG_VPI))
20678 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20679 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20680 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20681
			/* This reference is local to this routine. The
			 * reference is removed at routine exit.
			 */
20685 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20686
20687
20688 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20689 }
20690 }
20691
20692 do {
20693 restart_loop = 0;
20694 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
20699 if ((mb->vport != vport) ||
20700 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
20701 continue;
20702
20703 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20704 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20705 continue;
20706
20707 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20708 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20709 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20710
20711 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20712 restart_loop = 1;
20713 spin_unlock_irq(&phba->hbalock);
20714 spin_lock(&ndlp->lock);
20715 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20716 spin_unlock(&ndlp->lock);
20717 spin_lock_irq(&phba->hbalock);
20718 break;
20719 }
20720 }
20721 } while (restart_loop);
20722
20723 spin_unlock_irq(&phba->hbalock);
20724
20725
20726 while (!list_empty(&mbox_cmd_list)) {
20727 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
20728 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20729 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20730 mb->ctx_ndlp = NULL;
20731 if (ndlp) {
20732 spin_lock(&ndlp->lock);
20733 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20734 spin_unlock(&ndlp->lock);
20735 lpfc_nlp_put(ndlp);
20736 }
20737 }
20738 lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
20739 }
20740
20741
20742 if (act_mbx_ndlp) {
20743 spin_lock(&act_mbx_ndlp->lock);
20744 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20745 spin_unlock(&act_mbx_ndlp->lock);
20746 lpfc_nlp_put(act_mbx_ndlp);
20747 }
20748 }
20749
/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/
20761 uint32_t
20762 lpfc_drain_txq(struct lpfc_hba *phba)
20763 {
20764 LIST_HEAD(completions);
20765 struct lpfc_sli_ring *pring;
20766 struct lpfc_iocbq *piocbq = NULL;
20767 unsigned long iflags = 0;
20768 char *fail_msg = NULL;
20769 uint32_t txq_cnt = 0;
20770 struct lpfc_queue *wq;
20771 int ret = 0;
20772
20773 if (phba->link_flag & LS_MDS_LOOPBACK) {
		/* MDS WQEs are posted only to first WQ*/
20775 wq = phba->sli4_hba.hdwq[0].io_wq;
20776 if (unlikely(!wq))
20777 return 0;
20778 pring = wq->pring;
20779 } else {
20780 wq = phba->sli4_hba.els_wq;
20781 if (unlikely(!wq))
20782 return 0;
20783 pring = lpfc_phba_elsring(phba);
20784 }
20785
20786 if (unlikely(!pring) || list_empty(&pring->txq))
20787 return 0;
20788
20789 spin_lock_irqsave(&pring->ring_lock, iflags);
20790 list_for_each_entry(piocbq, &pring->txq, list) {
20791 txq_cnt++;
20792 }
20793
20794 if (txq_cnt > pring->txq_max)
20795 pring->txq_max = txq_cnt;
20796
20797 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20798
20799 while (!list_empty(&pring->txq)) {
20800 spin_lock_irqsave(&pring->ring_lock, iflags);
20801
20802 piocbq = lpfc_sli_ringtx_get(phba, pring);
20803 if (!piocbq) {
20804 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20805 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20806 "2823 txq empty and txq_cnt is %d\n ",
20807 txq_cnt);
20808 break;
20809 }
20810 txq_cnt--;
20811
20812 ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
20813
20814 if (ret && ret != IOCB_BUSY) {
20815 fail_msg = " - Cannot send IO ";
20816 piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
20817 }
20818 if (fail_msg) {
20819 piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
20820
20821 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20822 "2822 IOCB failed %s iotag 0x%x "
20823 "xri 0x%x %d flg x%x\n",
20824 fail_msg, piocbq->iotag,
20825 piocbq->sli4_xritag, ret,
20826 piocbq->cmd_flag);
20827 list_add_tail(&piocbq->list, &completions);
20828 fail_msg = NULL;
20829 }
20830 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20831 if (txq_cnt == 0 || ret == IOCB_BUSY)
20832 break;
20833 }
20834
20835 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20836 IOERR_SLI_ABORTED);
20837
20838 return txq_cnt;
20839 }
20840
/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the WQE contains a single
 * BDE then it is converted to a single sli_sge.
 * The WQE is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
20858 static uint16_t
20859 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20860 struct lpfc_sglq *sglq)
20861 {
20862 uint16_t xritag = NO_XRI;
20863 struct ulp_bde64 *bpl = NULL;
20864 struct ulp_bde64 bde;
20865 struct sli4_sge *sgl = NULL;
20866 struct lpfc_dmabuf *dmabuf;
20867 union lpfc_wqe128 *wqe;
20868 int numBdes = 0;
20869 int i = 0;
20870 uint32_t offset = 0;
20871 int inbound = 0;
20872 uint32_t cmd;
20873
20874 if (!pwqeq || !sglq)
20875 return xritag;
20876
20877 sgl = (struct sli4_sge *)sglq->sgl;
20878 wqe = &pwqeq->wqe;
20879 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20880
20881 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20882 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20883 return sglq->sli4_xritag;
20884 numBdes = pwqeq->num_bdes;
20885 if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
20890 if (pwqeq->bpl_dmabuf)
20891 dmabuf = pwqeq->bpl_dmabuf;
20892 else
20893 return xritag;
20894
20895 bpl = (struct ulp_bde64 *)dmabuf->virt;
20896 if (!bpl)
20897 return xritag;
20898
20899 for (i = 0; i < numBdes; i++) {
20900 /* Should already be byte swapped. */
20901 sgl->addr_hi = bpl->addrHigh;
20902 sgl->addr_lo = bpl->addrLow;
20903
20904 sgl->word2 = le32_to_cpu(sgl->word2);
20905 if ((i+1) == numBdes)
20906 bf_set(lpfc_sli4_sge_last, sgl, 1);
20907 else
20908 bf_set(lpfc_sli4_sge_last, sgl, 0);
20909
20910 /* swap the size field back to the cpu so we
20911  * can assign it to the sgl. */
20912 bde.tus.w = le32_to_cpu(bpl->tus.w);
20913 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20914 /* The offsets in the sgl need to be accumulated
20915  * separately for the request and reply lists.
20916  * The request is always first, the reply follows.
20917  */
20918 switch (cmd) {
20919 case CMD_GEN_REQUEST64_WQE:
20920 /* add up the reply sg entries */
20921 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20922 inbound++;
20923 /* first inbound? reset the offset */
20924 if (inbound == 1)
20925 offset = 0;
20926 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20927 bf_set(lpfc_sli4_sge_type, sgl,
20928 LPFC_SGE_TYPE_DATA);
20929 offset += bde.tus.f.bdeSize;
20930 break;
20931 case CMD_FCP_TRSP64_WQE:
20932 bf_set(lpfc_sli4_sge_offset, sgl, 0);
20933 bf_set(lpfc_sli4_sge_type, sgl,
20934 LPFC_SGE_TYPE_DATA);
20935 break;
20936 case CMD_FCP_TSEND64_WQE:
20937 case CMD_FCP_TRECEIVE64_WQE:
20938 bf_set(lpfc_sli4_sge_type, sgl,
20939 bpl->tus.f.bdeFlags);
20940 if (i < 3)
20941 offset = 0;
20942 else
20943 offset += bde.tus.f.bdeSize;
20944 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20945 break;
20946 }
20947 sgl->word2 = cpu_to_le32(sgl->word2);
20948 bpl++;
20949 sgl++;
20950 }
20951 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20952 /* The addrHigh and addrLow fields of the BDE have not
20953  * been byteswapped yet so they need to be swapped
20954  * before putting them in the sgl.
20955  */
20956 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20957 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20958 sgl->word2 = le32_to_cpu(sgl->word2);
20959 bf_set(lpfc_sli4_sge_last, sgl, 1);
20960 sgl->word2 = cpu_to_le32(sgl->word2);
20961 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20962 }
20963 return sglq->sli4_xritag;
20964 }
20965
20966 /**
20967  * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
20968  * @phba: Pointer to HBA context object.
20969  * @qp: Pointer to HDW queue.
20970  * @pwqe: Pointer to command WQE.
20971  **/
20972 int
20973 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20974 struct lpfc_iocbq *pwqe)
20975 {
20976 union lpfc_wqe128 *wqe = &pwqe->wqe;
20977 struct lpfc_async_xchg_ctx *ctxp;
20978 struct lpfc_queue *wq;
20979 struct lpfc_sglq *sglq;
20980 struct lpfc_sli_ring *pring;
20981 unsigned long iflags;
20982 uint32_t ret = 0;
20983
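/* Three classes of WQE are routed here: NVME LS (posted to the
 * dedicated nvmels_wq), FCP/NVME I/O (posted to the per-hdwq io_wq),
 * and NVMET target exchanges.  Each path takes the ring_lock of the
 * matching pring before touching the WQ.
 */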
20984 /* NVME_LS and NVME_LS ABTS requests. */
20985 if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
20986 pring = phba->sli4_hba.nvmels_wq->pring;
20987 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20988 qp, wq_access);
20989 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20990 if (!sglq) {
20991 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20992 return WQE_BUSY;
20993 }
20994 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20995 pwqe->sli4_xritag = sglq->sli4_xritag;
20996 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20997 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20998 return WQE_ERROR;
20999 }
21000 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21001 pwqe->sli4_xritag);
21002 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21003 if (ret) {
21004 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21005 return ret;
21006 }
21007
21008 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21009 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21010
21011 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21012 return 0;
21013 }
21014
21015 /* NVME_FCREQ and NVME_ABTS requests */
21016 if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21017 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21018 wq = qp->io_wq;
21019 pring = wq->pring;
21020
21021 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21022
21023 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21024 qp, wq_access);
21025 ret = lpfc_sli4_wq_put(wq, wqe);
21026 if (ret) {
21027 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21028 return ret;
21029 }
21030 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21031 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21032
21033 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21034 return 0;
21035 }
21036
21037 /* NVMET requests */
21038 if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21039 /* Get the IO distribution (hba_wqidx) for WQ assignment. */
21040 wq = qp->io_wq;
21041 pring = wq->pring;
21042
21043 ctxp = pwqe->context_un.axchg;
21044 sglq = ctxp->ctxbuf->sglq;
21045 if (pwqe->sli4_xritag == NO_XRI) {
21046 pwqe->sli4_lxritag = sglq->sli4_lxritag;
21047 pwqe->sli4_xritag = sglq->sli4_xritag;
21048 }
21049 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21050 pwqe->sli4_xritag);
21051 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21052
21053 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21054 qp, wq_access);
21055 ret = lpfc_sli4_wq_put(wq, wqe);
21056 if (ret) {
21057 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21058 return ret;
21059 }
21060 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21061 spin_unlock_irqrestore(&pring->ring_lock, iflags);
21062
21063 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
21064 return 0;
21065 }
21066 return WQE_ERROR;
21067 }
21068
21069
21070 /**
21071  * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21072  * @phba: Pointer to HBA context object.
21073  * @cmdiocb: Pointer to driver command iocb object.
21074  * @cmpl: completion function.
21075  *
21076  * Fill the appropriate fields for the abort WQE and call
21077  * internal routine lpfc_sli4_issue_wqe to send the WQE.
21078  * This function is called with no locks held.
21079  *
21080  * Returns 0 on success, a WQE error code otherwise.
21081  **/
21082 int
21083 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21084 void *cmpl)
21085 {
21086 struct lpfc_vport *vport = cmdiocb->vport;
21087 struct lpfc_iocbq *abtsiocb = NULL;
21088 union lpfc_wqe128 *abtswqe;
21089 struct lpfc_io_buf *lpfc_cmd;
21090 int retval = IOCB_ERROR;
21091 u16 xritag = cmdiocb->sli4_xritag;
21092
21093 /*
21094  * The abort is carried in a separate iocbq taken from the pool.  If
21095  * none is available, return WQE_NORESOURCE and leave the original
21096  * command untouched so the caller may retry.
21097  */
21098
21099 abtsiocb = __lpfc_sli_get_iocbq(phba);
21100 if (!abtsiocb)
21101 return WQE_NORESOURCE;
21102
21103 /* Indicate the IO is being aborted by the driver. */
21104 cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21105
21106 abtswqe = &abtsiocb->wqe;
21107 memset(abtswqe, 0, sizeof(*abtswqe));
21108
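/* Build the ABORT WQE in place.  T_XRI_TAG selects abort by XRI, and
 * the ia bit is set when the link is down or in external loopback,
 * since no ABTS can actually go on the wire in those states.
 */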
21109 if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
21110 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21111 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21112 abtswqe->abort_cmd.rsrvd5 = 0;
21113 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21114 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21115 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21116 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21117 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21118 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21119 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21120
21121 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
21122 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21123 abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21124 if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21125 abtsiocb->cmd_flag |= LPFC_IO_FCP;
21126 if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21127 abtsiocb->cmd_flag |= LPFC_IO_NVME;
21128 if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21129 abtsiocb->cmd_flag |= LPFC_IO_FOF;
21130 abtsiocb->vport = vport;
21131 abtsiocb->cmd_cmpl = cmpl;
21132
21133 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21134 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21135
21136 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21137 "0359 Abort xri x%x, original iotag x%x, "
21138 "abort cmd iotag x%x retval x%x\n",
21139 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21140
21141 if (retval) {
21142 cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21143 __lpfc_sli_release_iocbq(phba, abtsiocb);
21144 }
21145
21146 return retval;
21147 }
21148
21149 #ifdef LPFC_MXP_STAT
21150 /**
21151  * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21152  * @phba: pointer to lpfc hba data structure.
21153  * @hwqid: belong to which HWQ.
21154  *
21155  * The purpose of this routine is to take a snapshot of pbl, pvt and busy
21156  * count some time after a test case starts running.
21157  *
21158  * The user should call lpfc_debugfs_multixripools_write before running a
21159  * test case to clear stat_snapshot_taken.  While the test case runs,
21160  * stat_snapshot_taken is incremented by 1 every time this routine is
21161  * called from the heartbeat timer.  When stat_snapshot_taken equals
21162  * LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21163  **/
21164 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21165 {
21166 struct lpfc_sli4_hdw_queue *qp;
21167 struct lpfc_multixri_pool *multixri_pool;
21168 struct lpfc_pvt_pool *pvt_pool;
21169 struct lpfc_pbl_pool *pbl_pool;
21170 u32 txcmplq_cnt;
21171
21172 qp = &phba->sli4_hba.hdwq[hwqid];
21173 multixri_pool = qp->p_multixri_pool;
21174 if (!multixri_pool)
21175 return;
21176
21177 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21178 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21179 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21180 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21181
21182 multixri_pool->stat_pbl_count = pbl_pool->count;
21183 multixri_pool->stat_pvt_count = pvt_pool->count;
21184 multixri_pool->stat_busy_count = txcmplq_cnt;
21185 }
21186
21187 multixri_pool->stat_snapshot_taken++;
21188 }
21189 #endif
21190
21191 /**
21192  * lpfc_adjust_pvt_pool_count - Adjust private pool count
21193  * @phba: pointer to lpfc hba data structure.
21194  * @hwqid: belong to which HWQ.
21195  *
21196  * This routine moves some XRIs from private to public pool when private
21197  * pool is not busy.
21198  **/
21199 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21200 {
21201 struct lpfc_multixri_pool *multixri_pool;
21202 u32 io_req_count;
21203 u32 prev_io_req_count;
21204
21205 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21206 if (!multixri_pool)
21207 return;
21208 io_req_count = multixri_pool->io_req_count;
21209 prev_io_req_count = multixri_pool->prev_io_req_count;
21210
21211 if (prev_io_req_count != io_req_count) {
21212 /* Private pool is busy */
21213 multixri_pool->prev_io_req_count = io_req_count;
21214 } else {
21215 /* Private pool is not busy.
21216  * Move XRIs from private to public pool.
21217  */
21218 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21219 }
21220 }
21221
21222 /**
21223  * lpfc_adjust_high_watermark - Adjust high watermark
21224  * @phba: pointer to lpfc hba data structure.
21225  * @hwqid: belong to which HWQ.
21226  *
21227  * This routine sets high watermark as number of outstanding XRIs,
21228  * but makes sure the new value is between xri_limit/2 and xri_limit.
21229  **/
21230 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21231 {
21232 u32 new_watermark;
21233 u32 watermark_max;
21234 u32 watermark_min;
21235 u32 xri_limit;
21236 u32 txcmplq_cnt;
21237 u32 abts_io_bufs;
21238 struct lpfc_multixri_pool *multixri_pool;
21239 struct lpfc_sli4_hdw_queue *qp;
21240
21241 qp = &phba->sli4_hba.hdwq[hwqid];
21242 multixri_pool = qp->p_multixri_pool;
21243 if (!multixri_pool)
21244 return;
21245 xri_limit = multixri_pool->xri_limit;
21246
21247 watermark_max = xri_limit;
21248 watermark_min = xri_limit / 2;
21249
21250 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21251 abts_io_bufs = qp->abts_scsi_io_bufs;
21252 abts_io_bufs += qp->abts_nvme_io_bufs;
21253
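/* The new high watermark is the number of outstanding XRIs (active on
 * the WQ plus aborts in flight), clamped to [xri_limit/2, xri_limit].
 */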
21254 new_watermark = txcmplq_cnt + abts_io_bufs;
21255 new_watermark = min(watermark_max, new_watermark);
21256 new_watermark = max(watermark_min, new_watermark);
21257 multixri_pool->pvt_pool.high_watermark = new_watermark;
21258
21259 #ifdef LPFC_MXP_STAT
21260 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21261 new_watermark);
21262 #endif
21263 }
21264
21265 /**
21266  * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21267  * @phba: pointer to lpfc hba data structure.
21268  * @hwqid: belong to which HWQ.
21269  *
21270  * This routine is called from heartbeat timer when pvt_pool is idle.
21271  * All free XRIs are moved from private to public pool on hwqid with 2 steps.
21272  * The first step moves (all - low_watermark) amount of XRIs.
21273  * The second step moves the rest of XRIs.
21274  **/
21275 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21276 {
21277 struct lpfc_pbl_pool *pbl_pool;
21278 struct lpfc_pvt_pool *pvt_pool;
21279 struct lpfc_sli4_hdw_queue *qp;
21280 struct lpfc_io_buf *lpfc_ncmd;
21281 struct lpfc_io_buf *lpfc_ncmd_next;
21282 unsigned long iflag;
21283 struct list_head tmp_list;
21284 u32 tmp_count;
21285
21286 qp = &phba->sli4_hba.hdwq[hwqid];
21287 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21288 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21289 tmp_count = 0;
21290
21291 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21292 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21293
21294 if (pvt_pool->count > pvt_pool->low_watermark) {
21295 /* Step 1: move low_watermark bufs from pvt_pool
21296  *         to tmp_list.
21297  * Step 2: move the rest from pvt_pool to pbl_pool.
21298  * Step 3: move tmp_list back to pvt_pool.
21299  */
21300 INIT_LIST_HEAD(&tmp_list);
21301 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21302 &pvt_pool->list, list) {
21303 list_move_tail(&lpfc_ncmd->list, &tmp_list);
21304 tmp_count++;
21305 if (tmp_count >= pvt_pool->low_watermark)
21306 break;
21307 }
21308
21309 /* Move all bufs from pvt_pool to pbl_pool */
21310 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21311
21312 /* Move all bufs from tmp_list to pvt_pool */
21313 list_splice(&tmp_list, &pvt_pool->list);
21314
21315 pbl_pool->count += (pvt_pool->count - tmp_count);
21316 pvt_pool->count = tmp_count;
21317 } else {
21318 /* Give pbl_pool all XRIs from pvt_pool */
21319 list_splice_init(&pvt_pool->list, &pbl_pool->list);
21320 pbl_pool->count += pvt_pool->count;
21321 pvt_pool->count = 0;
21322 }
21323
21324 spin_unlock(&pvt_pool->lock);
21325 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21326 }
21327
21328 /**
21329  * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21330  * @phba: pointer to lpfc hba data structure
21331  * @qp: pointer to HDW queue
21332  * @pbl_pool: specified public free XRI pool
21333  * @pvt_pool: specified private free XRI pool
21334  * @count: number of XRIs to move
21335  *
21336  * This routine tries to move some free common bufs from the specified
21337  * pbl_pool to the specified pvt_pool. It might move less than count XRIs
21338  * if there's not enough in the public pool.
21339  *
21340  * Return:
21341  *   true - if XRIs are successfully moved from the specified pbl_pool to
21342  *          the specified pvt_pool
21343  *   false - if the specified pbl_pool is empty or locked by someone else
21344  **/
21345 static bool
21346 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21347 struct lpfc_pbl_pool *pbl_pool,
21348 struct lpfc_pvt_pool *pvt_pool, u32 count)
21349 {
21350 struct lpfc_io_buf *lpfc_ncmd;
21351 struct lpfc_io_buf *lpfc_ncmd_next;
21352 unsigned long iflag;
21353 int ret;
21354
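/* Only trylock the public pool: if another CPU is already working this
 * pbl_pool, report failure rather than spinning, and let the caller
 * move on to the next HWQ's pool.
 */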
21355 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21356 if (ret) {
21357 if (pbl_pool->count) {
21358 /* Move a batch of XRIs from public to private pool */
21359 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21360 list_for_each_entry_safe(lpfc_ncmd,
21361 lpfc_ncmd_next,
21362 &pbl_pool->list,
21363 list) {
21364 list_move_tail(&lpfc_ncmd->list,
21365 &pvt_pool->list);
21366 pvt_pool->count++;
21367 pbl_pool->count--;
21368 count--;
21369 if (count == 0)
21370 break;
21371 }
21372
21373 spin_unlock(&pvt_pool->lock);
21374 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21375 return true;
21376 }
21377 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21378 }
21379
21380 return false;
21381 }
21382
21383 /**
21384  * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21385  * @phba: pointer to lpfc hba data structure.
21386  * @hwqid: belong to which HWQ.
21387  * @count: number of XRIs to move
21388  *
21389  * This routine tries to find some free common bufs in one of the public
21390  * pools with the Round Robin method. The search always starts from local
21391  * hwqid, then the next HWQ which was found last time (rrb_next_hwqid).
21392  * Once a public pool is found, a batch of free common bufs are moved to
21393  * the private pool on hwqid. It might move less than count XRIs.
21394  **/
21395 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21396 {
21397 struct lpfc_multixri_pool *multixri_pool;
21398 struct lpfc_multixri_pool *next_multixri_pool;
21399 struct lpfc_pvt_pool *pvt_pool;
21400 struct lpfc_pbl_pool *pbl_pool;
21401 struct lpfc_sli4_hdw_queue *qp;
21402 u32 next_hwqid;
21403 u32 hwq_count;
21404 int ret;
21405
21406 qp = &phba->sli4_hba.hdwq[hwqid];
21407 multixri_pool = qp->p_multixri_pool;
21408 pvt_pool = &multixri_pool->pvt_pool;
21409 pbl_pool = &multixri_pool->pbl_pool;
21410
21411 /* Check if local pbl_pool is available */
21412 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21413 if (ret) {
21414 #ifdef LPFC_MXP_STAT
21415 multixri_pool->local_pbl_hit_count++;
21416 #endif
21417 return;
21418 }
21419
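/* The local pbl_pool could not supply XRIs; round-robin through the
 * other HWQs' public pools, starting just past the pool that last
 * satisfied a request (rrb_next_hwqid), until one succeeds or all
 * have been tried.
 */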
21420 hwq_count = phba->cfg_hdw_queue;
21421
21422 /* Get the next hwqid which was found last time */
21423 next_hwqid = multixri_pool->rrb_next_hwqid;
21424
21425 do {
21426 /* Go to next hwq */
21427 next_hwqid = (next_hwqid + 1) % hwq_count;
21428
21429 next_multixri_pool =
21430 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21431 pbl_pool = &next_multixri_pool->pbl_pool;
21432
21433 /* Check if the public free xri pool is available */
21434 ret = _lpfc_move_xri_pbl_to_pvt(
21435 phba, qp, pbl_pool, pvt_pool, count);
21436
21437 /* Exit while-loop if success or all hwqid are checked */
21438 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21439
21440 /* Starting point for the next time */
21441 multixri_pool->rrb_next_hwqid = next_hwqid;
21442
21443 if (!ret) {
21444 /* stats: all public pools are empty */
21445 multixri_pool->pbl_empty_count++;
21446 }
21447
21448 #ifdef LPFC_MXP_STAT
21449 if (ret) {
21450 if (next_hwqid == hwqid)
21451 multixri_pool->local_pbl_hit_count++;
21452 else
21453 multixri_pool->other_pbl_hit_count++;
21454 }
21455 #endif
21456 }
21457
21458 /**
21459  * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21460  * @phba: pointer to lpfc hba data structure.
21461  * @hwqid: belong to which HWQ.
21462  *
21463  * This routine gets a batch of XRIs from pbl_pool if pvt_pool is less
21464  * than low watermark.
21465  **/
21466 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21467 {
21468 struct lpfc_multixri_pool *multixri_pool;
21469 struct lpfc_pvt_pool *pvt_pool;
21470
21471 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21472 pvt_pool = &multixri_pool->pvt_pool;
21473
21474 if (pvt_pool->count < pvt_pool->low_watermark)
21475 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21476 }
21477
21478 /**
21479  * lpfc_release_io_buf - Return one IO buf back to free pool
21480  * @phba: pointer to lpfc hba data structure.
21481  * @lpfc_ncmd: IO buf to be returned.
21482  * @qp: belong to which HWQ.
21483  *
21484  * This routine returns one IO buf back to free pool. If this is an urgent
21485  * IO, the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
21486  * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
21487  * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
21488  * lpfc_io_buf_list_put.
21489  **/
21490 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21491 struct lpfc_sli4_hdw_queue *qp)
21492 {
21493 unsigned long iflag;
21494 struct lpfc_pbl_pool *pbl_pool;
21495 struct lpfc_pvt_pool *pvt_pool;
21496 struct lpfc_epd_pool *epd_pool;
21497 u32 txcmplq_cnt;
21498 u32 xri_owned;
21499 u32 xri_limit;
21500 u32 abts_io_bufs;
21501
21502 /* MUST zero fields if buffer is reused by another protocol */
21503 lpfc_ncmd->nvmeCmd = NULL;
21504 lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21505
21506 if (phba->cfg_xpsgl && !phba->nvmet_support &&
21507 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21508 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21509
21510 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21511 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21512
21513 if (phba->cfg_xri_rebalancing) {
21514 if (lpfc_ncmd->expedite) {
21515 /* Return to expedite pool */
21516 epd_pool = &phba->epd_pool;
21517 spin_lock_irqsave(&epd_pool->lock, iflag);
21518 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21519 epd_pool->count++;
21520 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21521 return;
21522 }
21523
21524 /* Avoid invalid access if an IO sneaks in and is being
21525  * rejected just _after_ xri pools are destroyed in
21526  * lpfc_offline. Nothing much can be done at this point.
21527  */
21528 if (!qp->p_multixri_pool)
21529 return;
21530
21531 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21532 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21533
21534 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21535 abts_io_bufs = qp->abts_scsi_io_bufs;
21536 abts_io_bufs += qp->abts_nvme_io_bufs;
21537
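/* xri_owned is everything this HWQ currently holds: free bufs in the
 * private pool, I/O active on the WQ, and aborts still outstanding.
 */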
21538 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21539 xri_limit = qp->p_multixri_pool->xri_limit;
21540
21541 #ifdef LPFC_MXP_STAT
21542 if (xri_owned <= xri_limit)
21543 qp->p_multixri_pool->below_limit_count++;
21544 else
21545 qp->p_multixri_pool->above_limit_count++;
21546 #endif
21547
21548 /* XRI goes to either public or private free xri pool
21549  *     based on watermark and xri_limit
21550  */
21551 if ((pvt_pool->count < pvt_pool->low_watermark) ||
21552 (xri_owned < xri_limit &&
21553 pvt_pool->count < pvt_pool->high_watermark)) {
21554 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21555 qp, free_pvt_pool);
21556 list_add_tail(&lpfc_ncmd->list,
21557 &pvt_pool->list);
21558 pvt_pool->count++;
21559 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21560 } else {
21561 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21562 qp, free_pub_pool);
21563 list_add_tail(&lpfc_ncmd->list,
21564 &pbl_pool->list);
21565 pbl_pool->count++;
21566 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21567 }
21568 } else {
21569 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21570 qp, free_xri);
21571 list_add_tail(&lpfc_ncmd->list,
21572 &qp->lpfc_io_buf_list_put);
21573 qp->put_io_bufs++;
21574 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21575 iflag);
21576 }
21577 }
21578
21579 /**
21580  * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21581  * @phba: pointer to lpfc hba data structure.
21582  * @qp: pointer to HDW queue
21583  * @pvt_pool: pointer to private pool data structure.
21584  * @ndlp: pointer to lpfc nodelist data structure.
21585  *
21586  * This routine tries to get one free IO buf from private pool.
21587  *
21588  * Return:
21589  *   pointer to one free IO buf - if private pool is not empty
21590  *   NULL - if private pool is empty
21591  **/
21592 static struct lpfc_io_buf *
21593 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21594 struct lpfc_sli4_hdw_queue *qp,
21595 struct lpfc_pvt_pool *pvt_pool,
21596 struct lpfc_nodelist *ndlp)
21597 {
21598 struct lpfc_io_buf *lpfc_ncmd;
21599 struct lpfc_io_buf *lpfc_ncmd_next;
21600 unsigned long iflag;
21601
21602 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21603 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21604 &pvt_pool->list, list) {
21605 if (lpfc_test_rrq_active(
21606 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21607 continue;
21608 list_del(&lpfc_ncmd->list);
21609 pvt_pool->count--;
21610 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21611 return lpfc_ncmd;
21612 }
21613 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21614
21615 return NULL;
21616 }
21617
21618 /**
21619  * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21620  * @phba: pointer to lpfc hba data structure.
21621  *
21622  * This routine tries to get one free IO buf from expedite pool.
21623  *
21624  * Return:
21625  *   pointer to one free IO buf - if expedite pool is not empty
21626  *   NULL - if expedite pool is empty
21627  **/
21628 static struct lpfc_io_buf *
21629 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21630 {
21631 struct lpfc_io_buf *lpfc_ncmd;
21632 struct lpfc_io_buf *lpfc_ncmd_next;
21633 unsigned long iflag;
21634 struct lpfc_epd_pool *epd_pool;
21635
21636 epd_pool = &phba->epd_pool;
21637 lpfc_ncmd = NULL;
21638
21639 spin_lock_irqsave(&epd_pool->lock, iflag);
21640 if (epd_pool->count > 0) {
21641 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21642 &epd_pool->list, list) {
21643 list_del(&lpfc_ncmd->list);
21644 epd_pool->count--;
21645 break;
21646 }
21647 }
21648 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21649
21650 return lpfc_ncmd;
21651 }
21652
21653 /**
21654  * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
21655  * @phba: pointer to lpfc hba data structure.
21656  * @ndlp: pointer to lpfc nodelist data structure.
21657  * @hwqid: belong to which HWQ
21658  * @expedite: 1 means this request is urgent.
21659  *
21660  * This routine will do the following actions and then return a pointer to
21661  * one free IO buf.
21662  *
21663  * 1. If private free xri count is empty, move some XRIs from public to
21664  *    private pool.
21665  * 2. Get one XRI from private free xri pool.
21666  * 3. If we fail to get one from pvt_pool and this is an expedite request,
21667  *    get one free xri from expedite pool.
21668  *
21669  * Note: ndlp is only used on SCSI side for RRQ testing.
21670  *       The caller should pass NULL for ndlp on NVME side.
21671  *
21672  * Return:
21673  *   pointer to one free IO buf - if private pool is not empty
21674  *   NULL - if private pool is empty
21675  **/
21676 static struct lpfc_io_buf *
21677 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
21678 struct lpfc_nodelist *ndlp,
21679 int hwqid, int expedite)
21680 {
21681 struct lpfc_sli4_hdw_queue *qp;
21682 struct lpfc_multixri_pool *multixri_pool;
21683 struct lpfc_pvt_pool *pvt_pool;
21684 struct lpfc_io_buf *lpfc_ncmd;
21685
21686 qp = &phba->sli4_hba.hdwq[hwqid];
21687 lpfc_ncmd = NULL;
21688 if (!qp) {
21689 lpfc_printf_log(phba, KERN_INFO,
21690 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21691 "5556 NULL qp for hwqid x%x\n", hwqid);
21692 return lpfc_ncmd;
21693 }
21694 multixri_pool = qp->p_multixri_pool;
21695 if (!multixri_pool) {
21696 lpfc_printf_log(phba, KERN_INFO,
21697 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21698 "5557 NULL multixri for hwqid x%x\n", hwqid);
21699 return lpfc_ncmd;
21700 }
21701 pvt_pool = &multixri_pool->pvt_pool;
21702 if (!pvt_pool) {
21703 lpfc_printf_log(phba, KERN_INFO,
21704 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21705 "5558 NULL pvt_pool for hwqid x%x\n", hwqid);
21706 return lpfc_ncmd;
21707 }
21708 multixri_pool->io_req_count++;
21709
21710 /* If pvt_pool is empty, move some XRIs from public to private pool */
21711 if (pvt_pool->count == 0)
21712 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21713
21714 /* Get one XRI from private free xri pool */
21715 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
21716
21717 if (lpfc_ncmd) {
21718 lpfc_ncmd->hdwq = qp;
21719 lpfc_ncmd->hdwq_no = hwqid;
21720 } else if (expedite) {
21721 /* If we fail to get one from pvt_pool and this is an expedite
21722  * request, get one free xri from expedite pool.
21723  */
21724 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
21725 }
21726
21727 return lpfc_ncmd;
21728 }
21729
21730 static inline struct lpfc_io_buf *
21731 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
21732 {
21733 struct lpfc_sli4_hdw_queue *qp;
21734 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
21735
21736 qp = &phba->sli4_hba.hdwq[idx];
21737 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
21738 &qp->lpfc_io_buf_list_get, list) {
21739 if (lpfc_test_rrq_active(phba, ndlp,
21740 lpfc_cmd->cur_iocbq.sli4_lxritag))
21741 continue;
21742
21743 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
21744 continue;
21745
21746 list_del_init(&lpfc_cmd->list);
21747 qp->get_io_bufs--;
21748 lpfc_cmd->hdwq = qp;
21749 lpfc_cmd->hdwq_no = idx;
21750 return lpfc_cmd;
21751 }
21752 return NULL;
21753 }
21754
21755 /**
21756  * lpfc_get_io_buf - Get one IO buffer from free pool
21757  * @phba: The HBA for which this call is being executed.
21758  * @ndlp: pointer to lpfc nodelist data structure.
21759  * @hwqid: belong to which HWQ
21760  * @expedite: 1 means this request is urgent.
21761  *
21762  * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
21763  * removes an IO buffer from multiXRI pools. If cfg_xri_rebalancing==0,
21764  * removes an IO buffer from head of @hwqid io_buf_list and returns to caller.
21765  *
21766  * Note: ndlp is only used on SCSI side for RRQ testing.
21767  *       The caller should pass NULL for ndlp on NVME side.
21768  *
21769  * Return:
21770  *   NULL - Error
21771  *   Pointer to lpfc_io_buf - Success
21772  **/
21773 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
21774 struct lpfc_nodelist *ndlp,
21775 u32 hwqid, int expedite)
21776 {
21777 struct lpfc_sli4_hdw_queue *qp;
21778 unsigned long iflag;
21779 struct lpfc_io_buf *lpfc_cmd;
21780
21781 qp = &phba->sli4_hba.hdwq[hwqid];
21782 lpfc_cmd = NULL;
21783 if (!qp) {
21784 lpfc_printf_log(phba, KERN_WARNING,
21785 LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21786 "5555 NULL qp for hwqid x%x\n", hwqid);
21787 return lpfc_cmd;
21788 }
21789
21790 if (phba->cfg_xri_rebalancing)
21791 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
21792 phba, ndlp, hwqid, expedite);
21793 else {
21794 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
21795 qp, alloc_xri_get);
21796 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
21797 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21798 if (!lpfc_cmd) {
21799 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
21800 qp, alloc_xri_put);
21801 list_splice(&qp->lpfc_io_buf_list_put,
21802 &qp->lpfc_io_buf_list_get);
21803 qp->get_io_bufs += qp->put_io_bufs;
21804 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
21805 qp->put_io_bufs = 0;
21806 spin_unlock(&qp->io_buf_list_put_lock);
21807 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
21808 expedite)
21809 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21810 }
21811 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
21812 }
21813
21814 return lpfc_cmd;
21815 }
21816
21817 /**
21818  * lpfc_read_object - Retrieve object data from HBA
21819  * @phba: The HBA for which this call is being executed.
21820  * @rdobject: Pathname of object data we want to read.
21821  * @datap: Pointer to where data will be copied to.
21822  * @datasz: size of data area
21823  *
21824  * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
21825  * The data will be truncated if datasz is not large enough.
21826  * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
21827  * Returns the actual bytes read from the object.
21828  **/
21829 int
21830 lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
21831 uint32_t datasz)
21832 {
21833 struct lpfc_mbx_read_object *read_object;
21834 LPFC_MBOXQ_t *mbox;
21835 int rc, length, eof, j, byte_cnt = 0;
21836 uint32_t shdr_status, shdr_add_status;
21837 union lpfc_sli4_cfg_shdr *shdr;
21838 struct lpfc_dmabuf *pcmd;
21839 u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
21840
21841
21842 if (!datap)
21843 return -ENODEV;
21844
21845 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
21846 if (!mbox)
21847 return -ENOMEM;
21848 length = (sizeof(struct lpfc_mbx_read_object) -
21849 sizeof(struct lpfc_sli4_cfg_mhdr));
21850 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
21851 LPFC_MBOX_OPCODE_READ_OBJECT,
21852 length, LPFC_SLI4_MBX_EMBED);
21853 read_object = &mbox->u.mqe.un.read_object;
21854 shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
21855
21856 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
21857 bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
21858 read_object->u.request.rd_object_offset = 0;
21859 read_object->u.request.rd_object_cnt = 1;
21860
21861 memset((void *)read_object->u.request.rd_object_name, 0,
21862 LPFC_OBJ_NAME_SZ);
21863 scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s", rdobject);
21864 for (j = 0; j < strlen(rdobject); j++)
21865 read_object->u.request.rd_object_name[j] =
21866 cpu_to_le32(rd_object_name[j]);
21867
21868 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
21869 if (pcmd)
21870 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
21871 if (!pcmd || !pcmd->virt) {
21872 kfree(pcmd);
21873 mempool_free(mbox, phba->mbox_mem_pool);
21874 return -ENOMEM;
21875 }
21876 memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
21877 read_object->u.request.rd_object_hbuf[0].pa_lo =
21878 putPaddrLow(pcmd->phys);
21879 read_object->u.request.rd_object_hbuf[0].pa_hi =
21880 putPaddrHigh(pcmd->phys);
21881 read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
21882
21883 mbox->vport = phba->pport;
21884 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21885 mbox->ctx_ndlp = NULL;
21886
21887 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
21888 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
21889 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
21890
21891 if (shdr_status == STATUS_FAILED &&
21892 shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
21893 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
21894 "4674 No port cfg file in FW.\n");
21895 byte_cnt = -ENOENT;
21896 } else if (shdr_status || shdr_add_status || rc) {
21897 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
21898 "2625 READ_OBJECT mailbox failed with "
21899 "status x%x add_status x%x, mbx status x%x\n",
21900 shdr_status, shdr_add_status, rc);
21901 byte_cnt = -ENXIO;
21902 } else {
21903
21904 length = read_object->u.response.rd_object_actual_rlen;
21905 eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
21906 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
21907 "2626 READ_OBJECT Success len %d:%d, EOF %d\n",
21908 length, datasz, eof);
21909
21910 /* Detect if the port config file exists but is empty */
21911 if (!length && eof) {
21912 byte_cnt = 0;
21913 goto exit;
21914 }
21915
21916 byte_cnt = length;
21917 lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
21918 }
21919
21920 exit:
21921 /* This is an embedded SLI4 mailbox with an external buffer allocated.
21922  * Free the pcmd and then cleanup with the correct routine.
21923  */
21924 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
21925 kfree(pcmd);
21926 lpfc_sli4_mbox_cmd_free(phba, mbox);
21927 return byte_cnt;
21928 }
21929
21930 /**
21931  * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
21932  * @phba: The HBA for which this call is being executed.
21933  * @lpfc_buf: IO buf structure to append the SGL chunk
21934  *
21935  * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
21936  * and will allocate an SGL chunk if the pool is empty.
21937  *
21938  * Return:
21939  *   NULL - Error
21940  *   Pointer to sli4_hybrid_sgl - Success
21941  **/
21942 struct sli4_hybrid_sgl *
21943 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21944 {
21945 struct sli4_hybrid_sgl *list_entry = NULL;
21946 struct sli4_hybrid_sgl *tmp = NULL;
21947 struct sli4_hybrid_sgl *allocated_sgl = NULL;
21948 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21949 struct list_head *buf_list = &hdwq->sgl_list;
21950 unsigned long iflags;
21951
21952 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21953
21954 if (likely(!list_empty(buf_list))) {
21955 /* break off 1 chunk from the sgl_list */
21956 list_for_each_entry_safe(list_entry, tmp,
21957 buf_list, list_node) {
21958 list_move_tail(&list_entry->list_node,
21959 &lpfc_buf->dma_sgl_xtra_list);
21960 break;
21961 }
21962 } else {
21963 /* allocate more */
21964 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
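/* Drop hdwq_lock across the allocation; kmalloc_node() targets the
 * NUMA node of the CPU backing this hdwq and GFP_ATOMIC keeps the
 * path safe in non-sleeping context.  The lock is retaken before the
 * new chunk is attached to the buffer's list.
 */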
21965 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21966 cpu_to_node(hdwq->io_wq->chann));
21967 if (!tmp) {
21968 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21969 "8353 error kmalloc memory for HDWQ "
21970 "%d %s\n",
21971 lpfc_buf->hdwq_no, __func__);
21972 return NULL;
21973 }
21974
21975 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
21976 GFP_ATOMIC, &tmp->dma_phys_sgl);
21977 if (!tmp->dma_sgl) {
21978 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21979 "8354 error pool_alloc memory for HDWQ "
21980 "%d %s\n",
21981 lpfc_buf->hdwq_no, __func__);
21982 kfree(tmp);
21983 return NULL;
21984 }
21985
21986 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21987 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
21988 }
21989
21990 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
21991 struct sli4_hybrid_sgl,
21992 list_node);
21993
21994 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21995
21996 return allocated_sgl;
21997 }
21998
21999 /**
22000  * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
22001  * @phba: The HBA for which this call is being executed.
22002  * @lpfc_buf: IO buf structure with the SGL chunk
22003  *
22004  * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
22005  *
22006  * Return:
22007  *   0 - Success
22008  *   -EINVAL - Error
22009  **/
22010 int
22011 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22012 {
22013 int rc = 0;
22014 struct sli4_hybrid_sgl *list_entry = NULL;
22015 struct sli4_hybrid_sgl *tmp = NULL;
22016 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22017 struct list_head *buf_list = &hdwq->sgl_list;
22018 unsigned long iflags;
22019
22020 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22021
22022 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22023 list_for_each_entry_safe(list_entry, tmp,
22024 &lpfc_buf->dma_sgl_xtra_list,
22025 list_node) {
22026 list_move_tail(&list_entry->list_node,
22027 buf_list);
22028 }
22029 } else {
22030 rc = -EINVAL;
22031 }
22032
22033 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22034 return rc;
22035 }
22036
22037
22038 /**
22039  * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22040  * @phba: phba object
22041  * @hdwq: hdwq to cleanup sgl buff resources on
22042  *
22043  * This routine frees all SGL chunks of hdwq SGL chunk pool.
22044  *
22045  * Return: None
22046  **/
22047 void
22048 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22049 struct lpfc_sli4_hdw_queue *hdwq)
22050 {
22051 struct list_head *buf_list = &hdwq->sgl_list;
22052 struct sli4_hybrid_sgl *list_entry = NULL;
22053 struct sli4_hybrid_sgl *tmp = NULL;
22054 unsigned long iflags;
22055
22056 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22057
22058 /* Free sgl pool */
22059 list_for_each_entry_safe(list_entry, tmp,
22060 buf_list, list_node) {
22061 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22062 list_entry->dma_sgl,
22063 list_entry->dma_phys_sgl);
22064 list_del(&list_entry->list_node);
22065 kfree(list_entry);
22066 }
22067
22068 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22069 }
22070
22071 /**
22072  * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22073  * @phba: The HBA for which this call is being executed.
22074  * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22075  *
22076  * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22077  * and will allocate a CMD/RSP buffer if the pool is empty.
22078  *
22079  * Return:
22080  *   NULL - Error
22081  *   Pointer to fcp_cmd_rsp_buf - Success
22082  **/
22083 struct fcp_cmd_rsp_buf *
22084 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22085 struct lpfc_io_buf *lpfc_buf)
22086 {
22087 struct fcp_cmd_rsp_buf *list_entry = NULL;
22088 struct fcp_cmd_rsp_buf *tmp = NULL;
22089 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22090 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22091 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22092 unsigned long iflags;
22093
22094 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22095
22096 if (likely(!list_empty(buf_list))) {
22097 /* break off 1 chunk from the list */
22098 list_for_each_entry_safe(list_entry, tmp,
22099 buf_list,
22100 list_node) {
22101 list_move_tail(&list_entry->list_node,
22102 &lpfc_buf->dma_cmd_rsp_list);
22103 break;
22104 }
22105 } else {
22106 /* allocate more */
22107 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22108 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22109 cpu_to_node(hdwq->io_wq->chann));
22110 if (!tmp) {
22111 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22112 "8355 error kmalloc memory for HDWQ "
22113 "%d %s\n",
22114 lpfc_buf->hdwq_no, __func__);
22115 return NULL;
22116 }
22117
22118 tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
22119 GFP_ATOMIC,
22120 &tmp->fcp_cmd_rsp_dma_handle);
22121
22122 if (!tmp->fcp_cmnd) {
22123 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22124 "8356 error pool_alloc memory for HDWQ "
22125 "%d %s\n",
22126 lpfc_buf->hdwq_no, __func__);
22127 kfree(tmp);
22128 return NULL;
22129 }
22130
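/* fcp_cmnd and fcp_rsp share one DMA allocation; the response buffer
 * starts immediately after the command.
 */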
22131 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22132 sizeof(struct fcp_cmnd));
22133
22134 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22135 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22136 }
22137
22138 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22139 struct fcp_cmd_rsp_buf,
22140 list_node);
22141
22142 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22143
22144 return allocated_buf;
22145 }
22146
22147 /**
22148  * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22149  * @phba: The HBA for which this call is being executed.
22150  * @lpfc_buf: IO buf structure with the CMD/RSP buffer
22151  *
22152  * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
22153  *
22154  * Return:
22155  *   0 - Success
22156  *   -EINVAL - Error
22157  **/
22158 int
22159 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22160 struct lpfc_io_buf *lpfc_buf)
22161 {
22162 int rc = 0;
22163 struct fcp_cmd_rsp_buf *list_entry = NULL;
22164 struct fcp_cmd_rsp_buf *tmp = NULL;
22165 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22166 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22167 unsigned long iflags;
22168
22169 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22170
22171 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
22172 list_for_each_entry_safe(list_entry, tmp,
22173 &lpfc_buf->dma_cmd_rsp_list,
22174 list_node) {
22175 list_move_tail(&list_entry->list_node,
22176 buf_list);
22177 }
22178 } else {
22179 rc = -EINVAL;
22180 }
22181
22182 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22183 return rc;
22184 }
22185
22186
22187 /**
22188  * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
22189  * @phba: phba object
22190  * @hdwq: hdwq to cleanup cmd rsp buff resources on
22191  *
22192  * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
22193  *
22194  * Return: None
22195  **/
22196 void
22197 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22198 struct lpfc_sli4_hdw_queue *hdwq)
22199 {
22200 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22201 struct fcp_cmd_rsp_buf *list_entry = NULL;
22202 struct fcp_cmd_rsp_buf *tmp = NULL;
22203 unsigned long iflags;
22204
22205 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22206
22207 /* Free cmd_rsp buf pool */
22208 list_for_each_entry_safe(list_entry, tmp,
22209 buf_list,
22210 list_node) {
22211 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
22212 list_entry->fcp_cmnd,
22213 list_entry->fcp_cmd_rsp_dma_handle);
22214 list_del(&list_entry->list_node);
22215 kfree(list_entry);
22216 }
22217
22218 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22219 }
22220
22221 /**
22222  * lpfc_sli_prep_wqe - Prepare a WQE for posting
22223  * @phba: phba object
22224  * @job: job entry of the command to be posted.
22225  *
22226  * Fill in the common fields of the WQE for each of the commands.
22227  *
22228  * Return:
22229  *	None
22230  **/
22231 void
22232 lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
22233 {
22234 u8 cmnd;
22235 u32 *pcmd;
22236 u32 if_type = 0;
22237 u32 fip, abort_tag;
22238 struct lpfc_nodelist *ndlp = NULL;
22239 union lpfc_wqe128 *wqe = &job->wqe;
22240 u8 command_type = ELS_COMMAND_NON_FIP;
22241
22242 fip = phba->hba_flag & HBA_FIP_SUPPORT;
22243 /* The fcp commands will set command type */
22244 if (job->cmd_flag & LPFC_IO_FCP)
22245 command_type = FCP_COMMAND;
22246 else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
22247 command_type = ELS_COMMAND_FIP;
22248 else
22249 command_type = ELS_COMMAND_NON_FIP;
22250
22251 abort_tag = job->iotag;
22252 cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
22253
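/* The WQE command code was already set by the caller; only the common
 * control bits and per-command fields are filled in below.
 */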
22254 switch (cmnd) {
22255 case CMD_ELS_REQUEST64_WQE:
22256 ndlp = job->ndlp;
22257
22258 if_type = bf_get(lpfc_sli_intf_if_type,
22259 &phba->sli4_hba.sli_intf);
22260 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22261 pcmd = (u32 *)job->cmd_dmabuf->virt;
22262 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
22263 *pcmd == ELS_CMD_SCR ||
22264 *pcmd == ELS_CMD_RDF ||
22265 *pcmd == ELS_CMD_EDC ||
22266 *pcmd == ELS_CMD_RSCN_XMT ||
22267 *pcmd == ELS_CMD_FDISC ||
22268 *pcmd == ELS_CMD_LOGO ||
22269 *pcmd == ELS_CMD_QFPA ||
22270 *pcmd == ELS_CMD_UVEM ||
22271 *pcmd == ELS_CMD_PLOGI)) {
22272 bf_set(els_req64_sp, &wqe->els_req, 1);
22273 bf_set(els_req64_sid, &wqe->els_req,
22274 job->vport->fc_myDID);
22275
22276 if ((*pcmd == ELS_CMD_FLOGI) &&
22277 !(phba->fc_topology ==
22278 LPFC_TOPOLOGY_LOOP))
22279 bf_set(els_req64_sid, &wqe->els_req, 0);
22280
22281 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
22282 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22283 phba->vpi_ids[job->vport->vpi]);
22284 } else if (pcmd) {
22285 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
22286 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22287 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22288 }
22289 }
22290
22291 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
22292 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22293
22294 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
22295 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
22296 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
22297 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22298 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
22299 break;
22300 case CMD_XMIT_ELS_RSP64_WQE:
22301 ndlp = job->ndlp;
22302
22303 /* word4 */
22304 wqe->xmit_els_rsp.word4 = 0;
22305
22306 if_type = bf_get(lpfc_sli_intf_if_type,
22307 &phba->sli4_hba.sli_intf);
22308 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22309 if (job->vport->fc_flag & FC_PT2PT) {
22310 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22311 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22312 job->vport->fc_myDID);
22313 if (job->vport->fc_myDID == Fabric_DID) {
22314 bf_set(wqe_els_did,
22315 &wqe->xmit_els_rsp.wqe_dest, 0);
22316 }
22317 }
22318 }
22319
22320 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
22321 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
22322 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
22323 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
22324 LPFC_WQE_LENLOC_WORD3);
22325 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
22326
22327 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
22328 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22329 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22330 job->vport->fc_myDID);
22331 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
22332 }
22333
22334 if (phba->sli_rev == LPFC_SLI_REV4) {
22335 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
22336 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22337
22338 if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
22339 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
22340 phba->vpi_ids[job->vport->vpi]);
22341 }
22342 command_type = OTHER_COMMAND;
22343 break;
22344 case CMD_GEN_REQUEST64_WQE:
22345
22346 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
22347 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
22348 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
22349 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22350 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
22351 command_type = OTHER_COMMAND;
22352 break;
22353 case CMD_XMIT_SEQUENCE64_WQE:
22354 if (phba->link_flag & LS_LOOPBACK_MODE)
22355 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
22356
22357 wqe->xmit_sequence.rsvd3 = 0;
22358 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
22359 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
22360 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
22361 LPFC_WQE_IOD_WRITE);
22362 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
22363 LPFC_WQE_LENLOC_WORD12);
22364 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
22365 command_type = OTHER_COMMAND;
22366 break;
22367 case CMD_XMIT_BLS_RSP64_WQE:
22368 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
22369 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
22370 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
22371 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
22372 phba->vpi_ids[phba->pport->vpi]);
22373 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
22374 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
22375 LPFC_WQE_LENLOC_NONE);
22376
22377 command_type = OTHER_COMMAND;
22378 break;
22379 case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
22380 case CMD_ABORT_XRI_WQE:	/* abort iotag */
22381 case CMD_SEND_FRAME:	/* mds loopback */
22382 /* cases already formatted for sli4 wqe - no changes necessary */
22383 return;
22384 default:
22385 dump_stack();
22386 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
22387 "6207 Invalid command 0x%x\n",
22388 cmnd);
22389 break;
22390 }
22391
22392 wqe->generic.wqe_com.abort_tag = abort_tag;
22393 bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
22394 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
22395 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
22396 }