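/*
 * Emulex LightPulse Fibre Channel (lpfc) driver: adapter initialization and
 * teardown, heartbeat handling, and error-attention recovery routines.
 */
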
0024 #include <linux/blkdev.h>
0025 #include <linux/delay.h>
0026 #include <linux/dma-mapping.h>
0027 #include <linux/idr.h>
0028 #include <linux/interrupt.h>
0029 #include <linux/module.h>
0030 #include <linux/kthread.h>
0031 #include <linux/pci.h>
0032 #include <linux/spinlock.h>
0033 #include <linux/ctype.h>
0034 #include <linux/aer.h>
0035 #include <linux/slab.h>
0036 #include <linux/firmware.h>
0037 #include <linux/miscdevice.h>
0038 #include <linux/percpu.h>
0039 #include <linux/msi.h>
0040 #include <linux/irq.h>
0041 #include <linux/bitops.h>
0042 #include <linux/crash_dump.h>
0043 #include <linux/cpu.h>
0044 #include <linux/cpuhotplug.h>
0045
0046 #include <scsi/scsi.h>
0047 #include <scsi/scsi_device.h>
0048 #include <scsi/scsi_host.h>
0049 #include <scsi/scsi_transport_fc.h>
0050 #include <scsi/scsi_tcq.h>
0051 #include <scsi/fc/fc_fs.h>
0052
0053 #include "lpfc_hw4.h"
0054 #include "lpfc_hw.h"
0055 #include "lpfc_sli.h"
0056 #include "lpfc_sli4.h"
0057 #include "lpfc_nl.h"
0058 #include "lpfc_disc.h"
0059 #include "lpfc.h"
0060 #include "lpfc_scsi.h"
0061 #include "lpfc_nvme.h"
0062 #include "lpfc_logmsg.h"
0063 #include "lpfc_crtn.h"
0064 #include "lpfc_vport.h"
0065 #include "lpfc_version.h"
0066 #include "lpfc_ids.h"
0067
0068 static enum cpuhp_state lpfc_cpuhp_state;
0069
0070 static uint32_t lpfc_present_cpu;
0071 static bool lpfc_pldv_detect;
0072
0073 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
0074 static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
0075 static void lpfc_cpuhp_add(struct lpfc_hba *phba);
0076 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
0077 static int lpfc_post_rcv_buf(struct lpfc_hba *);
0078 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
0079 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
0080 static int lpfc_setup_endian_order(struct lpfc_hba *);
0081 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
0082 static void lpfc_free_els_sgl_list(struct lpfc_hba *);
0083 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
0084 static void lpfc_init_sgl_list(struct lpfc_hba *);
0085 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
0086 static void lpfc_free_active_sgl(struct lpfc_hba *);
0087 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
0088 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
0089 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
0090 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
0091 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
0092 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
0093 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
0094 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
0095 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
0096 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
0097 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
0098 static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
0099
0100 static struct scsi_transport_template *lpfc_transport_template = NULL;
0101 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
0102 static DEFINE_IDR(lpfc_hba_index);
0103 #define LPFC_NVMET_BUF_POST 254
0104 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
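
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues READ_NVPARM (on LightPulse adapters), READ_REV and DUMP (VPD region)
 * mailbox commands in polled mode to collect revision information and the
 * Vital Product Data needed before the CONFIG_PORT mailbox command is issued.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/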
0120 int
0121 lpfc_config_port_prep(struct lpfc_hba *phba)
0122 {
0123 lpfc_vpd_t *vp = &phba->vpd;
0124 int i = 0, rc;
0125 LPFC_MBOXQ_t *pmb;
0126 MAILBOX_t *mb;
0127 char *lpfc_vpd_data = NULL;
0128 uint16_t offset = 0;
0129 static char licensed[56] =
0130 "key unlock for use with gnu public licensed code only\0";
0131 static int init_key = 1;
0132
0133 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0134 if (!pmb) {
0135 phba->link_state = LPFC_HBA_ERROR;
0136 return -ENOMEM;
0137 }
0138
0139 mb = &pmb->u.mb;
0140 phba->link_state = LPFC_INIT_MBX_CMDS;
0141
0142 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
0143 if (init_key) {
0144 uint32_t *ptext = (uint32_t *) licensed;
0145
0146 for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
0147 *ptext = cpu_to_be32(*ptext);
0148 init_key = 0;
0149 }
0150
0151 lpfc_read_nv(phba, pmb);
0152 memset((char*)mb->un.varRDnvp.rsvd3, 0,
0153 sizeof (mb->un.varRDnvp.rsvd3));
0154 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
0155 sizeof (licensed));
0156
0157 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
0158
0159 if (rc != MBX_SUCCESS) {
0160 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0161 "0324 Config Port initialization "
0162 "error, mbxCmd x%x READ_NVPARM, "
0163 "mbxStatus x%x\n",
0164 mb->mbxCommand, mb->mbxStatus);
0165 mempool_free(pmb, phba->mbox_mem_pool);
0166 return -ERESTART;
0167 }
0168 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
0169 sizeof(phba->wwnn));
0170 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
0171 sizeof(phba->wwpn));
0172 }
0173
0174
0175
0176
0177
0178 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
0179
0180
0181 lpfc_read_rev(phba, pmb);
0182 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
0183 if (rc != MBX_SUCCESS) {
0184 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0185 "0439 Adapter failed to init, mbxCmd x%x "
0186 "READ_REV, mbxStatus x%x\n",
0187 mb->mbxCommand, mb->mbxStatus);
0188 mempool_free( pmb, phba->mbox_mem_pool);
0189 return -ERESTART;
0190 }
0191
0192
0193
0194
0195
0196
0197 if (mb->un.varRdRev.rr == 0) {
0198 vp->rev.rBit = 0;
0199 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0200 "0440 Adapter failed to init, READ_REV has "
0201 "missing revision information.\n");
0202 mempool_free(pmb, phba->mbox_mem_pool);
0203 return -ERESTART;
0204 }
0205
0206 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
0207 mempool_free(pmb, phba->mbox_mem_pool);
0208 return -EINVAL;
0209 }
0210
0211
0212 vp->rev.rBit = 1;
0213 memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
0214 vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
0215 memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
0216 vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
0217 memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
0218 vp->rev.biuRev = mb->un.varRdRev.biuRev;
0219 vp->rev.smRev = mb->un.varRdRev.smRev;
0220 vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
0221 vp->rev.endecRev = mb->un.varRdRev.endecRev;
0222 vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
0223 vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
0224 vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
0225 vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
0226 vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
0227 vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
0228
0229
0230
0231
0232
0233 if (vp->rev.feaLevelHigh < 9)
0234 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
0235
0236 if (lpfc_is_LC_HBA(phba->pcidev->device))
0237 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
0238 sizeof (phba->RandomData));
0239
0240
0241 lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
0242 if (!lpfc_vpd_data)
0243 goto out_free_mbox;
0244 do {
0245 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
0246 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
0247
0248 if (rc != MBX_SUCCESS) {
0249 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
0250 "0441 VPD not present on adapter, "
0251 "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
0252 mb->mbxCommand, mb->mbxStatus);
0253 mb->un.varDmp.word_cnt = 0;
0254 }
0255
0256
0257
0258 if (mb->un.varDmp.word_cnt == 0)
0259 break;
0260
0261 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
0262 mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
0263 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
0264 lpfc_vpd_data + offset,
0265 mb->un.varDmp.word_cnt);
0266 offset += mb->un.varDmp.word_cnt;
0267 } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
0268
0269 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
0270
0271 kfree(lpfc_vpd_data);
0272 out_free_mbox:
0273 mempool_free(pmb, phba->mbox_mem_pool);
0274 return 0;
0275 }
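
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for the ASYNCEVT_ENABLE mailbox command. Records whether
 * the adapter supports temperature sensing and frees the mailbox.
 **/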
0287 static void
0288 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
0289 {
0290 if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
0291 phba->temp_sensor_support = 1;
0292 else
0293 phba->temp_sensor_support = 0;
0294 mempool_free(pmboxq, phba->mbox_mem_pool);
0295 return;
0296 }
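
/**
 * lpfc_dump_wakeup_param_cmpl - DUMP mailbox completion for wakeup parameters
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Decodes the program id word returned by the DUMP mailbox command and
 * formats the adapter's Option ROM version string, then frees the mailbox.
 **/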
0308 static void
0309 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
0310 {
0311 struct prog_id *prg;
0312 uint32_t prog_id_word;
0313 char dist = ' ';
0314
0315 char dist_char[] = "nabx";
0316
0317 if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
0318 mempool_free(pmboxq, phba->mbox_mem_pool);
0319 return;
0320 }
0321
0322 prg = (struct prog_id *) &prog_id_word;
0323
0324
0325 prog_id_word = pmboxq->u.mb.un.varWords[7];
0326
0327
0328 if (prg->dist < 4)
0329 dist = dist_char[prg->dist];
0330
0331 if ((prg->dist == 3) && (prg->num == 0))
0332 snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
0333 prg->ver, prg->rev, prg->lev);
0334 else
0335 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
0336 prg->ver, prg->rev, prg->lev,
0337 dist, prg->num);
0338 mempool_free(pmboxq, phba->mbox_mem_pool);
0339 return;
0340 }
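
/**
 * lpfc_update_vport_wwn - Update the WWNs kept in the vport
 * @vport: pointer to lpfc vport data structure.
 *
 * Synchronizes the vport's node and port names with the service parameters
 * read from the adapter, and detects a fabric-assigned WWPN (FA-PWWN) change
 * on the physical port.
 **/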
0350 void
0351 lpfc_update_vport_wwn(struct lpfc_vport *vport)
0352 {
0353 struct lpfc_hba *phba = vport->phba;
0354
0355
0356
0357
0358
0359 if (vport->fc_nodename.u.wwn[0] == 0)
0360 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
0361 sizeof(struct lpfc_name));
0362 else
0363 memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
0364 sizeof(struct lpfc_name));
0365
0366
0367
0368
0369
0370 if (vport->fc_portname.u.wwn[0] != 0 &&
0371 memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
0372 sizeof(struct lpfc_name))) {
0373 vport->vport_flag |= FAWWPN_PARAM_CHG;
0374
0375 if (phba->sli_rev == LPFC_SLI_REV4 &&
0376 vport->port_type == LPFC_PHYSICAL_PORT &&
0377 phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
0378 if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
0379 phba->sli4_hba.fawwpn_flag &=
0380 ~LPFC_FAWWPN_FABRIC;
0381 lpfc_printf_log(phba, KERN_INFO,
0382 LOG_SLI | LOG_DISCOVERY | LOG_ELS,
0383 "2701 FA-PWWN change WWPN from %llx to "
0384 "%llx: vflag x%x fawwpn_flag x%x\n",
0385 wwn_to_u64(vport->fc_portname.u.wwn),
0386 wwn_to_u64
0387 (vport->fc_sparam.portName.u.wwn),
0388 vport->vport_flag,
0389 phba->sli4_hba.fawwpn_flag);
0390 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
0391 sizeof(struct lpfc_name));
0392 }
0393 }
0394
0395 if (vport->fc_portname.u.wwn[0] == 0)
0396 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
0397 sizeof(struct lpfc_name));
0398 else
0399 memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
0400 sizeof(struct lpfc_name));
0401 }
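
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Runs after the CONFIG_PORT mailbox command completes successfully: reads
 * the service parameters and adapter configuration, posts receive buffers,
 * configures MSI-X when in use, enables host interrupts, starts the ELS,
 * heartbeat and error-attention timers, and optionally brings up the link.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/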
0416 int
0417 lpfc_config_port_post(struct lpfc_hba *phba)
0418 {
0419 struct lpfc_vport *vport = phba->pport;
0420 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
0421 LPFC_MBOXQ_t *pmb;
0422 MAILBOX_t *mb;
0423 struct lpfc_dmabuf *mp;
0424 struct lpfc_sli *psli = &phba->sli;
0425 uint32_t status, timeout;
0426 int i, j;
0427 int rc;
0428
0429 spin_lock_irq(&phba->hbalock);
0430
0431
0432
0433
0434 if (phba->over_temp_state == HBA_OVER_TEMP)
0435 phba->over_temp_state = HBA_NORMAL_TEMP;
0436 spin_unlock_irq(&phba->hbalock);
0437
0438 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0439 if (!pmb) {
0440 phba->link_state = LPFC_HBA_ERROR;
0441 return -ENOMEM;
0442 }
0443 mb = &pmb->u.mb;
0444
0445
0446 rc = lpfc_read_sparam(phba, pmb, 0);
0447 if (rc) {
0448 mempool_free(pmb, phba->mbox_mem_pool);
0449 return -ENOMEM;
0450 }
0451
0452 pmb->vport = vport;
0453 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
0454 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0455 "0448 Adapter failed init, mbxCmd x%x "
0456 "READ_SPARM mbxStatus x%x\n",
0457 mb->mbxCommand, mb->mbxStatus);
0458 phba->link_state = LPFC_HBA_ERROR;
0459 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
0460 return -EIO;
0461 }
0462
0463 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
0464
0465
0466
0467
0468
0469 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
0470 lpfc_mbuf_free(phba, mp->virt, mp->phys);
0471 kfree(mp);
0472 pmb->ctx_buf = NULL;
0473 lpfc_update_vport_wwn(vport);
0474
0475
0476 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
0477 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
0478 fc_host_max_npiv_vports(shost) = phba->max_vpi;
0479
0480
0481
0482 if (phba->SerialNumber[0] == 0) {
0483 uint8_t *outptr;
0484
0485 outptr = &vport->fc_nodename.u.s.IEEE[0];
0486 for (i = 0; i < 12; i++) {
0487 status = *outptr++;
0488 j = ((status & 0xf0) >> 4);
0489 if (j <= 9)
0490 phba->SerialNumber[i] =
0491 (char)((uint8_t) 0x30 + (uint8_t) j);
0492 else
0493 phba->SerialNumber[i] =
0494 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
0495 i++;
0496 j = (status & 0xf);
0497 if (j <= 9)
0498 phba->SerialNumber[i] =
0499 (char)((uint8_t) 0x30 + (uint8_t) j);
0500 else
0501 phba->SerialNumber[i] =
0502 (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
0503 }
0504 }
0505
0506 lpfc_read_config(phba, pmb);
0507 pmb->vport = vport;
0508 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
0509 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0510 "0453 Adapter failed to init, mbxCmd x%x "
0511 "READ_CONFIG, mbxStatus x%x\n",
0512 mb->mbxCommand, mb->mbxStatus);
0513 phba->link_state = LPFC_HBA_ERROR;
0514 mempool_free( pmb, phba->mbox_mem_pool);
0515 return -EIO;
0516 }
0517
0518
0519 lpfc_sli_read_link_ste(phba);
0520
0521
0522 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
0523 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
0524 "3359 HBA queue depth changed from %d to %d\n",
0525 phba->cfg_hba_queue_depth,
0526 mb->un.varRdConfig.max_xri);
0527 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
0528 }
0529
0530 phba->lmt = mb->un.varRdConfig.lmt;
0531
0532
0533 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
0534
0535 phba->link_state = LPFC_LINK_DOWN;
0536
0537
0538 if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
0539 psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
0540 if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
0541 psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
0542
0543
0544 if (phba->sli_rev != 3)
0545 lpfc_post_rcv_buf(phba);
0546
0547
0548
0549
0550 if (phba->intr_type == MSIX) {
0551 rc = lpfc_config_msi(phba, pmb);
0552 if (rc) {
0553 mempool_free(pmb, phba->mbox_mem_pool);
0554 return -EIO;
0555 }
0556 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
0557 if (rc != MBX_SUCCESS) {
0558 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0559 "0352 Config MSI mailbox command "
0560 "failed, mbxCmd x%x, mbxStatus x%x\n",
0561 pmb->u.mb.mbxCommand,
0562 pmb->u.mb.mbxStatus);
0563 mempool_free(pmb, phba->mbox_mem_pool);
0564 return -EIO;
0565 }
0566 }
0567
0568 spin_lock_irq(&phba->hbalock);
0569
0570 phba->hba_flag &= ~HBA_ERATT_HANDLED;
0571
0572
0573 if (lpfc_readl(phba->HCregaddr, &status)) {
0574 spin_unlock_irq(&phba->hbalock);
0575 return -EIO;
0576 }
0577 status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
0578 if (psli->num_rings > 0)
0579 status |= HC_R0INT_ENA;
0580 if (psli->num_rings > 1)
0581 status |= HC_R1INT_ENA;
0582 if (psli->num_rings > 2)
0583 status |= HC_R2INT_ENA;
0584 if (psli->num_rings > 3)
0585 status |= HC_R3INT_ENA;
0586
0587 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
0588 (phba->cfg_poll & DISABLE_FCP_RING_INT))
0589 status &= ~(HC_R0INT_ENA);
0590
0591 writel(status, phba->HCregaddr);
0592 readl(phba->HCregaddr);
0593 spin_unlock_irq(&phba->hbalock);
0594
0595
0596 timeout = phba->fc_ratov * 2;
0597 mod_timer(&vport->els_tmofunc,
0598 jiffies + msecs_to_jiffies(1000 * timeout));
0599
0600 mod_timer(&phba->hb_tmofunc,
0601 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
0602 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
0603 phba->last_completion_time = jiffies;
0604
0605 mod_timer(&phba->eratt_poll,
0606 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
0607
0608 if (phba->hba_flag & LINK_DISABLED) {
0609 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0610 "2598 Adapter Link is disabled.\n");
0611 lpfc_down_link(phba, pmb);
0612 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0613 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
0614 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
0615 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0616 "2599 Adapter failed to issue DOWN_LINK"
0617 " mbox command rc 0x%x\n", rc);
0618
0619 mempool_free(pmb, phba->mbox_mem_pool);
0620 return -EIO;
0621 }
0622 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
0623 mempool_free(pmb, phba->mbox_mem_pool);
0624 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
0625 if (rc)
0626 return rc;
0627 }
0628
0629 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0630 if (!pmb) {
0631 phba->link_state = LPFC_HBA_ERROR;
0632 return -ENOMEM;
0633 }
0634
0635 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
0636 pmb->mbox_cmpl = lpfc_config_async_cmpl;
0637 pmb->vport = phba->pport;
0638 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
0639
0640 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
0641 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0642 "0456 Adapter failed to issue "
0643 "ASYNCEVT_ENABLE mbox status x%x\n",
0644 rc);
0645 mempool_free(pmb, phba->mbox_mem_pool);
0646 }
0647
0648
0649 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0650 if (!pmb) {
0651 phba->link_state = LPFC_HBA_ERROR;
0652 return -ENOMEM;
0653 }
0654
0655 lpfc_dump_wakeup_param(phba, pmb);
0656 pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
0657 pmb->vport = phba->pport;
0658 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
0659
0660 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
0661 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0662 "0435 Adapter failed "
0663 "to get Option ROM version status x%x\n", rc);
0664 mempool_free(pmb, phba->mbox_mem_pool);
0665 }
0666
0667 return 0;
0668 }
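
/**
 * lpfc_sli4_refresh_params - Update parameters required for an SLI4 port
 * @phba: pointer to lpfc hba data structure.
 *
 * Re-issues the GET_SLI4_PARAMETERS mailbox command and refreshes the cached
 * MI, CMF and PLS capabilities, so the driver sees settings that may have
 * changed since the port was first configured.
 **/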
0677 int
0678 lpfc_sli4_refresh_params(struct lpfc_hba *phba)
0679 {
0680 LPFC_MBOXQ_t *mboxq;
0681 struct lpfc_mqe *mqe;
0682 struct lpfc_sli4_parameters *mbx_sli4_parameters;
0683 int length, rc;
0684
0685 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0686 if (!mboxq)
0687 return -ENOMEM;
0688
0689 mqe = &mboxq->u.mqe;
0690
0691 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
0692 sizeof(struct lpfc_sli4_cfg_mhdr));
0693 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
0694 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
0695 length, LPFC_SLI4_MBX_EMBED);
0696
0697 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
0698 if (unlikely(rc)) {
0699 mempool_free(mboxq, phba->mbox_mem_pool);
0700 return rc;
0701 }
0702 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
0703
0704
0705 if (phba->cfg_enable_mi)
0706 phba->sli4_hba.pc_sli4_params.mi_ver =
0707 bf_get(cfg_mi_ver, mbx_sli4_parameters);
0708 else
0709 phba->sli4_hba.pc_sli4_params.mi_ver = 0;
0710
0711 phba->sli4_hba.pc_sli4_params.cmf =
0712 bf_get(cfg_cmf, mbx_sli4_parameters);
0713 phba->sli4_hba.pc_sli4_params.pls =
0714 bf_get(cfg_pvl, mbx_sli4_parameters);
0715
0716 mempool_free(mboxq, phba->mbox_mem_pool);
0717 return rc;
0718 }
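
/**
 * lpfc_hba_init_link - Initialize the FC link with the configured topology
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Brings up the link using the topology configured for the adapter.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/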
0734 static int
0735 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
0736 {
0737 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
0738 }
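
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Validates the configured link speed against the adapter's supported speeds,
 * then issues the INIT_LINK mailbox command to bring up the link.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/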
0755 int
0756 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
0757 uint32_t flag)
0758 {
0759 struct lpfc_vport *vport = phba->pport;
0760 LPFC_MBOXQ_t *pmb;
0761 MAILBOX_t *mb;
0762 int rc;
0763
0764 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0765 if (!pmb) {
0766 phba->link_state = LPFC_HBA_ERROR;
0767 return -ENOMEM;
0768 }
0769 mb = &pmb->u.mb;
0770 pmb->vport = vport;
0771
0772 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
0773 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
0774 !(phba->lmt & LMT_1Gb)) ||
0775 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
0776 !(phba->lmt & LMT_2Gb)) ||
0777 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
0778 !(phba->lmt & LMT_4Gb)) ||
0779 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
0780 !(phba->lmt & LMT_8Gb)) ||
0781 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
0782 !(phba->lmt & LMT_10Gb)) ||
0783 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
0784 !(phba->lmt & LMT_16Gb)) ||
0785 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
0786 !(phba->lmt & LMT_32Gb)) ||
0787 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
0788 !(phba->lmt & LMT_64Gb))) {
0789
0790 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0791 "1302 Invalid speed for this board:%d "
0792 "Reset link speed to auto.\n",
0793 phba->cfg_link_speed);
0794 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
0795 }
0796 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
0797 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0798 if (phba->sli_rev < LPFC_SLI_REV4)
0799 lpfc_set_loopback_flag(phba);
0800 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
0801 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
0802 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0803 "0498 Adapter failed to init, mbxCmd x%x "
0804 "INIT_LINK, mbxStatus x%x\n",
0805 mb->mbxCommand, mb->mbxStatus);
0806 if (phba->sli_rev <= LPFC_SLI_REV3) {
0807
0808 writel(0, phba->HCregaddr);
0809 readl(phba->HCregaddr);
0810
0811 writel(0xffffffff, phba->HAregaddr);
0812 readl(phba->HAregaddr);
0813 }
0814 phba->link_state = LPFC_HBA_ERROR;
0815 if (rc != MBX_BUSY || flag == MBX_POLL)
0816 mempool_free(pmb, phba->mbox_mem_pool);
0817 return -EIO;
0818 }
0819 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
0820 if (flag == MBX_POLL)
0821 mempool_free(pmb, phba->mbox_mem_pool);
0822
0823 return 0;
0824 }
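
/**
 * lpfc_hba_down_link - Bring down the SLI link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Issues the DOWN_LINK mailbox command to bring the link down.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/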
0839 static int
0840 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
0841 {
0842 LPFC_MBOXQ_t *pmb;
0843 int rc;
0844
0845 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0846 if (!pmb) {
0847 phba->link_state = LPFC_HBA_ERROR;
0848 return -ENOMEM;
0849 }
0850
0851 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0852 "0491 Adapter Link is disabled.\n");
0853 lpfc_down_link(phba, pmb);
0854 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0855 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
0856 if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
0857 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0858 "2522 Adapter failed to issue DOWN_LINK"
0859 " mbox command rc 0x%x\n", rc);
0860
0861 mempool_free(pmb, phba->mbox_mem_pool);
0862 return -EIO;
0863 }
0864 if (flag == MBX_POLL)
0865 mempool_free(pmb, phba->mbox_mem_pool);
0866
0867 return 0;
0868 }
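
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * Disables host interrupts (SLI-3) and cleans up the discovery resources of
 * every vport before the HBA is reset.
 *
 * Return code: always 0.
 **/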
0881 int
0882 lpfc_hba_down_prep(struct lpfc_hba *phba)
0883 {
0884 struct lpfc_vport **vports;
0885 int i;
0886
0887 if (phba->sli_rev <= LPFC_SLI_REV3) {
0888
0889 writel(0, phba->HCregaddr);
0890 readl(phba->HCregaddr);
0891 }
0892
0893 if (phba->pport->load_flag & FC_UNLOADING)
0894 lpfc_cleanup_discovery_resources(phba->pport);
0895 else {
0896 vports = lpfc_create_vport_work_array(phba);
0897 if (vports != NULL)
0898 for (i = 0; i <= phba->max_vports &&
0899 vports[i] != NULL; i++)
0900 lpfc_cleanup_discovery_resources(vports[i]);
0901 lpfc_destroy_vport_work_array(phba, vports);
0902 }
0903 return 0;
0904 }
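
/**
 * lpfc_sli4_free_sp_events - Release all pending slow-path queue events
 * @phba: pointer to lpfc hba data structure.
 *
 * Drains the SLI4 slow-path queue event list, releasing the response iocbs
 * and receive buffers still attached to the queued completion events.
 **/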
0919 static void
0920 lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
0921 {
0922 struct lpfc_iocbq *rspiocbq;
0923 struct hbq_dmabuf *dmabuf;
0924 struct lpfc_cq_event *cq_event;
0925
0926 spin_lock_irq(&phba->hbalock);
0927 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
0928 spin_unlock_irq(&phba->hbalock);
0929
0930 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
0931
0932 spin_lock_irq(&phba->hbalock);
0933 list_remove_head(&phba->sli4_hba.sp_queue_event,
0934 cq_event, struct lpfc_cq_event, list);
0935 spin_unlock_irq(&phba->hbalock);
0936
0937 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
0938 case CQE_CODE_COMPL_WQE:
0939 rspiocbq = container_of(cq_event, struct lpfc_iocbq,
0940 cq_event);
0941 lpfc_sli_release_iocbq(phba, rspiocbq);
0942 break;
0943 case CQE_CODE_RECEIVE:
0944 case CQE_CODE_RECEIVE_V1:
0945 dmabuf = container_of(cq_event, struct hbq_dmabuf,
0946 cq_event);
0947 lpfc_in_buf_free(phba, &dmabuf->dbuf);
0948 }
0949 }
0950 }
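
/**
 * lpfc_hba_free_post_buf - Free the DMA buffers posted to the adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees either the HBQ buffers or, for non-HBQ configurations, the buffers
 * queued on the ELS ring's postbufq.
 **/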
0963 static void
0964 lpfc_hba_free_post_buf(struct lpfc_hba *phba)
0965 {
0966 struct lpfc_sli *psli = &phba->sli;
0967 struct lpfc_sli_ring *pring;
0968 struct lpfc_dmabuf *mp, *next_mp;
0969 LIST_HEAD(buflist);
0970 int count;
0971
0972 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
0973 lpfc_sli_hbqbuf_free_all(phba);
0974 else {
0975
0976 pring = &psli->sli3_ring[LPFC_ELS_RING];
0977 spin_lock_irq(&phba->hbalock);
0978 list_splice_init(&pring->postbufq, &buflist);
0979 spin_unlock_irq(&phba->hbalock);
0980
0981 count = 0;
0982 list_for_each_entry_safe(mp, next_mp, &buflist, list) {
0983 list_del(&mp->list);
0984 count++;
0985 lpfc_mbuf_free(phba, mp->virt, mp->phys);
0986 kfree(mp);
0987 }
0988
0989 spin_lock_irq(&phba->hbalock);
0990 pring->postbufq_cnt -= count;
0991 spin_unlock_irq(&phba->hbalock);
0992 }
0993 }
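
/**
 * lpfc_hba_clean_txcmplq - Flush all iocbs pending on the txcmplq
 * @phba: pointer to lpfc hba data structure.
 *
 * Removes every outstanding iocb from the txcmplq of each ring (SLI-3) or
 * work queue (SLI-4), aborts the rings, and completes the removed iocbs with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED status.
 **/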
1005 static void
1006 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
1007 {
1008 struct lpfc_sli *psli = &phba->sli;
1009 struct lpfc_queue *qp = NULL;
1010 struct lpfc_sli_ring *pring;
1011 LIST_HEAD(completions);
1012 int i;
1013 struct lpfc_iocbq *piocb, *next_iocb;
1014
1015 if (phba->sli_rev != LPFC_SLI_REV4) {
1016 for (i = 0; i < psli->num_rings; i++) {
1017 pring = &psli->sli3_ring[i];
1018 spin_lock_irq(&phba->hbalock);
1019
1020
1021
1022
1023 list_splice_init(&pring->txcmplq, &completions);
1024 pring->txcmplq_cnt = 0;
1025 spin_unlock_irq(&phba->hbalock);
1026
1027 lpfc_sli_abort_iocb_ring(phba, pring);
1028 }
1029
1030 lpfc_sli_cancel_iocbs(phba, &completions,
1031 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
1032 return;
1033 }
1034 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1035 pring = qp->pring;
1036 if (!pring)
1037 continue;
1038 spin_lock_irq(&pring->ring_lock);
1039 list_for_each_entry_safe(piocb, next_iocb,
1040 &pring->txcmplq, list)
1041 piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
1042 list_splice_init(&pring->txcmplq, &completions);
1043 pring->txcmplq_cnt = 0;
1044 spin_unlock_irq(&pring->ring_lock);
1045 lpfc_sli_abort_iocb_ring(phba, pring);
1046 }
1047
1048 lpfc_sli_cancel_iocbs(phba, &completions,
1049 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
1050 }
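
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-3 variant of the post-reset cleanup: frees posted buffers and flushes
 * the txcmplq.
 *
 * Return code: always 0.
 **/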
1063 static int
1064 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
1065 {
1066 lpfc_hba_free_post_buf(phba);
1067 lpfc_hba_clean_txcmplq(phba);
1068 return 0;
1069 }
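
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc hba data structure.
 *
 * SLI-4 variant of the post-reset cleanup: frees HBQ buffers, flushes the
 * txcmplq, recovers aborted ELS sgls and IO buffers, reposts NVMET context
 * buffers, and releases pending slow-path events.
 *
 * Return: number of IO buffers recovered from the aborted lists.
 **/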
1082 static int
1083 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1084 {
1085 struct lpfc_io_buf *psb, *psb_next;
1086 struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
1087 struct lpfc_sli4_hdw_queue *qp;
1088 LIST_HEAD(aborts);
1089 LIST_HEAD(nvme_aborts);
1090 LIST_HEAD(nvmet_aborts);
1091 struct lpfc_sglq *sglq_entry = NULL;
1092 int cnt, idx;
1093
1094
1095 lpfc_sli_hbqbuf_free_all(phba);
1096 lpfc_hba_clean_txcmplq(phba);
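
/*
 * Reclaim the ELS sgls that were out for aborted exchanges: mark the sglq
 * entries freed and splice them back onto the free ELS sgl list.
 */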
1108 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
1109 list_for_each_entry(sglq_entry,
1110 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1111 sglq_entry->state = SGL_FREED;
1112
1113 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
1114 &phba->sli4_hba.lpfc_els_sgl_list);
1115
1116
1117 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
1118
1119
1120
1121
1122 spin_lock_irq(&phba->hbalock);
1123 cnt = 0;
1124 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
1125 qp = &phba->sli4_hba.hdwq[idx];
1126
1127 spin_lock(&qp->abts_io_buf_list_lock);
1128 list_splice_init(&qp->lpfc_abts_io_buf_list,
1129 &aborts);
1130
1131 list_for_each_entry_safe(psb, psb_next, &aborts, list) {
1132 psb->pCmd = NULL;
1133 psb->status = IOSTAT_SUCCESS;
1134 cnt++;
1135 }
1136 spin_lock(&qp->io_buf_list_put_lock);
1137 list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
1138 qp->put_io_bufs += qp->abts_scsi_io_bufs;
1139 qp->put_io_bufs += qp->abts_nvme_io_bufs;
1140 qp->abts_scsi_io_bufs = 0;
1141 qp->abts_nvme_io_bufs = 0;
1142 spin_unlock(&qp->io_buf_list_put_lock);
1143 spin_unlock(&qp->abts_io_buf_list_lock);
1144 }
1145 spin_unlock_irq(&phba->hbalock);
1146
1147 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1148 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1149 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1150 &nvmet_aborts);
1151 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1152 list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
1153 ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
1154 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1155 }
1156 }
1157
1158 lpfc_sli4_free_sp_events(phba);
1159 return cnt;
1160 }
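
/**
 * lpfc_hba_down_post - Wrapper for the post-reset uninitialization routine
 * @phba: pointer to lpfc hba data structure.
 *
 * Invokes the SLI-3 or SLI-4 specific routine through the
 * phba->lpfc_hba_down_post function pointer.
 **/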
1173 int
1174 lpfc_hba_down_post(struct lpfc_hba *phba)
1175 {
1176 return (*phba->lpfc_hba_down_post)(phba);
1177 }
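
/**
 * lpfc_hb_timeout - The HBA heartbeat timeout handler
 * @t: timer context used to obtain the lpfc hba data structure.
 *
 * Posts a WORKER_HB_TMO event to the worker thread, which performs the
 * actual heartbeat processing outside interrupt context.
 **/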
1191 static void
1192 lpfc_hb_timeout(struct timer_list *t)
1193 {
1194 struct lpfc_hba *phba;
1195 uint32_t tmo_posted;
1196 unsigned long iflag;
1197
1198 phba = from_timer(phba, t, hb_tmofunc);
1199
1200
1201 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1202 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1203 if (!tmo_posted)
1204 phba->pport->work_port_events |= WORKER_HB_TMO;
1205 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1206
1207
1208 if (!tmo_posted)
1209 lpfc_worker_wake_up(phba);
1210 return;
1211 }
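
/**
 * lpfc_rrq_timeout - The RRQ timeout handler
 * @t: timer context used to obtain the lpfc hba data structure.
 *
 * Marks HBA_RRQ_ACTIVE (unless the driver is unloading) and wakes the worker
 * thread to process the RRQ active list.
 **/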
1225 static void
1226 lpfc_rrq_timeout(struct timer_list *t)
1227 {
1228 struct lpfc_hba *phba;
1229 unsigned long iflag;
1230
1231 phba = from_timer(phba, t, rrq_tmr);
1232 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1233 if (!(phba->pport->load_flag & FC_UNLOADING))
1234 phba->hba_flag |= HBA_RRQ_ACTIVE;
1235 else
1236 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1237 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1238
1239 if (!(phba->pport->load_flag & FC_UNLOADING))
1240 lpfc_worker_wake_up(phba);
1241 }
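
/**
 * lpfc_hb_mbox_cmpl - The heartbeat mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Clears the heartbeat-outstanding flags, frees the mailbox and, if the port
 * is still online, rearms the heartbeat timer.
 **/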
1259 static void
1260 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1261 {
1262 unsigned long drvr_flag;
1263
1264 spin_lock_irqsave(&phba->hbalock, drvr_flag);
1265 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
1266 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1267
1268
1269 mempool_free(pmboxq, phba->mbox_mem_pool);
1270 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1271 !(phba->link_state == LPFC_HBA_ERROR) &&
1272 !(phba->pport->load_flag & FC_UNLOADING))
1273 mod_timer(&phba->hb_tmofunc,
1274 jiffies +
1275 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1276 return;
1277 }
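
/**
 * lpfc_idle_stat_delay_work - Periodic per-CPU idle statistics evaluation
 * @work: pointer to the delayed work structure.
 *
 * Samples per-CPU idle time and switches each IO completion queue between
 * LPFC_QUEUE_WORK and LPFC_IRQ_POLL based on how busy its CPU is, then
 * reschedules itself.
 **/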
1287 static void
1288 lpfc_idle_stat_delay_work(struct work_struct *work)
1289 {
1290 struct lpfc_hba *phba = container_of(to_delayed_work(work),
1291 struct lpfc_hba,
1292 idle_stat_delay_work);
1293 struct lpfc_queue *cq;
1294 struct lpfc_sli4_hdw_queue *hdwq;
1295 struct lpfc_idle_stat *idle_stat;
1296 u32 i, idle_percent;
1297 u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
1298
1299 if (phba->pport->load_flag & FC_UNLOADING)
1300 return;
1301
1302 if (phba->link_state == LPFC_HBA_ERROR ||
1303 phba->pport->fc_flag & FC_OFFLINE_MODE ||
1304 phba->cmf_active_mode != LPFC_CFG_OFF)
1305 goto requeue;
1306
1307 for_each_present_cpu(i) {
1308 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
1309 cq = hdwq->io_cq;
1310
1311
1312 if (cq->chann != i)
1313 continue;
1314
1315 idle_stat = &phba->sli4_hba.idle_stat[i];
1316
1317
1318
1319
1320
1321
1322
1323 wall_idle = get_cpu_idle_time(i, &wall, 1);
1324 diff_idle = wall_idle - idle_stat->prev_idle;
1325 diff_wall = wall - idle_stat->prev_wall;
1326
1327 if (diff_wall <= diff_idle)
1328 busy_time = 0;
1329 else
1330 busy_time = diff_wall - diff_idle;
1331
1332 idle_percent = div64_u64(100 * busy_time, diff_wall);
1333 idle_percent = 100 - idle_percent;
1334
1335 if (idle_percent < 15)
1336 cq->poll_mode = LPFC_QUEUE_WORK;
1337 else
1338 cq->poll_mode = LPFC_IRQ_POLL;
1339
1340 idle_stat->prev_idle = wall_idle;
1341 idle_stat->prev_wall = wall;
1342 }
1343
1344 requeue:
1345 schedule_delayed_work(&phba->idle_stat_delay_work,
1346 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
1347 }
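
/**
 * lpfc_hb_eq_delay_work - Periodic EQ interrupt-delay (coalescing) update
 * @work: pointer to the delayed work structure.
 *
 * Recomputes the automatic interrupt-delay setting for each event queue from
 * the interrupt counts seen since the last run, then reschedules itself.
 **/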
1349 static void
1350 lpfc_hb_eq_delay_work(struct work_struct *work)
1351 {
1352 struct lpfc_hba *phba = container_of(to_delayed_work(work),
1353 struct lpfc_hba, eq_delay_work);
1354 struct lpfc_eq_intr_info *eqi, *eqi_new;
1355 struct lpfc_queue *eq, *eq_next;
1356 unsigned char *ena_delay = NULL;
1357 uint32_t usdelay;
1358 int i;
1359
1360 if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1361 return;
1362
1363 if (phba->link_state == LPFC_HBA_ERROR ||
1364 phba->pport->fc_flag & FC_OFFLINE_MODE)
1365 goto requeue;
1366
1367 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1368 GFP_KERNEL);
1369 if (!ena_delay)
1370 goto requeue;
1371
1372 for (i = 0; i < phba->cfg_irq_chann; i++) {
1373
1374 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1375 if (!eq)
1376 continue;
1377 if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
1378 eq->q_flag &= ~HBA_EQ_DELAY_CHK;
1379 ena_delay[eq->last_cpu] = 1;
1380 }
1381 }
1382
1383 for_each_present_cpu(i) {
1384 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1385 if (ena_delay[i]) {
1386 usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
1387 if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1388 usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1389 } else {
1390 usdelay = 0;
1391 }
1392
1393 eqi->icnt = 0;
1394
1395 list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1396 if (unlikely(eq->last_cpu != i)) {
1397 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1398 eq->last_cpu);
1399 list_move_tail(&eq->cpu_list, &eqi_new->list);
1400 continue;
1401 }
1402 if (usdelay != eq->q_mode)
1403 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1404 usdelay);
1405 }
1406 }
1407
1408 kfree(ena_delay);
1409
1410 requeue:
1411 queue_delayed_work(phba->wq, &phba->eq_delay_work,
1412 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1413 }
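
/**
 * lpfc_hb_mxp_handler - Multi-XRI pool handler run from the heartbeat
 * @phba: pointer to lpfc hba data structure.
 *
 * For every hardware queue, adjusts the private XRI pool count and high
 * watermark (and captures MXP statistics when LPFC_MXP_STAT is defined).
 **/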
1422 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1423 {
1424 u32 i;
1425 u32 hwq_count;
1426
1427 hwq_count = phba->cfg_hdw_queue;
1428 for (i = 0; i < hwq_count; i++) {
1429
1430 lpfc_adjust_pvt_pool_count(phba, i);
1431
1432
1433 lpfc_adjust_high_watermark(phba, i);
1434
1435 #ifdef LPFC_MXP_STAT
1436
1437 lpfc_snapshot_mxp(phba, i);
1438 #endif
1439 }
1440 }
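
/**
 * lpfc_issue_hb_mbox - Issue a heartbeat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues the heartbeat mailbox command if one is not already outstanding and
 * sets HBA_HBEAT_INP on success.
 *
 * Return codes
 *   0 - success, or a heartbeat is already in progress.
 *   -ENOMEM / -ENXIO - allocation or issue failure.
 **/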
1450 int
1451 lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1452 {
1453 LPFC_MBOXQ_t *pmboxq;
1454 int retval;
1455
1456
1457 if (phba->hba_flag & HBA_HBEAT_INP)
1458 return 0;
1459
1460 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1461 if (!pmboxq)
1462 return -ENOMEM;
1463
1464 lpfc_heart_beat(phba, pmboxq);
1465 pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1466 pmboxq->vport = phba->pport;
1467 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1468
1469 if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1470 mempool_free(pmboxq, phba->mbox_mem_pool);
1471 return -ENXIO;
1472 }
1473 phba->hba_flag |= HBA_HBEAT_INP;
1474
1475 return 0;
1476 }
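
/**
 * lpfc_issue_hb_tmo - Request a heartbeat on the next timer pop
 * @phba: pointer to lpfc hba data structure.
 *
 * When the heartbeat mailbox is disabled, sets HBA_HBEAT_TMO so that the
 * next heartbeat timeout issues a heartbeat to check the port state.
 **/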
1488 void
1489 lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1490 {
1491 if (phba->cfg_enable_hba_heartbeat)
1492 return;
1493 phba->hba_flag |= HBA_HBEAT_TMO;
1494 }
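
/**
 * lpfc_hb_timeout_handler - The HBA heartbeat timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Worker-thread handler for the heartbeat timer: runs the multi-XRI pool
 * handler, checks receive sequence and FDMI state on each vport, trims the
 * ELS buffer list, and either issues a heartbeat mailbox command or reports
 * a heartbeat still outstanding, then rearms the timer.
 **/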
1512 void
1513 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1514 {
1515 struct lpfc_vport **vports;
1516 struct lpfc_dmabuf *buf_ptr;
1517 int retval = 0;
1518 int i, tmo;
1519 struct lpfc_sli *psli = &phba->sli;
1520 LIST_HEAD(completions);
1521
1522 if (phba->cfg_xri_rebalancing) {
1523
1524 lpfc_hb_mxp_handler(phba);
1525 }
1526
1527 vports = lpfc_create_vport_work_array(phba);
1528 if (vports != NULL)
1529 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1530 lpfc_rcv_seq_check_edtov(vports[i]);
1531 lpfc_fdmi_change_check(vports[i]);
1532 }
1533 lpfc_destroy_vport_work_array(phba, vports);
1534
1535 if ((phba->link_state == LPFC_HBA_ERROR) ||
1536 (phba->pport->load_flag & FC_UNLOADING) ||
1537 (phba->pport->fc_flag & FC_OFFLINE_MODE))
1538 return;
1539
1540 if (phba->elsbuf_cnt &&
1541 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1542 spin_lock_irq(&phba->hbalock);
1543 list_splice_init(&phba->elsbuf, &completions);
1544 phba->elsbuf_cnt = 0;
1545 phba->elsbuf_prev_cnt = 0;
1546 spin_unlock_irq(&phba->hbalock);
1547
1548 while (!list_empty(&completions)) {
1549 list_remove_head(&completions, buf_ptr,
1550 struct lpfc_dmabuf, list);
1551 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1552 kfree(buf_ptr);
1553 }
1554 }
1555 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1556
1557
1558 if (phba->cfg_enable_hba_heartbeat) {
1559
1560 spin_lock_irq(&phba->pport->work_port_lock);
1561 if (time_after(phba->last_completion_time +
1562 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1563 jiffies)) {
1564 spin_unlock_irq(&phba->pport->work_port_lock);
1565 if (phba->hba_flag & HBA_HBEAT_INP)
1566 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1567 else
1568 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1569 goto out;
1570 }
1571 spin_unlock_irq(&phba->pport->work_port_lock);
1572
1573
1574 if (phba->hba_flag & HBA_HBEAT_INP) {
1575
1576
1577
1578
1579
1580 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1581 "0459 Adapter heartbeat still outstanding: "
1582 "last compl time was %d ms.\n",
1583 jiffies_to_msecs(jiffies
1584 - phba->last_completion_time));
1585 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1586 } else {
1587 if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1588 (list_empty(&psli->mboxq))) {
1589
1590 retval = lpfc_issue_hb_mbox(phba);
1591 if (retval) {
1592 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1593 goto out;
1594 }
1595 phba->skipped_hb = 0;
1596 } else if (time_before_eq(phba->last_completion_time,
1597 phba->skipped_hb)) {
1598 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1599 "2857 Last completion time not "
1600 " updated in %d ms\n",
1601 jiffies_to_msecs(jiffies
1602 - phba->last_completion_time));
1603 } else
1604 phba->skipped_hb = jiffies;
1605
1606 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1607 goto out;
1608 }
1609 } else {
1610
1611 if (phba->hba_flag & HBA_HBEAT_TMO) {
1612 retval = lpfc_issue_hb_mbox(phba);
1613 if (retval)
1614 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1615 else
1616 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1617 goto out;
1618 }
1619 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1620 }
1621 out:
1622 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
1623 }
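
/**
 * lpfc_offline_eratt - Bring the SLI-3 port offline after an error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the port offline, resets the board, completes the post-reset
 * cleanup, and leaves the port in LPFC_HBA_ERROR state.
 **/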
1632 static void
1633 lpfc_offline_eratt(struct lpfc_hba *phba)
1634 {
1635 struct lpfc_sli *psli = &phba->sli;
1636
1637 spin_lock_irq(&phba->hbalock);
1638 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1639 spin_unlock_irq(&phba->hbalock);
1640 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1641
1642 lpfc_offline(phba);
1643 lpfc_reset_barrier(phba);
1644 spin_lock_irq(&phba->hbalock);
1645 lpfc_sli_brdreset(phba);
1646 spin_unlock_irq(&phba->hbalock);
1647 lpfc_hba_down_post(phba);
1648 lpfc_sli_brdready(phba, HS_MBRDY);
1649 lpfc_unblock_mgmt_io(phba);
1650 phba->link_state = LPFC_HBA_ERROR;
1651 return;
1652 }
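
/**
 * lpfc_sli4_offline_eratt - Bring the SLI-4 port offline after an error
 * @phba: pointer to lpfc hba data structure.
 *
 * Flushes outstanding IO, takes the port offline and completes the
 * post-reset cleanup; does nothing if a PCI error has already put the port
 * in error state.
 **/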
1661 void
1662 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1663 {
1664 spin_lock_irq(&phba->hbalock);
1665 if (phba->link_state == LPFC_HBA_ERROR &&
1666 test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
1667 spin_unlock_irq(&phba->hbalock);
1668 return;
1669 }
1670 phba->link_state = LPFC_HBA_ERROR;
1671 spin_unlock_irq(&phba->hbalock);
1672
1673 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1674 lpfc_sli_flush_io_rings(phba);
1675 lpfc_offline(phba);
1676 lpfc_hba_down_post(phba);
1677 lpfc_unblock_mgmt_io(phba);
1678 }
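
/**
 * lpfc_handle_deferred_eratt - The deferred error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Aborts outstanding FCP IO, takes the port offline, waits for the adapter
 * to clear HS_FFER1 (or for an unplug/unload), then restores the original
 * host status so the error can be processed normally.
 **/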
1689 static void
1690 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1691 {
1692 uint32_t old_host_status = phba->work_hs;
1693 struct lpfc_sli *psli = &phba->sli;
1694
1695
1696
1697
1698 if (pci_channel_offline(phba->pcidev)) {
1699 spin_lock_irq(&phba->hbalock);
1700 phba->hba_flag &= ~DEFER_ERATT;
1701 spin_unlock_irq(&phba->hbalock);
1702 return;
1703 }
1704
1705 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1706 "0479 Deferred Adapter Hardware Error "
1707 "Data: x%x x%x x%x\n",
1708 phba->work_hs, phba->work_status[0],
1709 phba->work_status[1]);
1710
1711 spin_lock_irq(&phba->hbalock);
1712 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1713 spin_unlock_irq(&phba->hbalock);
1714
1715
1716
1717
1718
1719
1720
1721 lpfc_sli_abort_fcp_rings(phba);
1722
1723
1724
1725
1726
1727 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1728 lpfc_offline(phba);
1729
1730
1731 while (phba->work_hs & HS_FFER1) {
1732 msleep(100);
1733 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
phba->work_hs = UNPLUG_ERR;
1735 break;
1736 }
1737
1738 if (phba->pport->load_flag & FC_UNLOADING) {
1739 phba->work_hs = 0;
1740 break;
1741 }
1742 }
1743
1744
1745
1746
1747
1748
1749 if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1750 phba->work_hs = old_host_status & ~HS_FFER1;
1751
1752 spin_lock_irq(&phba->hbalock);
1753 phba->hba_flag &= ~DEFER_ERATT;
1754 spin_unlock_irq(&phba->hbalock);
1755 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1756 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1757 }
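
/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to the management app
 * @phba: pointer to lpfc hba data structure.
 *
 * Sends an FC_REG_BOARD_EVENT/LPFC_EVENT_PORTINTERR vendor event through the
 * FC transport so management applications learn of the board error.
 **/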
1759 static void
1760 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1761 {
1762 struct lpfc_board_event_header board_event;
1763 struct Scsi_Host *shost;
1764
1765 board_event.event_type = FC_REG_BOARD_EVENT;
1766 board_event.subcategory = LPFC_EVENT_PORTINTERR;
1767 shost = lpfc_shost_from_vport(phba->pport);
1768 fc_host_post_vendor_event(shost, fc_get_event_number(),
1769 sizeof(board_event),
1770 (char *) &board_event,
1771 LPFC_NL_VENDOR_ID);
1772 }
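
/**
 * lpfc_handle_eratt_s3 - The SLI-3 HBA error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Handles SLI-3 error attentions: re-establishes the link for recoverable
 * firmware errors (HS_FFER6/HS_FFER8), takes the port offline on a critical
 * over-temperature condition, and otherwise reports an unrecoverable
 * hardware error to the management application.
 **/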
1784 static void
1785 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1786 {
1787 struct lpfc_vport *vport = phba->pport;
1788 struct lpfc_sli *psli = &phba->sli;
1789 uint32_t event_data;
1790 unsigned long temperature;
1791 struct temp_event temp_event_data;
1792 struct Scsi_Host *shost;
1793
1794
1795
1796
1797 if (pci_channel_offline(phba->pcidev)) {
1798 spin_lock_irq(&phba->hbalock);
1799 phba->hba_flag &= ~DEFER_ERATT;
1800 spin_unlock_irq(&phba->hbalock);
1801 return;
1802 }
1803
1804
1805 if (!phba->cfg_enable_hba_reset)
1806 return;
1807
1808
1809 lpfc_board_errevt_to_mgmt(phba);
1810
1811 if (phba->hba_flag & DEFER_ERATT)
1812 lpfc_handle_deferred_eratt(phba);
1813
1814 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1815 if (phba->work_hs & HS_FFER6)
1816
1817 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1818 "1301 Re-establishing Link "
1819 "Data: x%x x%x x%x\n",
1820 phba->work_hs, phba->work_status[0],
1821 phba->work_status[1]);
1822 if (phba->work_hs & HS_FFER8)
1823
1824 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1825 "2861 Host Authentication device "
1826 "zeroization Data:x%x x%x x%x\n",
1827 phba->work_hs, phba->work_status[0],
1828 phba->work_status[1]);
1829
1830 spin_lock_irq(&phba->hbalock);
1831 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1832 spin_unlock_irq(&phba->hbalock);
1833
1834
1835
1836
1837
1838
1839
1840 lpfc_sli_abort_fcp_rings(phba);
1841
1842
1843
1844
1845
1846 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1847 lpfc_offline(phba);
1848 lpfc_sli_brdrestart(phba);
1849 if (lpfc_online(phba) == 0) {
1850 lpfc_unblock_mgmt_io(phba);
1851 return;
1852 }
1853 lpfc_unblock_mgmt_io(phba);
1854 } else if (phba->work_hs & HS_CRIT_TEMP) {
1855 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1856 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1857 temp_event_data.event_code = LPFC_CRIT_TEMP;
1858 temp_event_data.data = (uint32_t)temperature;
1859
1860 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1861 "0406 Adapter maximum temperature exceeded "
1862 "(%ld), taking this port offline "
1863 "Data: x%x x%x x%x\n",
1864 temperature, phba->work_hs,
1865 phba->work_status[0], phba->work_status[1]);
1866
1867 shost = lpfc_shost_from_vport(phba->pport);
1868 fc_host_post_vendor_event(shost, fc_get_event_number(),
1869 sizeof(temp_event_data),
1870 (char *) &temp_event_data,
1871 SCSI_NL_VID_TYPE_PCI
1872 | PCI_VENDOR_ID_EMULEX);
1873
1874 spin_lock_irq(&phba->hbalock);
1875 phba->over_temp_state = HBA_OVER_TEMP;
1876 spin_unlock_irq(&phba->hbalock);
1877 lpfc_offline_eratt(phba);
1878
1879 } else {
1880
1881
1882
1883
1884 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1885 "0457 Adapter Hardware Error "
1886 "Data: x%x x%x x%x\n",
1887 phba->work_hs,
1888 phba->work_status[0], phba->work_status[1]);
1889
1890 event_data = FC_REG_DUMP_EVENT;
1891 shost = lpfc_shost_from_vport(vport);
1892 fc_host_post_vendor_event(shost, fc_get_event_number(),
1893 sizeof(event_data), (char *) &event_data,
1894 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1895
1896 lpfc_offline_eratt(phba);
1897 }
1898 return;
1899 }
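
/**
 * lpfc_sli4_port_sta_fn_reset - Perform an SLI-4 function reset for recovery
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: mailbox shutdown action (LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT).
 * @en_rn_msg: enable the "Reset Needed" log message.
 *
 * Takes the port offline, restarts the board, re-enables interrupts and
 * brings the port back online as part of error-attention recovery.
 *
 * Return code: 0 on success, non-zero on failure.
 **/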
1912 static int
1913 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1914 bool en_rn_msg)
1915 {
1916 int rc;
1917 uint32_t intr_mode;
1918 LPFC_MBOXQ_t *mboxq;
1919
1920 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1921 LPFC_SLI_INTF_IF_TYPE_2) {
1922
1923
1924
1925
1926 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1927 if (rc)
1928 return rc;
1929 }
1930
1931
1932 if (en_rn_msg)
1933 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1934 "2887 Reset Needed: Attempting Port "
1935 "Recovery...\n");
1936
1937
1938
1939
1940
1941 if (mbx_action == LPFC_MBX_NO_WAIT) {
1942 spin_lock_irq(&phba->hbalock);
1943 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
1944 if (phba->sli.mbox_active) {
1945 mboxq = phba->sli.mbox_active;
1946 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
1947 __lpfc_mbox_cmpl_put(phba, mboxq);
1948 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1949 phba->sli.mbox_active = NULL;
1950 }
1951 spin_unlock_irq(&phba->hbalock);
1952 }
1953
1954 lpfc_offline_prep(phba, mbx_action);
1955 lpfc_sli_flush_io_rings(phba);
1956 lpfc_offline(phba);
1957
1958 lpfc_sli4_disable_intr(phba);
1959 rc = lpfc_sli_brdrestart(phba);
1960 if (rc) {
1961 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1962 "6309 Failed to restart board\n");
1963 return rc;
1964 }
1965
1966 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1967 if (intr_mode == LPFC_INTR_ERROR) {
1968 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1969 "3175 Failed to enable interrupt\n");
1970 return -EIO;
1971 }
1972 phba->intr_mode = intr_mode;
1973 rc = lpfc_online(phba);
1974 if (rc == 0)
1975 lpfc_unblock_mgmt_io(phba);
1976
1977 return rc;
1978 }
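
/**
 * lpfc_handle_eratt_s4 - The SLI-4 HBA error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Decodes the SLI-4 port status registers and attempts recovery: waits for a
 * recoverable unrecoverable-error (UE) window on if_type 0 ports, handles
 * over-temperature, firmware update, forced dump and provisioning events on
 * if_type 2/6 ports, performs a port/function reset where allowed, and posts
 * a dump event to the management application.
 **/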
1987 static void
1988 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1989 {
1990 struct lpfc_vport *vport = phba->pport;
1991 uint32_t event_data;
1992 struct Scsi_Host *shost;
1993 uint32_t if_type;
1994 struct lpfc_register portstat_reg = {0};
1995 uint32_t reg_err1, reg_err2;
1996 uint32_t uerrlo_reg, uemasklo_reg;
1997 uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1998 bool en_rn_msg = true;
1999 struct temp_event temp_event_data;
2000 struct lpfc_register portsmphr_reg;
2001 int rc, i;
2002
2003
2004
2005
2006 if (pci_channel_offline(phba->pcidev)) {
2007 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2008 "3166 pci channel is offline\n");
2009 lpfc_sli_flush_io_rings(phba);
2010 return;
2011 }
2012
2013 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2014 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2015 switch (if_type) {
2016 case LPFC_SLI_INTF_IF_TYPE_0:
2017 pci_rd_rc1 = lpfc_readl(
2018 phba->sli4_hba.u.if_type0.UERRLOregaddr,
2019 &uerrlo_reg);
2020 pci_rd_rc2 = lpfc_readl(
2021 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
2022 &uemasklo_reg);
2023
2024 if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
2025 return;
2026 if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
2027 lpfc_sli4_offline_eratt(phba);
2028 return;
2029 }
2030 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2031 "7623 Checking UE recoverable");
2032
2033 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
2034 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2035 &portsmphr_reg.word0))
2036 continue;
2037
2038 smphr_port_status = bf_get(lpfc_port_smphr_port_status,
2039 &portsmphr_reg);
2040 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2041 LPFC_PORT_SEM_UE_RECOVERABLE)
2042 break;
2043
2044 msleep(1000);
2045 }
2046
2047 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2048 "4827 smphr_port_status x%x : Waited %dSec",
2049 smphr_port_status, i);
2050
2051
2052 if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2053 LPFC_PORT_SEM_UE_RECOVERABLE) {
2054 for (i = 0; i < 20; i++) {
2055 msleep(1000);
2056 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2057 &portsmphr_reg.word0) &&
2058 (LPFC_POST_STAGE_PORT_READY ==
2059 bf_get(lpfc_port_smphr_port_status,
2060 &portsmphr_reg))) {
2061 rc = lpfc_sli4_port_sta_fn_reset(phba,
2062 LPFC_MBX_NO_WAIT, en_rn_msg);
2063 if (rc == 0)
2064 return;
2065 lpfc_printf_log(phba, KERN_ERR,
2066 LOG_TRACE_EVENT,
2067 "4215 Failed to recover UE");
2068 break;
2069 }
2070 }
2071 }
2072 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2073 "7624 Firmware not ready: Failing UE recovery,"
2074 " waited %dSec", i);
2075 phba->link_state = LPFC_HBA_ERROR;
2076 break;
2077
2078 case LPFC_SLI_INTF_IF_TYPE_2:
2079 case LPFC_SLI_INTF_IF_TYPE_6:
2080 pci_rd_rc1 = lpfc_readl(
2081 phba->sli4_hba.u.if_type2.STATUSregaddr,
2082 &portstat_reg.word0);
2083
2084 if (pci_rd_rc1 == -EIO) {
2085 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2086 "3151 PCI bus read access failure: x%x\n",
2087 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2088 lpfc_sli4_offline_eratt(phba);
2089 return;
2090 }
2091 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2092 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2093 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2094 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2095 "2889 Port Overtemperature event, "
2096 "taking port offline Data: x%x x%x\n",
2097 reg_err1, reg_err2);
2098
2099 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2100 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2101 temp_event_data.event_code = LPFC_CRIT_TEMP;
2102 temp_event_data.data = 0xFFFFFFFF;
2103
2104 shost = lpfc_shost_from_vport(phba->pport);
2105 fc_host_post_vendor_event(shost, fc_get_event_number(),
2106 sizeof(temp_event_data),
2107 (char *)&temp_event_data,
2108 SCSI_NL_VID_TYPE_PCI
2109 | PCI_VENDOR_ID_EMULEX);
2110
2111 spin_lock_irq(&phba->hbalock);
2112 phba->over_temp_state = HBA_OVER_TEMP;
2113 spin_unlock_irq(&phba->hbalock);
2114 lpfc_sli4_offline_eratt(phba);
2115 return;
2116 }
2117 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2118 reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2119 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2120 "3143 Port Down: Firmware Update "
2121 "Detected\n");
2122 en_rn_msg = false;
2123 } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2124 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2125 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2126 "3144 Port Down: Debug Dump\n");
2127 else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2128 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2129 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2130 "3145 Port Down: Provisioning\n");
2131
2132
2133 if (!phba->cfg_enable_hba_reset)
2134 return;
2135
2136
2137 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2138 en_rn_msg);
2139 if (rc == 0) {
2140
2141 if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2142 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2143 return;
2144 else
2145 break;
2146 }
2147
2148 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2149 "3152 Unrecoverable error\n");
2150 phba->link_state = LPFC_HBA_ERROR;
2151 break;
2152 case LPFC_SLI_INTF_IF_TYPE_1:
2153 default:
2154 break;
2155 }
2156 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2157 "3123 Report dump event to upper layer\n");
2158
2159 lpfc_board_errevt_to_mgmt(phba);
2160
2161 event_data = FC_REG_DUMP_EVENT;
2162 shost = lpfc_shost_from_vport(vport);
2163 fc_host_post_vendor_event(shost, fc_get_event_number(),
2164 sizeof(event_data), (char *) &event_data,
2165 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2166 }
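
/**
 * lpfc_handle_eratt - Wrapper for the HBA error attention handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Invokes the SLI-3 or SLI-4 specific handler through the
 * phba->lpfc_handle_eratt function pointer.
 **/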
2179 void
2180 lpfc_handle_eratt(struct lpfc_hba *phba)
2181 {
2182 (*phba->lpfc_handle_eratt)(phba);
2183 }
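
/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Flushes outstanding ELS commands and issues a READ_TOPOLOGY mailbox
 * command to process the link attention event; on failure, clears the
 * attention, marks the link down and puts the HBA in error state.
 **/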
2192 void
2193 lpfc_handle_latt(struct lpfc_hba *phba)
2194 {
2195 struct lpfc_vport *vport = phba->pport;
2196 struct lpfc_sli *psli = &phba->sli;
2197 LPFC_MBOXQ_t *pmb;
2198 volatile uint32_t control;
2199 int rc = 0;
2200
2201 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2202 if (!pmb) {
2203 rc = 1;
2204 goto lpfc_handle_latt_err_exit;
2205 }
2206
2207 rc = lpfc_mbox_rsrc_prep(phba, pmb);
2208 if (rc) {
2209 rc = 2;
2210 mempool_free(pmb, phba->mbox_mem_pool);
2211 goto lpfc_handle_latt_err_exit;
2212 }
2213
2214
2215 lpfc_els_flush_all_cmd(phba);
2216 psli->slistat.link_event++;
2217 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
2218 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2219 pmb->vport = vport;
2220
2221 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2222 rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
2223 if (rc == MBX_NOT_FINISHED) {
2224 rc = 4;
2225 goto lpfc_handle_latt_free_mbuf;
2226 }
2227
2228
2229 spin_lock_irq(&phba->hbalock);
2230 writel(HA_LATT, phba->HAregaddr);
2231 readl(phba->HAregaddr);
2232 spin_unlock_irq(&phba->hbalock);
2233
2234 return;
2235
2236 lpfc_handle_latt_free_mbuf:
2237 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2238 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2239 lpfc_handle_latt_err_exit:
2240
2241 spin_lock_irq(&phba->hbalock);
2242 psli->sli_flag |= LPFC_PROCESS_LA;
2243 control = readl(phba->HCregaddr);
2244 control |= HC_LAINT_ENA;
2245 writel(control, phba->HCregaddr);
2246 readl(phba->HCregaddr);
2247
2248
2249 writel(HA_LATT, phba->HAregaddr);
2250 readl(phba->HAregaddr);
2251 spin_unlock_irq(&phba->hbalock);
2252 lpfc_linkdown(phba);
2253 phba->link_state = LPFC_HBA_ERROR;
2254
2255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2256 "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2257
2258 return;
2259 }
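
/**
 * lpfc_parse_vpd - Parse the Vital Product Data
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data buffer.
 * @len: length of the vital product data in bytes.
 *
 * Walks the VPD descriptors and extracts the serial number, model name and
 * description, program type and port identifier into the hba structure.
 *
 * Return: 0 if @vpd is NULL, otherwise 1.
 **/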
2275 int
2276 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2277 {
2278 uint8_t lenlo, lenhi;
2279 int Length;
2280 int i, j;
2281 int finished = 0;
2282 int index = 0;
2283
2284 if (!vpd)
2285 return 0;
2286
2287
2288 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2289 "0455 Vital Product Data: x%x x%x x%x x%x\n",
2290 (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2291 (uint32_t) vpd[3]);
2292 while (!finished && (index < (len - 4))) {
2293 switch (vpd[index]) {
2294 case 0x82:
2295 case 0x91:
2296 index += 1;
2297 lenlo = vpd[index];
2298 index += 1;
2299 lenhi = vpd[index];
2300 index += 1;
2301 i = ((((unsigned short)lenhi) << 8) + lenlo);
2302 index += i;
2303 break;
2304 case 0x90:
2305 index += 1;
2306 lenlo = vpd[index];
2307 index += 1;
2308 lenhi = vpd[index];
2309 index += 1;
2310 Length = ((((unsigned short)lenhi) << 8) + lenlo);
2311 if (Length > len - index)
2312 Length = len - index;
2313 while (Length > 0) {
2314
2315 if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2316 index += 2;
2317 i = vpd[index];
2318 index += 1;
2319 j = 0;
2320 Length -= (3+i);
2321 while (i--) {
2322 phba->SerialNumber[j++] = vpd[index++];
2323 if (j == 31)
2324 break;
2325 }
2326 phba->SerialNumber[j] = 0;
2327 continue;
2328 }
2329 else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2330 phba->vpd_flag |= VPD_MODEL_DESC;
2331 index += 2;
2332 i = vpd[index];
2333 index += 1;
2334 j = 0;
2335 Length -= (3+i);
2336 while (i--) {
2337 phba->ModelDesc[j++] = vpd[index++];
2338 if (j == 255)
2339 break;
2340 }
2341 phba->ModelDesc[j] = 0;
2342 continue;
2343 }
2344 else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2345 phba->vpd_flag |= VPD_MODEL_NAME;
2346 index += 2;
2347 i = vpd[index];
2348 index += 1;
2349 j = 0;
2350 Length -= (3+i);
2351 while (i--) {
2352 phba->ModelName[j++] = vpd[index++];
2353 if (j == 79)
2354 break;
2355 }
2356 phba->ModelName[j] = 0;
2357 continue;
2358 }
2359 else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2360 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2361 index += 2;
2362 i = vpd[index];
2363 index += 1;
2364 j = 0;
2365 Length -= (3+i);
2366 while (i--) {
2367 phba->ProgramType[j++] = vpd[index++];
2368 if (j == 255)
2369 break;
2370 }
2371 phba->ProgramType[j] = 0;
2372 continue;
2373 }
2374 else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2375 phba->vpd_flag |= VPD_PORT;
2376 index += 2;
2377 i = vpd[index];
2378 index += 1;
2379 j = 0;
2380 Length -= (3+i);
2381 while (i--) {
2382 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2383 (phba->sli4_hba.pport_name_sta ==
2384 LPFC_SLI4_PPNAME_GET)) {
2385 j++;
2386 index++;
2387 } else
2388 phba->Port[j++] = vpd[index++];
2389 if (j == 19)
2390 break;
2391 }
2392 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2393 (phba->sli4_hba.pport_name_sta ==
2394 LPFC_SLI4_PPNAME_NON))
2395 phba->Port[j] = 0;
2396 continue;
2397 }
2398 else {
2399 index += 2;
2400 i = vpd[index];
2401 index += 1;
2402 index += i;
2403 Length -= (3 + i);
2404 }
2405 }
2406 finished = 0;
2407 break;
2408 case 0x78:
2409 finished = 1;
2410 break;
2411 default:
2412 index++;
2413 break;
2414 }
2415 }
2416
2417 return 1;
2418 }
2419
2420
2421 /**
2422 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
2423 * @phba: pointer to lpfc hba data structure.
2424 * @mdp: pointer to the data structure to hold the derived model name.
2425 * @descp: pointer to the data structure to hold the derived description.
2426 *
2427 * This routine derives the adapter model name and description for an
2428 * ATTO-branded HBA from the PCI subsystem device ID. The @mdp buffer is
2429 * an array of 80 chars and the @descp buffer is an array of 256 chars;
2430 * each is only filled in if it is currently an empty string.
2431 **/
2432 static void
2433 lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2434 {
2435 uint16_t sub_dev_id = phba->pcidev->subsystem_device;
2436 char *model = "<Unknown>";
2437 int tbolt = 0;
2438
2439 switch (sub_dev_id) {
2440 case PCI_DEVICE_ID_CLRY_161E:
2441 model = "161E";
2442 break;
2443 case PCI_DEVICE_ID_CLRY_162E:
2444 model = "162E";
2445 break;
2446 case PCI_DEVICE_ID_CLRY_164E:
2447 model = "164E";
2448 break;
2449 case PCI_DEVICE_ID_CLRY_161P:
2450 model = "161P";
2451 break;
2452 case PCI_DEVICE_ID_CLRY_162P:
2453 model = "162P";
2454 break;
2455 case PCI_DEVICE_ID_CLRY_164P:
2456 model = "164P";
2457 break;
2458 case PCI_DEVICE_ID_CLRY_321E:
2459 model = "321E";
2460 break;
2461 case PCI_DEVICE_ID_CLRY_322E:
2462 model = "322E";
2463 break;
2464 case PCI_DEVICE_ID_CLRY_324E:
2465 model = "324E";
2466 break;
2467 case PCI_DEVICE_ID_CLRY_321P:
2468 model = "321P";
2469 break;
2470 case PCI_DEVICE_ID_CLRY_322P:
2471 model = "322P";
2472 break;
2473 case PCI_DEVICE_ID_CLRY_324P:
2474 model = "324P";
2475 break;
2476 case PCI_DEVICE_ID_TLFC_2XX2:
2477 model = "2XX2";
2478 tbolt = 1;
2479 break;
2480 case PCI_DEVICE_ID_TLFC_3162:
2481 model = "3162";
2482 tbolt = 1;
2483 break;
2484 case PCI_DEVICE_ID_TLFC_3322:
2485 model = "3322";
2486 tbolt = 1;
2487 break;
2488 default:
2489 model = "Unknown";
2490 break;
2491 }
2492
2493 if (mdp && mdp[0] == '\0')
2494 snprintf(mdp, 79, "%s", model);
2495
2496 if (descp && descp[0] == '\0')
2497 snprintf(descp, 255,
2498 "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
2499 (tbolt) ? "ThunderLink FC " : "Celerity FC-",
2500 model,
2501 phba->Port);
2502 }
2503 /**
2504 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2505 * @phba: pointer to lpfc hba data structure.
2506 * @mdp: pointer to the data structure to hold the derived model name.
2507 * @descp: pointer to the data structure to hold the derived description.
2508 *
2509 * This routine retrieves HBA's description based on its registered PCI device
2510 * ID. The @descp passed into this function points to an array of 256 chars.
2511 * It shall be returned with the model name, maximum speed, and the host bus
2512 * type. The @mdp passed into this function points to an array of 80 chars,
2513 * which shall be returned with the model name. Buffers that already contain
2514 * a string are left untouched.
2515 **/
2516 static void
2517 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2518 {
2519 lpfc_vpd_t *vp;
2520 uint16_t dev_id = phba->pcidev->device;
2521 int max_speed;
2522 int GE = 0;
2523 int oneConnect = 0;
2524 struct {
2525 char *name;
2526 char *bus;
2527 char *function;
2528 } m = {"<Unknown>", "", ""};
2529
2530 if (mdp && mdp[0] != '\0'
2531 && descp && descp[0] != '\0')
2532 return;
2533
2534 if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
2535 lpfc_get_atto_model_desc(phba, mdp, descp);
2536 return;
2537 }
2538
2539 if (phba->lmt & LMT_64Gb)
2540 max_speed = 64;
2541 else if (phba->lmt & LMT_32Gb)
2542 max_speed = 32;
2543 else if (phba->lmt & LMT_16Gb)
2544 max_speed = 16;
2545 else if (phba->lmt & LMT_10Gb)
2546 max_speed = 10;
2547 else if (phba->lmt & LMT_8Gb)
2548 max_speed = 8;
2549 else if (phba->lmt & LMT_4Gb)
2550 max_speed = 4;
2551 else if (phba->lmt & LMT_2Gb)
2552 max_speed = 2;
2553 else if (phba->lmt & LMT_1Gb)
2554 max_speed = 1;
2555 else
2556 max_speed = 0;
2557
2558 vp = &phba->vpd;
2559
2560 switch (dev_id) {
2561 case PCI_DEVICE_ID_FIREFLY:
2562 m = (typeof(m)){"LP6000", "PCI",
2563 "Obsolete, Unsupported Fibre Channel Adapter"};
2564 break;
2565 case PCI_DEVICE_ID_SUPERFLY:
2566 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2567 m = (typeof(m)){"LP7000", "PCI", ""};
2568 else
2569 m = (typeof(m)){"LP7000E", "PCI", ""};
2570 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2571 break;
2572 case PCI_DEVICE_ID_DRAGONFLY:
2573 m = (typeof(m)){"LP8000", "PCI",
2574 "Obsolete, Unsupported Fibre Channel Adapter"};
2575 break;
2576 case PCI_DEVICE_ID_CENTAUR:
2577 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2578 m = (typeof(m)){"LP9002", "PCI", ""};
2579 else
2580 m = (typeof(m)){"LP9000", "PCI", ""};
2581 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2582 break;
2583 case PCI_DEVICE_ID_RFLY:
2584 m = (typeof(m)){"LP952", "PCI",
2585 "Obsolete, Unsupported Fibre Channel Adapter"};
2586 break;
2587 case PCI_DEVICE_ID_PEGASUS:
2588 m = (typeof(m)){"LP9802", "PCI-X",
2589 "Obsolete, Unsupported Fibre Channel Adapter"};
2590 break;
2591 case PCI_DEVICE_ID_THOR:
2592 m = (typeof(m)){"LP10000", "PCI-X",
2593 "Obsolete, Unsupported Fibre Channel Adapter"};
2594 break;
2595 case PCI_DEVICE_ID_VIPER:
2596 m = (typeof(m)){"LPX1000", "PCI-X",
2597 "Obsolete, Unsupported Fibre Channel Adapter"};
2598 break;
2599 case PCI_DEVICE_ID_PFLY:
2600 m = (typeof(m)){"LP982", "PCI-X",
2601 "Obsolete, Unsupported Fibre Channel Adapter"};
2602 break;
2603 case PCI_DEVICE_ID_TFLY:
2604 m = (typeof(m)){"LP1050", "PCI-X",
2605 "Obsolete, Unsupported Fibre Channel Adapter"};
2606 break;
2607 case PCI_DEVICE_ID_HELIOS:
2608 m = (typeof(m)){"LP11000", "PCI-X2",
2609 "Obsolete, Unsupported Fibre Channel Adapter"};
2610 break;
2611 case PCI_DEVICE_ID_HELIOS_SCSP:
2612 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2613 "Obsolete, Unsupported Fibre Channel Adapter"};
2614 break;
2615 case PCI_DEVICE_ID_HELIOS_DCSP:
2616 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2617 "Obsolete, Unsupported Fibre Channel Adapter"};
2618 break;
2619 case PCI_DEVICE_ID_NEPTUNE:
2620 m = (typeof(m)){"LPe1000", "PCIe",
2621 "Obsolete, Unsupported Fibre Channel Adapter"};
2622 break;
2623 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2624 m = (typeof(m)){"LPe1000-SP", "PCIe",
2625 "Obsolete, Unsupported Fibre Channel Adapter"};
2626 break;
2627 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2628 m = (typeof(m)){"LPe1002-SP", "PCIe",
2629 "Obsolete, Unsupported Fibre Channel Adapter"};
2630 break;
2631 case PCI_DEVICE_ID_BMID:
2632 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2633 break;
2634 case PCI_DEVICE_ID_BSMB:
2635 m = (typeof(m)){"LP111", "PCI-X2",
2636 "Obsolete, Unsupported Fibre Channel Adapter"};
2637 break;
2638 case PCI_DEVICE_ID_ZEPHYR:
2639 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2640 break;
2641 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2642 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2643 break;
2644 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2645 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2646 GE = 1;
2647 break;
2648 case PCI_DEVICE_ID_ZMID:
2649 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2650 break;
2651 case PCI_DEVICE_ID_ZSMB:
2652 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2653 break;
2654 case PCI_DEVICE_ID_LP101:
2655 m = (typeof(m)){"LP101", "PCI-X",
2656 "Obsolete, Unsupported Fibre Channel Adapter"};
2657 break;
2658 case PCI_DEVICE_ID_LP10000S:
2659 m = (typeof(m)){"LP10000-S", "PCI",
2660 "Obsolete, Unsupported Fibre Channel Adapter"};
2661 break;
2662 case PCI_DEVICE_ID_LP11000S:
2663 m = (typeof(m)){"LP11000-S", "PCI-X2",
2664 "Obsolete, Unsupported Fibre Channel Adapter"};
2665 break;
2666 case PCI_DEVICE_ID_LPE11000S:
2667 m = (typeof(m)){"LPe11000-S", "PCIe",
2668 "Obsolete, Unsupported Fibre Channel Adapter"};
2669 break;
2670 case PCI_DEVICE_ID_SAT:
2671 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2672 break;
2673 case PCI_DEVICE_ID_SAT_MID:
2674 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2675 break;
2676 case PCI_DEVICE_ID_SAT_SMB:
2677 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2678 break;
2679 case PCI_DEVICE_ID_SAT_DCSP:
2680 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2681 break;
2682 case PCI_DEVICE_ID_SAT_SCSP:
2683 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2684 break;
2685 case PCI_DEVICE_ID_SAT_S:
2686 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2687 break;
2688 case PCI_DEVICE_ID_PROTEUS_VF:
2689 m = (typeof(m)){"LPev12000", "PCIe IOV",
2690 "Obsolete, Unsupported Fibre Channel Adapter"};
2691 break;
2692 case PCI_DEVICE_ID_PROTEUS_PF:
2693 m = (typeof(m)){"LPev12000", "PCIe IOV",
2694 "Obsolete, Unsupported Fibre Channel Adapter"};
2695 break;
2696 case PCI_DEVICE_ID_PROTEUS_S:
2697 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2698 "Obsolete, Unsupported Fibre Channel Adapter"};
2699 break;
2700 case PCI_DEVICE_ID_TIGERSHARK:
2701 oneConnect = 1;
2702 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2703 break;
2704 case PCI_DEVICE_ID_TOMCAT:
2705 oneConnect = 1;
2706 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2707 break;
2708 case PCI_DEVICE_ID_FALCON:
2709 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2710 "EmulexSecure Fibre"};
2711 break;
2712 case PCI_DEVICE_ID_BALIUS:
2713 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2714 "Obsolete, Unsupported Fibre Channel Adapter"};
2715 break;
2716 case PCI_DEVICE_ID_LANCER_FC:
2717 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2718 break;
2719 case PCI_DEVICE_ID_LANCER_FC_VF:
2720 m = (typeof(m)){"LPe16000", "PCIe",
2721 "Obsolete, Unsupported Fibre Channel Adapter"};
2722 break;
2723 case PCI_DEVICE_ID_LANCER_FCOE:
2724 oneConnect = 1;
2725 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2726 break;
2727 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2728 oneConnect = 1;
2729 m = (typeof(m)){"OCe15100", "PCIe",
2730 "Obsolete, Unsupported FCoE"};
2731 break;
2732 case PCI_DEVICE_ID_LANCER_G6_FC:
2733 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2734 break;
2735 case PCI_DEVICE_ID_LANCER_G7_FC:
2736 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2737 break;
2738 case PCI_DEVICE_ID_LANCER_G7P_FC:
2739 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2740 break;
2741 case PCI_DEVICE_ID_SKYHAWK:
2742 case PCI_DEVICE_ID_SKYHAWK_VF:
2743 oneConnect = 1;
2744 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2745 break;
2746 default:
2747 m = (typeof(m)){"Unknown", "", ""};
2748 break;
2749 }
2750
2751 if (mdp && mdp[0] == '\0')
2752 snprintf(mdp, 79, "%s", m.name);
2753
2754
2755
2756
2757 if (descp && descp[0] == '\0') {
2758 if (oneConnect)
2759 snprintf(descp, 255,
2760 "Emulex OneConnect %s, %s Initiator %s",
2761 m.name, m.function,
2762 phba->Port);
2763 else if (max_speed == 0)
2764 snprintf(descp, 255,
2765 "Emulex %s %s %s",
2766 m.name, m.bus, m.function);
2767 else
2768 snprintf(descp, 255,
2769 "Emulex %s %d%s %s %s",
2770 m.name, max_speed, (GE) ? "GE" : "Gb",
2771 m.bus, m.function);
2772 }
2773 }
2774
2775 /**
2776 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
2777 * @phba: pointer to lpfc hba data structure.
2778 * @pring: pointer to a IOCB ring.
2779 * @cnt: the number of IOCBs to be posted to the IOCB ring.
2780 *
2781 * This routine posts a given number of IOCBs with the associated DMA buffer
2782 * descriptors specified by the cnt argument to the given IOCB ring.
2783 *
2784 * Return codes
2785 *   The number of IOCBs NOT able to be posted to the IOCB ring.
2786 **/
2787 int
2788 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2789 {
2790 IOCB_t *icmd;
2791 struct lpfc_iocbq *iocb;
2792 struct lpfc_dmabuf *mp1, *mp2;
2793
2794 cnt += pring->missbufcnt;
2795
2796 /* While there are buffers to post */
2797 while (cnt > 0) {
2798
2799 iocb = lpfc_sli_get_iocbq(phba);
2800 if (iocb == NULL) {
2801 pring->missbufcnt = cnt;
2802 return cnt;
2803 }
2804 icmd = &iocb->iocb;
2805
2806 /* 2 buffers can be posted per command */
2807 /* Allocate buffer to post */
2808 mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2809 if (mp1)
2810 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2811 if (!mp1 || !mp1->virt) {
2812 kfree(mp1);
2813 lpfc_sli_release_iocbq(phba, iocb);
2814 pring->missbufcnt = cnt;
2815 return cnt;
2816 }
2817
2818 INIT_LIST_HEAD(&mp1->list);
2819
2820 if (cnt > 1) {
2821 mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2822 if (mp2)
2823 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2824 &mp2->phys);
2825 if (!mp2 || !mp2->virt) {
2826 kfree(mp2);
2827 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2828 kfree(mp1);
2829 lpfc_sli_release_iocbq(phba, iocb);
2830 pring->missbufcnt = cnt;
2831 return cnt;
2832 }
2833
2834 INIT_LIST_HEAD(&mp2->list);
2835 } else {
2836 mp2 = NULL;
2837 }
2838
2839 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2840 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2841 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2842 icmd->ulpBdeCount = 1;
2843 cnt--;
2844 if (mp2) {
2845 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2846 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2847 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2848 cnt--;
2849 icmd->ulpBdeCount = 2;
2850 }
2851
2852 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2853 icmd->ulpLe = 1;
2854
2855 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2856 IOCB_ERROR) {
2857 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2858 kfree(mp1);
2859 cnt++;
2860 if (mp2) {
2861 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2862 kfree(mp2);
2863 cnt++;
2864 }
2865 lpfc_sli_release_iocbq(phba, iocb);
2866 pring->missbufcnt = cnt;
2867 return cnt;
2868 }
2869 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2870 if (mp2)
2871 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2872 }
2873 pring->missbufcnt = 0;
2874 return 0;
2875 }
2876
2877
2878 /**
2879 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to the ELS ring
2880 * @phba: pointer to lpfc hba data structure.
2881 *
2882 * This routine posts the initial receive IOCB buffers to the ELS ring.
2883 * The number of buffers posted is given by LPFC_BUF_RING0. SLI-3 only.
2884 *
2885 * Return codes
2886 *   0 - success (currently always success)
2887 **/
2888 static int
2889 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2890 {
2891 struct lpfc_sli *psli = &phba->sli;
2892
2893 /* Ring 0: post ELS / CT receive buffers */
2894 lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2895
2896
2897 return 0;
2898 }
2899
2900 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2901
2902
2903
2904
2905
2906
2907
2908
2909 static void
2910 lpfc_sha_init(uint32_t * HashResultPointer)
2911 {
2912 HashResultPointer[0] = 0x67452301;
2913 HashResultPointer[1] = 0xEFCDAB89;
2914 HashResultPointer[2] = 0x98BADCFE;
2915 HashResultPointer[3] = 0x10325476;
2916 HashResultPointer[4] = 0xC3D2E1F0;
2917 }
2918
2919 /**
2920 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2921 * @HashResultPointer: pointer to an initial/result hash table.
2922 * @HashWorkingPointer: pointer to a working hash table.
2923 *
2924 * This routine iterates an initial hash table pointed to by
2925 * @HashResultPointer with the values from the working hash table pointed
2926 * to by @HashWorkingPointer. The results are put back into the initial
2927 * hash table, returned through @HashResultPointer.
2928 **/
2929 static void
2930 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2931 {
2932 int t;
2933 uint32_t TEMP;
2934 uint32_t A, B, C, D, E;
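/* Expand the 16 input words into the 80-word message schedule W[16..79] */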
2935 t = 16;
2936 do {
2937 HashWorkingPointer[t] =
2938 S(1,
2939 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2940 8] ^
2941 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2942 } while (++t <= 79);
2943 t = 0;
2944 A = HashResultPointer[0];
2945 B = HashResultPointer[1];
2946 C = HashResultPointer[2];
2947 D = HashResultPointer[3];
2948 E = HashResultPointer[4];
2949 /* Run the 80 rounds of the SHA-1 style compression function */
2950 do {
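/* Pick the round function and constant for each group of 20 rounds */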
2951 if (t < 20) {
2952 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2953 } else if (t < 40) {
2954 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2955 } else if (t < 60) {
2956 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2957 } else {
2958 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2959 }
2960 TEMP += S(5, A) + E + HashWorkingPointer[t];
2961 E = D;
2962 D = C;
2963 C = S(30, B);
2964 B = A;
2965 A = TEMP;
2966 } while (++t <= 79);
2967
2968 HashResultPointer[0] += A;
2969 HashResultPointer[1] += B;
2970 HashResultPointer[2] += C;
2971 HashResultPointer[3] += D;
2972 HashResultPointer[4] += E;
2973
2974 }
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986 static void
2987 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2988 {
2989 *HashWorking = (*RandomChallenge ^ *HashWorking);
2990 }
2991
2992 /**
2993 * lpfc_hba_init - Perform special handling for LC HBA initialization
2994 * @phba: pointer to lpfc hba data structure.
2995 * @hbainit: pointer to an array of unsigned 32-bit integers.
2996 *
2997 * Derives the hbainit values from the adapter WWNN and the random challenge data.
2998 **/
2999 void
3000 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
3001 {
3002 int t;
3003 uint32_t *HashWorking;
3004 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
3005
3006 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
3007 if (!HashWorking)
3008 return;
3009
3010 HashWorking[0] = HashWorking[78] = *pwwnn++;
3011 HashWorking[1] = HashWorking[79] = *pwwnn;
3012
3013 for (t = 0; t < 7; t++)
3014 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
3015
3016 lpfc_sha_init(hbainit);
3017 lpfc_sha_iterate(hbainit, HashWorking);
3018 kfree(HashWorking);
3019 }
3020
3021 /**
3022 * lpfc_cleanup - Performs vport cleanups before deleting a vport
3023 * @vport: pointer to a host virtual N_Port data structure.
3024 *
3025 * This routine performs the necessary cleanups before deleting the @vport.
3026 * It invokes the discovery state machine to perform necessary state
3027 * transitions and to release the ndlps associated with the @vport, then
3028 * waits for the vport's node list to drain before cleaning up the RRQs.
3029 **/
3030 void
3031 lpfc_cleanup(struct lpfc_vport *vport)
3032 {
3033 struct lpfc_hba *phba = vport->phba;
3034 struct lpfc_nodelist *ndlp, *next_ndlp;
3035 int i = 0;
3036
3037 if (phba->link_state > LPFC_LINK_DOWN)
3038 lpfc_port_link_failure(vport);
3039
3040
3041 if (lpfc_is_vmid_enabled(phba))
3042 lpfc_vmid_vport_cleanup(vport);
3043
3044 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3045 if (vport->port_type != LPFC_PHYSICAL_PORT &&
3046 ndlp->nlp_DID == Fabric_DID) {
3047
3048 lpfc_nlp_put(ndlp);
3049 continue;
3050 }
3051
3052 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
3053 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3054 lpfc_nlp_put(ndlp);
3055 continue;
3056 }
3057
3058
3059
3060
3061 if (ndlp->nlp_type & NLP_FABRIC &&
3062 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
3063 lpfc_disc_state_machine(vport, ndlp, NULL,
3064 NLP_EVT_DEVICE_RECOVERY);
3065
3066 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
3067 lpfc_disc_state_machine(vport, ndlp, NULL,
3068 NLP_EVT_DEVICE_RM);
3069 }
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083 if (vport->load_flag & FC_UNLOADING &&
3084 pci_channel_offline(phba->pcidev))
3085 lpfc_sli_flush_io_rings(vport->phba);
3086
3087
3088
3089
3090
3091 while (!list_empty(&vport->fc_nodes)) {
3092 if (i++ > 3000) {
3093 lpfc_printf_vlog(vport, KERN_ERR,
3094 LOG_TRACE_EVENT,
3095 "0233 Nodelist not empty\n");
3096 list_for_each_entry_safe(ndlp, next_ndlp,
3097 &vport->fc_nodes, nlp_listp) {
3098 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3099 LOG_DISCOVERY,
3100 "0282 did:x%x ndlp:x%px "
3101 "refcnt:%d xflags x%x nflag x%x\n",
3102 ndlp->nlp_DID, (void *)ndlp,
3103 kref_read(&ndlp->kref),
3104 ndlp->fc4_xpt_flags,
3105 ndlp->nlp_flag);
3106 }
3107 break;
3108 }
3109
3110
3111 msleep(10);
3112 }
3113 lpfc_cleanup_vports_rrqs(vport, NULL);
3114 }
3115
3116 /**
3117 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
3118 * @vport: pointer to a virtual N_Port data structure.
3119 *
3120 * This routine stops all the timers associated with a @vport. This function
3121 * is invoked before disabling or deleting a @vport. Note that the physical
3122 * port is treated as @vport 0.
3123 **/
3124 void
3125 lpfc_stop_vport_timers(struct lpfc_vport *vport)
3126 {
3127 del_timer_sync(&vport->els_tmofunc);
3128 del_timer_sync(&vport->delayed_disc_tmo);
3129 lpfc_can_disctmo(vport);
3130 return;
3131 }
3132
3133
3134
3135
3136
3137
3138
3139
3140 void
3141 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3142 {
3143 /* Clear the pending FCF rediscovery wait flag */
3144 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3145
3146
3147 del_timer(&phba->fcf.redisc_wait);
3148 }
3149
3150 /**
3151 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3152 * @phba: pointer to lpfc hba data structure.
3153 *
3154 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3155 * takes the host lock, and if the FCF rediscovery wait is still pending,
3156 * cancels the timer and clears the failover-in-progress flags
3157 * (FCF_DEAD_DISC and FCF_ACVL_DISC).
3158 **/
3159 void
3160 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3161 {
3162 spin_lock_irq(&phba->hbalock);
3163 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3164 /* FCF rediscovery wait timer is not pending; nothing to stop */
3165 spin_unlock_irq(&phba->hbalock);
3166 return;
3167 }
3168 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3169
3170 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3171 spin_unlock_irq(&phba->hbalock);
3172 }
3173
3174 /**
3175 * lpfc_cmf_stop - Stop CMF processing
3176 * @phba: pointer to lpfc hba data structure.
3177 *
3178 * This routine cancels the CMF timer, zeroes the per-CPU congestion
3179 * management counters, and resumes any I/O that was blocked waiting
3180 * for bandwidth. It is a no-op if the port does not support CMF.
3181 **/
3182 void
3183 lpfc_cmf_stop(struct lpfc_hba *phba)
3184 {
3185 int cpu;
3186 struct lpfc_cgn_stat *cgs;
3187
3188 /* Nothing to do if CMF is not supported by the adapter */
3189 if (!phba->sli4_hba.pc_sli4_params.cmf)
3190 return;
3191
3192 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3193 "6221 Stop CMF / Cancel Timer\n");
3194
3195
3196 hrtimer_cancel(&phba->cmf_timer);
3197
3198
3199 atomic_set(&phba->cmf_busy, 0);
3200 for_each_present_cpu(cpu) {
3201 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3202 atomic64_set(&cgs->total_bytes, 0);
3203 atomic64_set(&cgs->rcv_bytes, 0);
3204 atomic_set(&cgs->rx_io_cnt, 0);
3205 atomic64_set(&cgs->rx_latency, 0);
3206 }
3207 atomic_set(&phba->cmf_bw_wait, 0);
3208
3209 /* Resume any blocked IO - queue the unblock work */
3210 queue_work(phba->wq, &phba->unblock_request_work);
3211 }
3212
3213 static inline uint64_t
3214 lpfc_get_max_line_rate(struct lpfc_hba *phba)
3215 {
3216 uint64_t rate = lpfc_sli_port_speed_get(phba);
3217
3218 return ((((unsigned long)rate) * 1024 * 1024) / 10);
3219 }
3220
3221 void
3222 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3223 {
3224 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3225 "6223 Signal CMF init\n");
3226
3227
3228 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3229 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3230 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3231 phba->cmf_interval_rate, 1000);
3232 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3233
3234
3235 lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3236 }
3237
3238 /**
3239 * lpfc_cmf_start - Start CMF processing
3240 * @phba: pointer to lpfc hba data structure.
3241 *
3242 * This routine reinitializes the congestion management counters and
3243 * statistics and (re)starts the CMF timer, unless CMF is off/unsupported.
3244 **/
3245 void
3246 lpfc_cmf_start(struct lpfc_hba *phba)
3247 {
3248 struct lpfc_cgn_stat *cgs;
3249 int cpu;
3250
3251
3252 if (!phba->sli4_hba.pc_sli4_params.cmf ||
3253 phba->cmf_active_mode == LPFC_CFG_OFF)
3254 return;
3255
3256
3257 lpfc_init_congestion_buf(phba);
3258
3259 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3260 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3261 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3262 atomic_set(&phba->cgn_sync_warn_cnt, 0);
3263
3264 atomic_set(&phba->cmf_busy, 0);
3265 for_each_present_cpu(cpu) {
3266 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3267 atomic64_set(&cgs->total_bytes, 0);
3268 atomic64_set(&cgs->rcv_bytes, 0);
3269 atomic_set(&cgs->rx_io_cnt, 0);
3270 atomic64_set(&cgs->rx_latency, 0);
3271 }
3272 phba->cmf_latency.tv_sec = 0;
3273 phba->cmf_latency.tv_nsec = 0;
3274
3275 lpfc_cmf_signal_init(phba);
3276
3277 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3278 "6222 Start CMF / Timer\n");
3279
3280 phba->cmf_timer_cnt = 0;
3281 hrtimer_start(&phba->cmf_timer,
3282 ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3283 HRTIMER_MODE_REL);
3284
3285 ktime_get_real_ts64(&phba->cmf_latency);
3286
3287 atomic_set(&phba->cmf_bw_wait, 0);
3288 atomic_set(&phba->cmf_stop_io, 0);
3289 }
3290
3291 /**
3292 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3293 * @phba: pointer to lpfc hba data structure.
3294 *
3295 * This routine stops all the timers associated with a HBA. This function is
3296 * invoked before either putting a HBA offline or unloading the driver.
3297 **/
3298 void
3299 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3300 {
3301 if (phba->pport)
3302 lpfc_stop_vport_timers(phba->pport);
3303 cancel_delayed_work_sync(&phba->eq_delay_work);
3304 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3305 del_timer_sync(&phba->sli.mbox_tmo);
3306 del_timer_sync(&phba->fabric_block_timer);
3307 del_timer_sync(&phba->eratt_poll);
3308 del_timer_sync(&phba->hb_tmofunc);
3309 if (phba->sli_rev == LPFC_SLI_REV4) {
3310 del_timer_sync(&phba->rrq_tmr);
3311 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3312 }
3313 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3314
3315 switch (phba->pci_dev_grp) {
3316 case LPFC_PCI_DEV_LP:
3317
3318 del_timer_sync(&phba->fcp_poll_timer);
3319 break;
3320 case LPFC_PCI_DEV_OC:
3321
3322 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3323 break;
3324 default:
3325 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3326 "0297 Invalid device group (x%x)\n",
3327 phba->pci_dev_grp);
3328 break;
3329 }
3330 return;
3331 }
3332
3333 /**
3334 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3335 * @phba: pointer to lpfc hba data structure.
3336 * @mbx_action: flag for mailbox shutdown action.
3337 *
3338 * This routine marks a HBA's management interface as blocked. Once blocked,
3339 * all user space access to the HBA, whether from the sysfs interface or the
3340 * libdfc interface, is refused. With LPFC_MBX_WAIT, the routine also waits
3341 * (up to the mailbox timeout) for any active mailbox command to complete
3342 * before returning.
3343 **/
3344 static void
3345 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3346 {
3347 unsigned long iflag;
3348 uint8_t actcmd = MBX_HEARTBEAT;
3349 unsigned long timeout;
3350
3351 spin_lock_irqsave(&phba->hbalock, iflag);
3352 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3353 spin_unlock_irqrestore(&phba->hbalock, iflag);
3354 if (mbx_action == LPFC_MBX_NO_WAIT)
3355 return;
3356 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3357 spin_lock_irqsave(&phba->hbalock, iflag);
3358 if (phba->sli.mbox_active) {
3359 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3360 /* Determine how long we might wait for the active mailbox
3361 * command to be gracefully completed by firmware.
3362 */
3363 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3364 phba->sli.mbox_active) * 1000) + jiffies;
3365 }
3366 spin_unlock_irqrestore(&phba->hbalock, iflag);
3367
3368
3369 while (phba->sli.mbox_active) {
3370
3371 msleep(2);
3372 if (time_after(jiffies, timeout)) {
3373 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3374 "2813 Mgmt IO is Blocked %x "
3375 "- mbox cmd %x still active\n",
3376 phba->sli.sli_flag, actcmd);
3377 break;
3378 }
3379 }
3380 }
3381
3382 /**
3383 * lpfc_sli4_node_prep - Assign RPIs for active nodes
3384 * @phba: pointer to lpfc hba data structure.
3385 *
3386 * Allocate RPIs for all active remote nodes. This is needed whenever
3387 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3388 * is to fix up the temporary rpi assignments.
3389 **/
3390 void
3391 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3392 {
3393 struct lpfc_nodelist *ndlp, *next_ndlp;
3394 struct lpfc_vport **vports;
3395 int i, rpi;
3396
3397 if (phba->sli_rev != LPFC_SLI_REV4)
3398 return;
3399
3400 vports = lpfc_create_vport_work_array(phba);
3401 if (vports == NULL)
3402 return;
3403
3404 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3405 if (vports[i]->load_flag & FC_UNLOADING)
3406 continue;
3407
3408 list_for_each_entry_safe(ndlp, next_ndlp,
3409 &vports[i]->fc_nodes,
3410 nlp_listp) {
3411 rpi = lpfc_sli4_alloc_rpi(phba);
3412 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3413
3414 continue;
3415 }
3416 ndlp->nlp_rpi = rpi;
3417 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3418 LOG_NODE | LOG_DISCOVERY,
3419 "0009 Assign RPI x%x to ndlp x%px "
3420 "DID:x%06x flg:x%x\n",
3421 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3422 ndlp->nlp_flag);
3423 }
3424 }
3425 lpfc_destroy_vport_work_array(phba, vports);
3426 }
3427
3428
3429
3430
3431
3432
3433
3434
3435 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3436 {
3437 struct lpfc_sli4_hdw_queue *qp;
3438 struct lpfc_io_buf *lpfc_ncmd;
3439 struct lpfc_io_buf *lpfc_ncmd_next;
3440 struct lpfc_epd_pool *epd_pool;
3441 unsigned long iflag;
3442
3443 epd_pool = &phba->epd_pool;
3444 qp = &phba->sli4_hba.hdwq[0];
3445
3446 spin_lock_init(&epd_pool->lock);
3447 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3448 spin_lock(&epd_pool->lock);
3449 INIT_LIST_HEAD(&epd_pool->list);
3450 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3451 &qp->lpfc_io_buf_list_put, list) {
3452 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3453 lpfc_ncmd->expedite = true;
3454 qp->put_io_bufs--;
3455 epd_pool->count++;
3456 if (epd_pool->count >= XRI_BATCH)
3457 break;
3458 }
3459 spin_unlock(&epd_pool->lock);
3460 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3461 }
3462
3463
3464
3465
3466
3467
3468
3469
3470 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3471 {
3472 struct lpfc_sli4_hdw_queue *qp;
3473 struct lpfc_io_buf *lpfc_ncmd;
3474 struct lpfc_io_buf *lpfc_ncmd_next;
3475 struct lpfc_epd_pool *epd_pool;
3476 unsigned long iflag;
3477
3478 epd_pool = &phba->epd_pool;
3479 qp = &phba->sli4_hba.hdwq[0];
3480
3481 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3482 spin_lock(&epd_pool->lock);
3483 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3484 &epd_pool->list, list) {
3485 list_move_tail(&lpfc_ncmd->list,
3486 &qp->lpfc_io_buf_list_put);
3487 lpfc_ncmd->flags = false;
3488 qp->put_io_bufs++;
3489 epd_pool->count--;
3490 }
3491 spin_unlock(&epd_pool->lock);
3492 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3493 }
3494
3495 /**
3496 * lpfc_create_multixri_pools - create multi-XRI pools
3497 * @phba: pointer to lpfc hba data structure.
3498 *
3499 * This routine creates a public (pbl) and a private (pvt) XRI pool for
3500 * each hardware queue, moves the XRIs from each queue's put list into
3501 * its public pool, and sets the private pool watermarks.
3502 **/
3503 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3504 {
3505 u32 i, j;
3506 u32 hwq_count;
3507 u32 count_per_hwq;
3508 struct lpfc_io_buf *lpfc_ncmd;
3509 struct lpfc_io_buf *lpfc_ncmd_next;
3510 unsigned long iflag;
3511 struct lpfc_sli4_hdw_queue *qp;
3512 struct lpfc_multixri_pool *multixri_pool;
3513 struct lpfc_pbl_pool *pbl_pool;
3514 struct lpfc_pvt_pool *pvt_pool;
3515
3516 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3517 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3518 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3519 phba->sli4_hba.io_xri_cnt);
3520
3521 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3522 lpfc_create_expedite_pool(phba);
3523
3524 hwq_count = phba->cfg_hdw_queue;
3525 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3526
3527 for (i = 0; i < hwq_count; i++) {
3528 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3529
3530 if (!multixri_pool) {
3531 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3532 "1238 Failed to allocate memory for "
3533 "multixri_pool\n");
3534
3535 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3536 lpfc_destroy_expedite_pool(phba);
3537
3538 j = 0;
3539 while (j < i) {
3540 qp = &phba->sli4_hba.hdwq[j];
3541 kfree(qp->p_multixri_pool);
3542 j++;
3543 }
3544 phba->cfg_xri_rebalancing = 0;
3545 return;
3546 }
3547
3548 qp = &phba->sli4_hba.hdwq[i];
3549 qp->p_multixri_pool = multixri_pool;
3550
3551 multixri_pool->xri_limit = count_per_hwq;
3552 multixri_pool->rrb_next_hwqid = i;
3553
3554 /* Initialize the public XRI pool for this hardware queue */
3555 pbl_pool = &multixri_pool->pbl_pool;
3556 spin_lock_init(&pbl_pool->lock);
3557 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3558 spin_lock(&pbl_pool->lock);
3559 INIT_LIST_HEAD(&pbl_pool->list);
3560 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3561 &qp->lpfc_io_buf_list_put, list) {
3562 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3563 qp->put_io_bufs--;
3564 pbl_pool->count++;
3565 }
3566 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3567 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3568 pbl_pool->count, i);
3569 spin_unlock(&pbl_pool->lock);
3570 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3571
3572 /* Initialize the private XRI pool for this hardware queue */
3573 pvt_pool = &multixri_pool->pvt_pool;
3574 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3575 pvt_pool->low_watermark = XRI_BATCH;
3576 spin_lock_init(&pvt_pool->lock);
3577 spin_lock_irqsave(&pvt_pool->lock, iflag);
3578 INIT_LIST_HEAD(&pvt_pool->list);
3579 pvt_pool->count = 0;
3580 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3581 }
3582 }
3583 /**
3584 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3585 * @phba: pointer to lpfc hba data structure.
3586 *
3587 * This routine returns the XRIs held in the public and private pools of
3588 * each hardware queue to that queue's put list and frees the pools.
3589 **/
3590 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3591 {
3592 u32 i;
3593 u32 hwq_count;
3594 struct lpfc_io_buf *lpfc_ncmd;
3595 struct lpfc_io_buf *lpfc_ncmd_next;
3596 unsigned long iflag;
3597 struct lpfc_sli4_hdw_queue *qp;
3598 struct lpfc_multixri_pool *multixri_pool;
3599 struct lpfc_pbl_pool *pbl_pool;
3600 struct lpfc_pvt_pool *pvt_pool;
3601
3602 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3603 lpfc_destroy_expedite_pool(phba);
3604
3605 if (!(phba->pport->load_flag & FC_UNLOADING))
3606 lpfc_sli_flush_io_rings(phba);
3607
3608 hwq_count = phba->cfg_hdw_queue;
3609
3610 for (i = 0; i < hwq_count; i++) {
3611 qp = &phba->sli4_hba.hdwq[i];
3612 multixri_pool = qp->p_multixri_pool;
3613 if (!multixri_pool)
3614 continue;
3615
3616 qp->p_multixri_pool = NULL;
3617
3618 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3619
3620
3621 pbl_pool = &multixri_pool->pbl_pool;
3622 spin_lock(&pbl_pool->lock);
3623
3624 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3625 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3626 pbl_pool->count, i);
3627
3628 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3629 &pbl_pool->list, list) {
3630 list_move_tail(&lpfc_ncmd->list,
3631 &qp->lpfc_io_buf_list_put);
3632 qp->put_io_bufs++;
3633 pbl_pool->count--;
3634 }
3635
3636 INIT_LIST_HEAD(&pbl_pool->list);
3637 pbl_pool->count = 0;
3638
3639 spin_unlock(&pbl_pool->lock);
3640
3641
3642 pvt_pool = &multixri_pool->pvt_pool;
3643 spin_lock(&pvt_pool->lock);
3644
3645 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3646 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3647 pvt_pool->count, i);
3648
3649 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3650 &pvt_pool->list, list) {
3651 list_move_tail(&lpfc_ncmd->list,
3652 &qp->lpfc_io_buf_list_put);
3653 qp->put_io_bufs++;
3654 pvt_pool->count--;
3655 }
3656
3657 INIT_LIST_HEAD(&pvt_pool->list);
3658 pvt_pool->count = 0;
3659
3660 spin_unlock(&pvt_pool->lock);
3661 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3662
3663 kfree(multixri_pool);
3664 }
3665 }
3666
3667 /**
3668 * lpfc_online - Initialize and bring a HBA online
3669 * @phba: pointer to lpfc hba data structure.
3670 *
3671 * This routine initializes the HBA and brings a HBA online. During this
3672 * process, the management interface is blocked to prevent user space access
3673 * to the HBA interfering with the HBA initialization.
3674 *
3675 * Return codes
3676 *   0 - successful
3677 *   1 - failed
3678 **/
3679 int
3680 lpfc_online(struct lpfc_hba *phba)
3681 {
3682 struct lpfc_vport *vport;
3683 struct lpfc_vport **vports;
3684 int i, error = 0;
3685 bool vpis_cleared = false;
3686
3687 if (!phba)
3688 return 0;
3689 vport = phba->pport;
3690
3691 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3692 return 0;
3693
3694 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3695 "0458 Bring Adapter online\n");
3696
3697 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3698
3699 if (phba->sli_rev == LPFC_SLI_REV4) {
3700 if (lpfc_sli4_hba_setup(phba)) {
3701 lpfc_unblock_mgmt_io(phba);
3702 return 1;
3703 }
3704 spin_lock_irq(&phba->hbalock);
3705 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3706 vpis_cleared = true;
3707 spin_unlock_irq(&phba->hbalock);
3708
3709
3710
3711
3712 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3713 !phba->nvmet_support) {
3714 error = lpfc_nvme_create_localport(phba->pport);
3715 if (error)
3716 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3717 "6132 NVME restore reg failed "
3718 "on nvmei error x%x\n", error);
3719 }
3720 } else {
3721 lpfc_sli_queue_init(phba);
3722 if (lpfc_sli_hba_setup(phba)) {
3723 lpfc_unblock_mgmt_io(phba);
3724 return 1;
3725 }
3726 }
3727
3728 vports = lpfc_create_vport_work_array(phba);
3729 if (vports != NULL) {
3730 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3731 struct Scsi_Host *shost;
3732 shost = lpfc_shost_from_vport(vports[i]);
3733 spin_lock_irq(shost->host_lock);
3734 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3735 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3736 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3737 if (phba->sli_rev == LPFC_SLI_REV4) {
3738 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3739 if ((vpis_cleared) &&
3740 (vports[i]->port_type !=
3741 LPFC_PHYSICAL_PORT))
3742 vports[i]->vpi = 0;
3743 }
3744 spin_unlock_irq(shost->host_lock);
3745 }
3746 }
3747 lpfc_destroy_vport_work_array(phba, vports);
3748
3749 if (phba->cfg_xri_rebalancing)
3750 lpfc_create_multixri_pools(phba);
3751
3752 lpfc_cpuhp_add(phba);
3753
3754 lpfc_unblock_mgmt_io(phba);
3755 return 0;
3756 }
3757
3758 /**
3759 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3760 * @phba: pointer to lpfc hba data structure.
3761 *
3762 * This routine marks a HBA's management interface as not blocked. Once the
3763 * HBA's management interface is marked as not blocked, all the user space
3764 * access to the HBA, whether they are from sysfs interface or libdfc
3765 * interface will be allowed. The HBA is set to block the management interface
3766 * when the driver prepares the HBA interface for online or offline and then
3767 * set to unblock the management interface afterwards.
3768 **/
3769 void
3770 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3771 {
3772 unsigned long iflag;
3773
3774 spin_lock_irqsave(&phba->hbalock, iflag);
3775 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3776 spin_unlock_irqrestore(&phba->hbalock, iflag);
3777 }
3778
3779 /**
3780 * lpfc_offline_prep - Prepare a HBA to be brought offline
3781 * @phba: pointer to lpfc hba data structure.
3782 * @mbx_action: flag for mailbox shutdown action.
3783 *
3784 * This routine is invoked to prepare a HBA to be brought offline. It performs
3785 * unregistration login to all the nodes on all vports and flushes the mailbox
3786 * queue to make it ready to be brought offline.
3787 **/
3788 void
3789 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3790 {
3791 struct lpfc_vport *vport = phba->pport;
3792 struct lpfc_nodelist *ndlp, *next_ndlp;
3793 struct lpfc_vport **vports;
3794 struct Scsi_Host *shost;
3795 int i;
3796 int offline;
3797 bool hba_pci_err;
3798
3799 if (vport->fc_flag & FC_OFFLINE_MODE)
3800 return;
3801
3802 lpfc_block_mgmt_io(phba, mbx_action);
3803
3804 lpfc_linkdown(phba);
3805
3806 offline = pci_channel_offline(phba->pcidev);
3807 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3808
3809
3810 vports = lpfc_create_vport_work_array(phba);
3811 if (vports != NULL) {
3812 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3813 if (vports[i]->load_flag & FC_UNLOADING)
3814 continue;
3815 shost = lpfc_shost_from_vport(vports[i]);
3816 spin_lock_irq(shost->host_lock);
3817 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3818 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3819 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3820 spin_unlock_irq(shost->host_lock);
3821
3822 shost = lpfc_shost_from_vport(vports[i]);
3823 list_for_each_entry_safe(ndlp, next_ndlp,
3824 &vports[i]->fc_nodes,
3825 nlp_listp) {
3826
3827 spin_lock_irq(&ndlp->lock);
3828 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3829 spin_unlock_irq(&ndlp->lock);
3830
3831 if (offline || hba_pci_err) {
3832 spin_lock_irq(&ndlp->lock);
3833 ndlp->nlp_flag &= ~(NLP_UNREG_INP |
3834 NLP_RPI_REGISTERED);
3835 spin_unlock_irq(&ndlp->lock);
3836 if (phba->sli_rev == LPFC_SLI_REV4)
3837 lpfc_sli_rpi_release(vports[i],
3838 ndlp);
3839 } else {
3840 lpfc_unreg_rpi(vports[i], ndlp);
3841 }
3842
3843
3844
3845
3846
3847 if (phba->sli_rev == LPFC_SLI_REV4) {
3848 lpfc_printf_vlog(vports[i], KERN_INFO,
3849 LOG_NODE | LOG_DISCOVERY,
3850 "0011 Free RPI x%x on "
3851 "ndlp: x%px did x%x\n",
3852 ndlp->nlp_rpi, ndlp,
3853 ndlp->nlp_DID);
3854 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3855 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3856 }
3857
3858 if (ndlp->nlp_type & NLP_FABRIC) {
3859 lpfc_disc_state_machine(vports[i], ndlp,
3860 NULL, NLP_EVT_DEVICE_RECOVERY);
3861
3862
3863
3864
3865
3866
3867
3868
3869 if (!(ndlp->save_flags &
3870 NLP_IN_RECOV_POST_DEV_LOSS) &&
3871 !(ndlp->fc4_xpt_flags &
3872 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3873 lpfc_disc_state_machine
3874 (vports[i], ndlp,
3875 NULL,
3876 NLP_EVT_DEVICE_RM);
3877 }
3878 }
3879 }
3880 }
3881 lpfc_destroy_vport_work_array(phba, vports);
3882
3883 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3884
3885 if (phba->wq)
3886 flush_workqueue(phba->wq);
3887 }
3888
3889 /**
3890 * lpfc_offline - Bring a HBA offline
3891 * @phba: pointer to lpfc hba data structure.
3892 *
3893 * This routine actually brings a HBA offline. It stops all the timers
3894 * associated with the HBA, brings down the SLI layer, and eventually
3895 * marks the HBA as in offline state for the upper layer protocol.
3896 **/
3897 void
3898 lpfc_offline(struct lpfc_hba *phba)
3899 {
3900 struct Scsi_Host *shost;
3901 struct lpfc_vport **vports;
3902 int i;
3903
3904 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3905 return;
3906
3907
3908 lpfc_stop_port(phba);
3909
3910
3911
3912
3913 lpfc_nvmet_destroy_targetport(phba);
3914 lpfc_nvme_destroy_localport(phba->pport);
3915
3916 vports = lpfc_create_vport_work_array(phba);
3917 if (vports != NULL)
3918 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3919 lpfc_stop_vport_timers(vports[i]);
3920 lpfc_destroy_vport_work_array(phba, vports);
3921 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3922 "0460 Bring Adapter offline\n");
3923
3924
3925 lpfc_sli_hba_down(phba);
3926 spin_lock_irq(&phba->hbalock);
3927 phba->work_ha = 0;
3928 spin_unlock_irq(&phba->hbalock);
3929 vports = lpfc_create_vport_work_array(phba);
3930 if (vports != NULL)
3931 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3932 shost = lpfc_shost_from_vport(vports[i]);
3933 spin_lock_irq(shost->host_lock);
3934 vports[i]->work_port_events = 0;
3935 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3936 spin_unlock_irq(shost->host_lock);
3937 }
3938 lpfc_destroy_vport_work_array(phba, vports);
3939
3940
3941
3942 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3943 __lpfc_cpuhp_remove(phba);
3944
3945 if (phba->cfg_xri_rebalancing)
3946 lpfc_destroy_multixri_pools(phba);
3947 }
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957 static void
3958 lpfc_scsi_free(struct lpfc_hba *phba)
3959 {
3960 struct lpfc_io_buf *sb, *sb_next;
3961
3962 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3963 return;
3964
3965 spin_lock_irq(&phba->hbalock);
3966
3967
3968
3969 spin_lock(&phba->scsi_buf_list_put_lock);
3970 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3971 list) {
3972 list_del(&sb->list);
3973 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3974 sb->dma_handle);
3975 kfree(sb);
3976 phba->total_scsi_bufs--;
3977 }
3978 spin_unlock(&phba->scsi_buf_list_put_lock);
3979
3980 spin_lock(&phba->scsi_buf_list_get_lock);
3981 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3982 list) {
3983 list_del(&sb->list);
3984 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3985 sb->dma_handle);
3986 kfree(sb);
3987 phba->total_scsi_bufs--;
3988 }
3989 spin_unlock(&phba->scsi_buf_list_get_lock);
3990 spin_unlock_irq(&phba->hbalock);
3991 }
3992
3993 /**
3994 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3995 * @phba: pointer to lpfc hba data structure.
3996 *
3997 * This routine frees all the IO buffers and IOCBs held on the per
3998 * hardware queue put and get lists back to the kernel, including any
3999 * extra SGL and cmd/rsp buffers attached to them.
4000 **/
4001 void
4002 lpfc_io_free(struct lpfc_hba *phba)
4003 {
4004 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
4005 struct lpfc_sli4_hdw_queue *qp;
4006 int idx;
4007
4008 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4009 qp = &phba->sli4_hba.hdwq[idx];
4010
4011 spin_lock(&qp->io_buf_list_put_lock);
4012 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4013 &qp->lpfc_io_buf_list_put,
4014 list) {
4015 list_del(&lpfc_ncmd->list);
4016 qp->put_io_bufs--;
4017 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4018 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4019 if (phba->cfg_xpsgl && !phba->nvmet_support)
4020 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4021 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4022 kfree(lpfc_ncmd);
4023 qp->total_io_bufs--;
4024 }
4025 spin_unlock(&qp->io_buf_list_put_lock);
4026
4027 spin_lock(&qp->io_buf_list_get_lock);
4028 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4029 &qp->lpfc_io_buf_list_get,
4030 list) {
4031 list_del(&lpfc_ncmd->list);
4032 qp->get_io_bufs--;
4033 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4034 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4035 if (phba->cfg_xpsgl && !phba->nvmet_support)
4036 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4037 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4038 kfree(lpfc_ncmd);
4039 qp->total_io_bufs--;
4040 }
4041 spin_unlock(&qp->io_buf_list_get_lock);
4042 }
4043 }
4044
4045 /**
4046 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
4047 * @phba: pointer to lpfc hba data structure.
4048 *
4049 * This routine first calculates the current number of ELS XRI-SGLs needed,
4050 * grows or shrinks the ELS sgl list accordingly, and then walks the list
4051 * to update the physical XRIs assigned after a port function reset. During
4052 * port initialization, the current ELS sgl count is 0.
4053 *
4054 * Return codes
4055 *   0 - successful, -ENOMEM on allocation failure
4056 **/
4057 int
4058 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
4059 {
4060 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4061 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4062 LIST_HEAD(els_sgl_list);
4063 int rc;
4064
4065
4066
4067
4068 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4069
4070 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
4071
4072 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
4073 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4074 "3157 ELS xri-sgl count increased from "
4075 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4076 els_xri_cnt);
4077
4078 for (i = 0; i < xri_cnt; i++) {
4079 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4080 GFP_KERNEL);
4081 if (sglq_entry == NULL) {
4082 lpfc_printf_log(phba, KERN_ERR,
4083 LOG_TRACE_EVENT,
4084 "2562 Failure to allocate an "
4085 "ELS sgl entry:%d\n", i);
4086 rc = -ENOMEM;
4087 goto out_free_mem;
4088 }
4089 sglq_entry->buff_type = GEN_BUFF_TYPE;
4090 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
4091 &sglq_entry->phys);
4092 if (sglq_entry->virt == NULL) {
4093 kfree(sglq_entry);
4094 lpfc_printf_log(phba, KERN_ERR,
4095 LOG_TRACE_EVENT,
4096 "2563 Failure to allocate an "
4097 "ELS mbuf:%d\n", i);
4098 rc = -ENOMEM;
4099 goto out_free_mem;
4100 }
4101 sglq_entry->sgl = sglq_entry->virt;
4102 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4103 sglq_entry->state = SGL_FREED;
4104 list_add_tail(&sglq_entry->list, &els_sgl_list);
4105 }
4106 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4107 list_splice_init(&els_sgl_list,
4108 &phba->sli4_hba.lpfc_els_sgl_list);
4109 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4110 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4111
4112 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4113 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4114 "3158 ELS xri-sgl count decreased from "
4115 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4116 els_xri_cnt);
4117 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4118 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4119 &els_sgl_list);
4120
4121 for (i = 0; i < xri_cnt; i++) {
4122 list_remove_head(&els_sgl_list,
4123 sglq_entry, struct lpfc_sglq, list);
4124 if (sglq_entry) {
4125 __lpfc_mbuf_free(phba, sglq_entry->virt,
4126 sglq_entry->phys);
4127 kfree(sglq_entry);
4128 }
4129 }
4130 list_splice_init(&els_sgl_list,
4131 &phba->sli4_hba.lpfc_els_sgl_list);
4132 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4133 } else
4134 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4135 "3163 ELS xri-sgl count unchanged: %d\n",
4136 els_xri_cnt);
4137 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4138
4139
4140 sglq_entry = NULL;
4141 sglq_entry_next = NULL;
4142 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4143 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4144 lxri = lpfc_sli4_next_xritag(phba);
4145 if (lxri == NO_XRI) {
4146 lpfc_printf_log(phba, KERN_ERR,
4147 LOG_TRACE_EVENT,
4148 "2400 Failed to allocate xri for "
4149 "ELS sgl\n");
4150 rc = -ENOMEM;
4151 goto out_free_mem;
4152 }
4153 sglq_entry->sli4_lxritag = lxri;
4154 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4155 }
4156 return 0;
4157
4158 out_free_mem:
4159 lpfc_free_els_sgl_list(phba);
4160 return rc;
4161 }
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175 int
4176 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4177 {
4178 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4179 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4180 uint16_t nvmet_xri_cnt;
4181 LIST_HEAD(nvmet_sgl_list);
4182 int rc;
4183
4184
4185
4186
4187 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4188
4189
4190 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4191 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4192
4193 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4194 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4195 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4196 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4197
4198 for (i = 0; i < xri_cnt; i++) {
4199 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4200 GFP_KERNEL);
4201 if (sglq_entry == NULL) {
4202 lpfc_printf_log(phba, KERN_ERR,
4203 LOG_TRACE_EVENT,
4204 "6303 Failure to allocate an "
4205 "NVMET sgl entry:%d\n", i);
4206 rc = -ENOMEM;
4207 goto out_free_mem;
4208 }
4209 sglq_entry->buff_type = NVMET_BUFF_TYPE;
4210 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4211 &sglq_entry->phys);
4212 if (sglq_entry->virt == NULL) {
4213 kfree(sglq_entry);
4214 lpfc_printf_log(phba, KERN_ERR,
4215 LOG_TRACE_EVENT,
4216 "6304 Failure to allocate an "
4217 "NVMET buf:%d\n", i);
4218 rc = -ENOMEM;
4219 goto out_free_mem;
4220 }
4221 sglq_entry->sgl = sglq_entry->virt;
4222 memset(sglq_entry->sgl, 0,
4223 phba->cfg_sg_dma_buf_size);
4224 sglq_entry->state = SGL_FREED;
4225 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4226 }
4227 spin_lock_irq(&phba->hbalock);
4228 spin_lock(&phba->sli4_hba.sgl_list_lock);
4229 list_splice_init(&nvmet_sgl_list,
4230 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4231 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4232 spin_unlock_irq(&phba->hbalock);
4233 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4234
4235 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4236 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4237 "6305 NVMET xri-sgl count decreased from "
4238 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4239 nvmet_xri_cnt);
4240 spin_lock_irq(&phba->hbalock);
4241 spin_lock(&phba->sli4_hba.sgl_list_lock);
4242 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4243 &nvmet_sgl_list);
4244
4245 for (i = 0; i < xri_cnt; i++) {
4246 list_remove_head(&nvmet_sgl_list,
4247 sglq_entry, struct lpfc_sglq, list);
4248 if (sglq_entry) {
4249 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4250 sglq_entry->phys);
4251 kfree(sglq_entry);
4252 }
4253 }
4254 list_splice_init(&nvmet_sgl_list,
4255 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4256 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4257 spin_unlock_irq(&phba->hbalock);
4258 } else
4259 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4260 "6306 NVMET xri-sgl count unchanged: %d\n",
4261 nvmet_xri_cnt);
4262 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4263
4264
4265 sglq_entry = NULL;
4266 sglq_entry_next = NULL;
4267 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4268 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4269 lxri = lpfc_sli4_next_xritag(phba);
4270 if (lxri == NO_XRI) {
4271 lpfc_printf_log(phba, KERN_ERR,
4272 LOG_TRACE_EVENT,
4273 "6307 Failed to allocate xri for "
4274 "NVMET sgl\n");
4275 rc = -ENOMEM;
4276 goto out_free_mem;
4277 }
4278 sglq_entry->sli4_lxritag = lxri;
4279 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4280 }
4281 return 0;
4282
4283 out_free_mem:
4284 lpfc_free_nvmet_sgl_list(phba);
4285 return rc;
4286 }
4287
4288 int
4289 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4290 {
4291 LIST_HEAD(blist);
4292 struct lpfc_sli4_hdw_queue *qp;
4293 struct lpfc_io_buf *lpfc_cmd;
4294 struct lpfc_io_buf *iobufp, *prev_iobufp;
4295 int idx, cnt, xri, inserted;
4296
4297 cnt = 0;
4298 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4299 qp = &phba->sli4_hba.hdwq[idx];
4300 spin_lock_irq(&qp->io_buf_list_get_lock);
4301 spin_lock(&qp->io_buf_list_put_lock);
4302
4303 /* Collect everything off the get and put lists */
4304 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4305 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4306 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4307 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4308 cnt += qp->get_io_bufs + qp->put_io_bufs;
4309 qp->get_io_bufs = 0;
4310 qp->put_io_bufs = 0;
4311 qp->total_io_bufs = 0;
4312 spin_unlock(&qp->io_buf_list_put_lock);
4313 spin_unlock_irq(&qp->io_buf_list_get_lock);
4314 }
4315
4316
4317 /*
4318 * Insert each IO buffer into @cbuf in ascending sli4_xritag order so
4319 * that the buffers can later be reposted to the port in XRI order.
4320 */
4321 for (idx = 0; idx < cnt; idx++) {
4322 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4323 if (!lpfc_cmd)
4324 return cnt;
4325 if (idx == 0) {
4326 list_add_tail(&lpfc_cmd->list, cbuf);
4327 continue;
4328 }
4329 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4330 inserted = 0;
4331 prev_iobufp = NULL;
4332 list_for_each_entry(iobufp, cbuf, list) {
4333 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4334 if (prev_iobufp)
4335 list_add(&lpfc_cmd->list,
4336 &prev_iobufp->list);
4337 else
4338 list_add(&lpfc_cmd->list, cbuf);
4339 inserted = 1;
4340 break;
4341 }
4342 prev_iobufp = iobufp;
4343 }
4344 if (!inserted)
4345 list_add_tail(&lpfc_cmd->list, cbuf);
4346 }
4347 return cnt;
4348 }
4349
4350 int
4351 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4352 {
4353 struct lpfc_sli4_hdw_queue *qp;
4354 struct lpfc_io_buf *lpfc_cmd;
4355 int idx, cnt;
4356
4357 qp = phba->sli4_hba.hdwq;
4358 cnt = 0;
4359 while (!list_empty(cbuf)) {
4360 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4361 list_remove_head(cbuf, lpfc_cmd,
4362 struct lpfc_io_buf, list);
4363 if (!lpfc_cmd)
4364 return cnt;
4365 cnt++;
4366 qp = &phba->sli4_hba.hdwq[idx];
4367 lpfc_cmd->hdwq_no = idx;
4368 lpfc_cmd->hdwq = qp;
4369 lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
4370 spin_lock(&qp->io_buf_list_put_lock);
4371 list_add_tail(&lpfc_cmd->list,
4372 &qp->lpfc_io_buf_list_put);
4373 qp->put_io_bufs++;
4374 qp->total_io_bufs++;
4375 spin_unlock(&qp->io_buf_list_put_lock);
4376 }
4377 }
4378 return cnt;
4379 }
4380
4381 /**
4382 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4383 * @phba: pointer to lpfc hba data structure.
4384 *
4385 * This routine flushes all IO buffers back onto a single sorted list,
4386 * releases any buffers beyond the new XRI limit, reassigns physical XRIs
4387 * to the remaining buffers after a port function reset, and then
4388 * replenishes the per hardware queue put lists.
4389 *
4390 * Return codes
4391 *   0 - successful, -ENOMEM on XRI allocation failure
4392 **/
4393 int
4394 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4395 {
4396 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4397 uint16_t i, lxri, els_xri_cnt;
4398 uint16_t io_xri_cnt, io_xri_max;
4399 LIST_HEAD(io_sgl_list);
4400 int rc, cnt;
4401
4402
4403
4404
4405
4406
4407 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4408 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4409 phba->sli4_hba.io_xri_max = io_xri_max;
4410
4411 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4412 "6074 Current allocated XRI sgl count:%d, "
4413 "maximum XRI count:%d els_xri_cnt:%d\n\n",
4414 phba->sli4_hba.io_xri_cnt,
4415 phba->sli4_hba.io_xri_max,
4416 els_xri_cnt);
4417
4418 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4419
4420 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4421
4422 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4423 phba->sli4_hba.io_xri_max;
4424
4425 for (i = 0; i < io_xri_cnt; i++) {
4426 list_remove_head(&io_sgl_list, lpfc_ncmd,
4427 struct lpfc_io_buf, list);
4428 if (lpfc_ncmd) {
4429 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4430 lpfc_ncmd->data,
4431 lpfc_ncmd->dma_handle);
4432 kfree(lpfc_ncmd);
4433 }
4434 }
4435 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4436 }
4437
4438
4439 lpfc_ncmd = NULL;
4440 lpfc_ncmd_next = NULL;
4441 phba->sli4_hba.io_xri_cnt = cnt;
4442 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4443 &io_sgl_list, list) {
4444 lxri = lpfc_sli4_next_xritag(phba);
4445 if (lxri == NO_XRI) {
4446 lpfc_printf_log(phba, KERN_ERR,
4447 LOG_TRACE_EVENT,
4448 "6075 Failed to allocate xri for "
4449 "nvme buffer\n");
4450 rc = -ENOMEM;
4451 goto out_free_mem;
4452 }
4453 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4454 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4455 }
4456 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4457 return 0;
4458
4459 out_free_mem:
4460 lpfc_io_free(phba);
4461 return rc;
4462 }
4463 /**
4464 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4465 * @phba: pointer to lpfc hba data structure.
4466 * @num_to_alloc: The requested number of buffers to allocate.
4467 *
4468 * This routine allocates IO buffers for a device with an SLI-4 interface
4469 * spec. Each IO buffer contains all the information needed to initiate
4470 * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4471 * them on a local list, this routine posts them to the chip by sending
4472 * an SGL block post.
4473 *
4474 * Return codes
4475 *   The number of IO buffers that were allocated and posted; 0 indicates
4476 *   failure and anything less than @num_to_alloc is a partial failure.
4477 **/
4478 int
4479 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4480 {
4481 struct lpfc_io_buf *lpfc_ncmd;
4482 struct lpfc_iocbq *pwqeq;
4483 uint16_t iotag, lxri = 0;
4484 int bcnt, num_posted;
4485 LIST_HEAD(prep_nblist);
4486 LIST_HEAD(post_nblist);
4487 LIST_HEAD(nvme_nblist);
4488
4489 phba->sli4_hba.io_xri_cnt = 0;
4490 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4491 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4492 if (!lpfc_ncmd)
4493 break;
4494
4495
4496
4497
4498
4499 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4500 GFP_KERNEL,
4501 &lpfc_ncmd->dma_handle);
4502 if (!lpfc_ncmd->data) {
4503 kfree(lpfc_ncmd);
4504 break;
4505 }
4506
4507 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4508 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4509 } else {
4510
4511
4512
4513
4514 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4515 (((unsigned long)(lpfc_ncmd->data) &
4516 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4517 lpfc_printf_log(phba, KERN_ERR,
4518 LOG_TRACE_EVENT,
4519 "3369 Memory alignment err: "
4520 "addr=%lx\n",
4521 (unsigned long)lpfc_ncmd->data);
4522 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4523 lpfc_ncmd->data,
4524 lpfc_ncmd->dma_handle);
4525 kfree(lpfc_ncmd);
4526 break;
4527 }
4528 }
4529
4530 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4531
4532 lxri = lpfc_sli4_next_xritag(phba);
4533 if (lxri == NO_XRI) {
4534 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4535 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4536 kfree(lpfc_ncmd);
4537 break;
4538 }
4539 pwqeq = &lpfc_ncmd->cur_iocbq;
4540
4541
4542 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4543 if (iotag == 0) {
4544 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4545 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4546 kfree(lpfc_ncmd);
4547 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4548 "6121 Failed to allocate IOTAG for"
4549 " XRI:0x%x\n", lxri);
4550 lpfc_sli4_free_xri(phba, lxri);
4551 break;
4552 }
4553 pwqeq->sli4_lxritag = lxri;
4554 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4555
4556
4557 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4558 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4559 lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
4560 spin_lock_init(&lpfc_ncmd->buf_lock);
4561
4562
4563 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4564 phba->sli4_hba.io_xri_cnt++;
4565 }
4566 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4567 "6114 Allocate %d out of %d requested new NVME "
4568 "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
4569 sizeof(*lpfc_ncmd));
4570
4571
4572
4573 if (!list_empty(&post_nblist))
4574 num_posted = lpfc_sli4_post_io_sgl_list(
4575 phba, &post_nblist, bcnt);
4576 else
4577 num_posted = 0;
4578
4579 return num_posted;
4580 }
4581
4582 static uint64_t
4583 lpfc_get_wwpn(struct lpfc_hba *phba)
4584 {
4585 uint64_t wwn;
4586 int rc;
4587 LPFC_MBOXQ_t *mboxq;
4588 MAILBOX_t *mb;
4589
4590 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4591 GFP_KERNEL);
4592 if (!mboxq)
4593 return (uint64_t)-1;
4594
4595
4596 lpfc_read_nv(phba, mboxq);
4597 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4598 if (rc != MBX_SUCCESS) {
4599 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4600 "6019 Mailbox failed , mbxCmd x%x "
4601 "READ_NV, mbxStatus x%x\n",
4602 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4603 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4604 mempool_free(mboxq, phba->mbox_mem_pool);
4605 return (uint64_t) -1;
4606 }
4607 mb = &mboxq->u.mb;
4608 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4609
4610 mempool_free(mboxq, phba->mbox_mem_pool);
4611 if (phba->sli_rev == LPFC_SLI_REV4)
4612 return be64_to_cpu(wwn);
4613 else
4614 return rol64(wwn, 32);
4615 }
4616
4617 /**
4618 * lpfc_vmid_res_alloc - Allocates resources for VMID
4619 * @phba: pointer to lpfc hba data structure.
4620 * @vport: pointer to vport data structure
4621 *
4622 * This routine allocates the resources needed for VMID (the per-vport VMID
4623 * table, priority-range bitmap, and hash table) when VMID is enabled.
4624 *
4625 * Return codes
4626 *   0 on Success, Non-0 on Failure
4627 **/
4628 static int
4629 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4630 {
4631
4632 if (phba->sli_rev == LPFC_SLI_REV3) {
4633 phba->cfg_vmid_app_header = 0;
4634 phba->cfg_vmid_priority_tagging = 0;
4635 }
4636
4637 if (lpfc_is_vmid_enabled(phba)) {
4638 vport->vmid =
4639 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4640 GFP_KERNEL);
4641 if (!vport->vmid)
4642 return -ENOMEM;
4643
4644 rwlock_init(&vport->vmid_lock);
4645
4646
4647 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4648 vport->vmid_inactivity_timeout =
4649 phba->cfg_vmid_inactivity_timeout;
4650 vport->max_vmid = phba->cfg_max_vmid;
4651 vport->cur_vmid_cnt = 0;
4652
4653 vport->vmid_priority_range = bitmap_zalloc
4654 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4655
4656 if (!vport->vmid_priority_range) {
4657 kfree(vport->vmid);
4658 return -ENOMEM;
4659 }
4660
4661 hash_init(vport->hash_table);
4662 }
4663 return 0;
4664 }
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
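/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID for this port.
 * @dev: pointer to the device data structure.
 *
 * Allocates a SCSI host, initializes the vport embedded in it, selects
 * and tunes the host template (physical port vs. NPIV vport, FCP vs.
 * NVMe-only), sets up the discovery and ELS timers, and registers the
 * host with the SCSI midlayer.
 *
 * Return: pointer to the newly created vport on success, NULL on failure.
 **/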
4682 struct lpfc_vport *
4683 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4684 {
4685 struct lpfc_vport *vport;
4686 struct Scsi_Host *shost = NULL;
4687 struct scsi_host_template *template;
4688 int error = 0;
4689 int i;
4690 uint64_t wwn;
4691 bool use_no_reset_hba = false;
4692 int rc;
4693
4694 if (lpfc_no_hba_reset_cnt) {
4695 if (phba->sli_rev < LPFC_SLI_REV4 &&
4696 dev == &phba->pcidev->dev) {
4697
4698 lpfc_sli_brdrestart(phba);
4699 rc = lpfc_sli_chipset_init(phba);
4700 if (rc)
4701 return NULL;
4702 }
4703 wwn = lpfc_get_wwpn(phba);
4704 }
4705
4706 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4707 if (wwn == lpfc_no_hba_reset[i]) {
4708 lpfc_printf_log(phba, KERN_ERR,
4709 LOG_TRACE_EVENT,
4710 "6020 Setting use_no_reset port=%llx\n",
4711 wwn);
4712 use_no_reset_hba = true;
4713 break;
4714 }
4715 }
4716
4717
4718 if (dev == &phba->pcidev->dev) {
4719 template = &phba->port_template;
4720
4721 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4722
4723 memcpy(template, &lpfc_template, sizeof(*template));
4724
4725 if (use_no_reset_hba)
4726
4727 template->eh_host_reset_handler = NULL;
4728
4729
4730 memcpy(&phba->vport_template, &lpfc_template,
4731 sizeof(*template));
4732 phba->vport_template.shost_groups = lpfc_vport_groups;
4733 phba->vport_template.eh_bus_reset_handler = NULL;
4734 phba->vport_template.eh_host_reset_handler = NULL;
4735 phba->vport_template.vendor_id = 0;
4736
4737
4738 if (phba->sli_rev == LPFC_SLI_REV4) {
4739 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4740 phba->vport_template.sg_tablesize =
4741 phba->cfg_scsi_seg_cnt;
4742 } else {
4743 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4744 phba->vport_template.sg_tablesize =
4745 phba->cfg_sg_seg_cnt;
4746 }
4747
4748 } else {
4749
4750 memcpy(template, &lpfc_template_nvme,
4751 sizeof(*template));
4752 }
4753 } else {
4754 template = &phba->vport_template;
4755 }
4756
4757 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4758 if (!shost)
4759 goto out;
4760
4761 vport = (struct lpfc_vport *) shost->hostdata;
4762 vport->phba = phba;
4763 vport->load_flag |= FC_LOADING;
4764 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4765 vport->fc_rscn_flush = 0;
4766 lpfc_get_vport_cfgparam(vport);
4767
4768
4769 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4770
4771 shost->unique_id = instance;
4772 shost->max_id = LPFC_MAX_TARGET;
4773 shost->max_lun = vport->cfg_max_luns;
4774 shost->this_id = -1;
4775 shost->max_cmd_len = 16;
4776
4777 if (phba->sli_rev == LPFC_SLI_REV4) {
4778 if (!phba->cfg_fcp_mq_threshold ||
4779 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4780 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4781
4782 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4783 phba->cfg_fcp_mq_threshold);
4784
4785 shost->dma_boundary =
4786 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4787
4788 if (phba->cfg_xpsgl && !phba->nvmet_support)
4789 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4790 else
4791 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4792 } else
4793 		/* SLI-3 (and earlier) ports expose only a single
4794 		 * hardware queue for FCP processing.
4795 		 */
4796 shost->nr_hw_queues = 1;
4797
4798
4799
4800
4801
4802
4803 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4804 if (dev != &phba->pcidev->dev) {
4805 shost->transportt = lpfc_vport_transport_template;
4806 vport->port_type = LPFC_NPIV_PORT;
4807 } else {
4808 shost->transportt = lpfc_transport_template;
4809 vport->port_type = LPFC_PHYSICAL_PORT;
4810 }
4811
4812 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4813 "9081 CreatePort TMPLATE type %x TBLsize %d "
4814 "SEGcnt %d/%d\n",
4815 vport->port_type, shost->sg_tablesize,
4816 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4817
4818
4819 rc = lpfc_vmid_res_alloc(phba, vport);
4820
4821 if (rc)
4822 goto out;
4823
4824
4825 INIT_LIST_HEAD(&vport->fc_nodes);
4826 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4827 spin_lock_init(&vport->work_port_lock);
4828
4829 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4830
4831 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4832
4833 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4834
4835 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4836 lpfc_setup_bg(phba, shost);
4837
4838 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4839 if (error)
4840 goto out_put_shost;
4841
4842 spin_lock_irq(&phba->port_list_lock);
4843 list_add_tail(&vport->listentry, &phba->port_list);
4844 spin_unlock_irq(&phba->port_list_lock);
4845 return vport;
4846
4847 out_put_shost:
4848 kfree(vport->vmid);
4849 bitmap_free(vport->vmid_priority_range);
4850 scsi_host_put(shost);
4851 out:
4852 return NULL;
4853 }
4854
4855
4856
4857
4858
4859
4860
4861
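/**
 * destroy_port - destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * Removes the SCSI host from the FC transport and SCSI midlayer,
 * unlinks the vport from the HBA port list, and cleans up the vport.
 **/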
4862 void
4863 destroy_port(struct lpfc_vport *vport)
4864 {
4865 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4866 struct lpfc_hba *phba = vport->phba;
4867
4868 lpfc_debugfs_terminate(vport);
4869 fc_remove_host(shost);
4870 scsi_remove_host(shost);
4871
4872 spin_lock_irq(&phba->port_list_lock);
4873 list_del_init(&vport->listentry);
4874 spin_unlock_irq(&phba->port_list_lock);
4875
4876 lpfc_cleanup(vport);
4877 return;
4878 }
4879
4880
4881
4882
4883
4884
4885
4886
4887
4888
4889
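/**
 * lpfc_get_instance - Get a unique integer instance ID
 *
 * Allocates an ID from the lpfc_hba_index IDR.
 *
 * Return: the allocated instance number, or -1 on failure.
 **/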
4890 int
4891 lpfc_get_instance(void)
4892 {
4893 int ret;
4894
4895 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4896 return ret < 0 ? -1 : ret;
4897 }
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
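/**
 * lpfc_scan_finished - method for the SCSI layer to detect scan completion
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed scan time in jiffies.
 *
 * Returns 1 (scan may be considered finished) when the port is
 * unloading, when the scan has run longer than 30 seconds, when the
 * link has been down longer than 15 seconds, or when discovery has
 * settled; returns 0 to keep the midlayer waiting.
 **/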
4914 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4915 {
4916 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4917 struct lpfc_hba *phba = vport->phba;
4918 int stat = 0;
4919
4920 spin_lock_irq(shost->host_lock);
4921
4922 if (vport->load_flag & FC_UNLOADING) {
4923 stat = 1;
4924 goto finished;
4925 }
4926 if (time >= msecs_to_jiffies(30 * 1000)) {
4927 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4928 "0461 Scanning longer than 30 "
4929 "seconds. Continuing initialization\n");
4930 stat = 1;
4931 goto finished;
4932 }
4933 if (time >= msecs_to_jiffies(15 * 1000) &&
4934 phba->link_state <= LPFC_LINK_DOWN) {
4935 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4936 "0465 Link down longer than 15 "
4937 "seconds. Continuing initialization\n");
4938 stat = 1;
4939 goto finished;
4940 }
4941
4942 if (vport->port_state != LPFC_VPORT_READY)
4943 goto finished;
4944 if (vport->num_disc_nodes || vport->fc_prli_sent)
4945 goto finished;
4946 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4947 goto finished;
4948 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4949 goto finished;
4950
4951 stat = 1;
4952
4953 finished:
4954 spin_unlock_irq(shost->host_lock);
4955 return stat;
4956 }
4957
4958 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4959 {
4960 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4961 struct lpfc_hba *phba = vport->phba;
4962
4963 fc_host_supported_speeds(shost) = 0;
4964
4965
4966
4967
4968 if (phba->hba_flag & HBA_FCOE_MODE)
4969 return;
4970
4971 if (phba->lmt & LMT_256Gb)
4972 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4973 if (phba->lmt & LMT_128Gb)
4974 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4975 if (phba->lmt & LMT_64Gb)
4976 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4977 if (phba->lmt & LMT_32Gb)
4978 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4979 if (phba->lmt & LMT_16Gb)
4980 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4981 if (phba->lmt & LMT_10Gb)
4982 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4983 if (phba->lmt & LMT_8Gb)
4984 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4985 if (phba->lmt & LMT_4Gb)
4986 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4987 if (phba->lmt & LMT_2Gb)
4988 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4989 if (phba->lmt & LMT_1Gb)
4990 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4991 }
4992
4993
4994
4995
4996
4997
4998
4999
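/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
 * @shost: pointer to SCSI host data structure.
 *
 * Populates the FC transport attributes (node/port names, supported
 * classes, FC4 types, supported speeds, max frame size, dev_loss_tmo,
 * max NPIV vports) and clears the FC_LOADING flag on the vport.
 **/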
5000 void lpfc_host_attrib_init(struct Scsi_Host *shost)
5001 {
5002 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5003 struct lpfc_hba *phba = vport->phba;
5004
5005
5006
5007
5008 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
5009 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
5010 fc_host_supported_classes(shost) = FC_COS_CLASS3;
5011
5012 memset(fc_host_supported_fc4s(shost), 0,
5013 sizeof(fc_host_supported_fc4s(shost)));
5014 fc_host_supported_fc4s(shost)[2] = 1;
5015 fc_host_supported_fc4s(shost)[7] = 1;
5016
5017 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
5018 sizeof fc_host_symbolic_name(shost));
5019
5020 lpfc_host_supported_speeds_set(shost);
5021
5022 fc_host_maxframe_size(shost) =
5023 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
5024 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
5025
5026 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
5027
5028
5029 memset(fc_host_active_fc4s(shost), 0,
5030 sizeof(fc_host_active_fc4s(shost)));
5031 fc_host_active_fc4s(shost)[2] = 1;
5032 fc_host_active_fc4s(shost)[7] = 1;
5033
5034 fc_host_max_npiv_vports(shost) = phba->max_vpi;
5035 spin_lock_irq(shost->host_lock);
5036 vport->load_flag &= ~FC_LOADING;
5037 spin_unlock_irq(shost->host_lock);
5038 }
5039
5040
5041
5042
5043
5044
5045
5046
5047
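/**
 * lpfc_stop_port_s3 - Stop an SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Disables host interrupts, clears the host attention register, then
 * stops the HBA timers and pending port work.
 **/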
5048 static void
5049 lpfc_stop_port_s3(struct lpfc_hba *phba)
5050 {
5051
5052 writel(0, phba->HCregaddr);
5053 readl(phba->HCregaddr);
5054
5055 writel(0xffffffff, phba->HAregaddr);
5056 readl(phba->HAregaddr);
5057
5058
5059 lpfc_stop_hba_timers(phba);
5060 phba->pport->work_port_events = 0;
5061 }
5062
5063
5064
5065
5066
5067
5068
5069
5070
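/**
 * lpfc_stop_port_s4 - Stop an SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops the HBA timers, clears pending port work, and marks the SLI4
 * interrupt state as disabled.
 **/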
5071 static void
5072 lpfc_stop_port_s4(struct lpfc_hba *phba)
5073 {
5074
5075 lpfc_stop_hba_timers(phba);
5076 if (phba->pport)
5077 phba->pport->work_port_events = 0;
5078 phba->sli4_hba.intr_enable = 0;
5079 }
5080
5081
5082
5083
5084
5085
5086
5087
5088 void
5089 lpfc_stop_port(struct lpfc_hba *phba)
5090 {
5091 phba->lpfc_stop_port(phba);
5092
5093 if (phba->wq)
5094 flush_workqueue(phba->wq);
5095 }
5096
5097
5098
5099
5100
5101
5102
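/**
 * lpfc_fcf_redisc_wait_start_timer - Start the FCF rediscover wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Arms the FCF rediscover quiescent-period timer, clears the FCF
 * available/scan-done flags, and marks FCF rediscovery as pending.
 **/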
5103 void
5104 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5105 {
5106 unsigned long fcf_redisc_wait_tmo =
5107 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5108
5109 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5110 spin_lock_irq(&phba->hbalock);
5111
5112 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5113
5114 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5115 spin_unlock_irq(&phba->hbalock);
5116 }
5117
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
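/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF rediscover wait timer timeout handler
 * @t: timer context used to obtain the phba.
 *
 * Converts the pending FCF rediscovery state into an FCF_REDISC_EVT
 * event and wakes the worker thread to process it.
 **/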
5128 static void
5129 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5130 {
5131 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5132
5133
5134 spin_lock_irq(&phba->hbalock);
5135 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5136 spin_unlock_irq(&phba->hbalock);
5137 return;
5138 }
5139
5140 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5141
5142 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5143 spin_unlock_irq(&phba->hbalock);
5144 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5145 "2776 FCF rediscover quiescent timer expired\n");
5146
5147 lpfc_worker_wake_up(phba);
5148 }
5149
5150
5151
5152
5153
5154
5155
5156
5157
5158
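/**
 * lpfc_vmid_poll - VMID timeout detection
 * @t: timer context used to obtain the phba.
 *
 * Periodic timer that asks the worker thread to issue a QFPA command
 * when priority tagging is enabled and to check for inactive VMIDs,
 * then re-arms itself for the next LPFC_VMID_TIMER interval.
 **/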
5159 static void
5160 lpfc_vmid_poll(struct timer_list *t)
5161 {
5162 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5163 u32 wake_up = 0;
5164
5165
5166 if (phba->pport->vmid_priority_tagging) {
5167 wake_up = 1;
5168 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5169 }
5170
5171
5172 if (phba->pport->vmid_inactivity_timeout ||
5173 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5174 wake_up = 1;
5175 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5176 }
5177
5178 if (wake_up)
5179 lpfc_worker_wake_up(phba);
5180
5181
5182 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5183 LPFC_VMID_TIMER));
5184 }
5185
5186
5187
5188
5189
5190
5191
5192
5193 static void
5194 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5195 struct lpfc_acqe_link *acqe_link)
5196 {
5197 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5198 case LPFC_ASYNC_LINK_FAULT_NONE:
5199 case LPFC_ASYNC_LINK_FAULT_LOCAL:
5200 case LPFC_ASYNC_LINK_FAULT_REMOTE:
5201 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5202 break;
5203 default:
5204 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5205 "0398 Unknown link fault code: x%x\n",
5206 bf_get(lpfc_acqe_link_fault, acqe_link));
5207 break;
5208 }
5209 }
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
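/**
 * lpfc_sli4_parse_latt_type - Parse the SLI4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * Translates the link status in the ACQE into the link attention type
 * used by READ_TOPOLOGY handling: link up, link down, or reserved.
 **/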
5221 static uint8_t
5222 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5223 struct lpfc_acqe_link *acqe_link)
5224 {
5225 uint8_t att_type;
5226
5227 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5228 case LPFC_ASYNC_LINK_STATUS_DOWN:
5229 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5230 att_type = LPFC_ATT_LINK_DOWN;
5231 break;
5232 case LPFC_ASYNC_LINK_STATUS_UP:
5233
5234 att_type = LPFC_ATT_RESERVED;
5235 break;
5236 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5237 att_type = LPFC_ATT_LINK_UP;
5238 break;
5239 default:
5240 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5241 "0399 Invalid link attention type: x%x\n",
5242 bf_get(lpfc_acqe_link_status, acqe_link));
5243 att_type = LPFC_ATT_RESERVED;
5244 break;
5245 }
5246 return att_type;
5247 }
5248
5249
5250
5251
5252
5253
5254
5255
5256
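/**
 * lpfc_sli_port_speed_get - Get the current port speed
 * @phba: pointer to lpfc hba data structure.
 *
 * Return: the link speed in Mbps (the logical speed when available on
 * SLI4 ports), or 0 if the link is not up.
 **/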
5257 uint32_t
5258 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5259 {
5260 uint32_t link_speed;
5261
5262 if (!lpfc_is_link_up(phba))
5263 return 0;
5264
5265 if (phba->sli_rev <= LPFC_SLI_REV3) {
5266 switch (phba->fc_linkspeed) {
5267 case LPFC_LINK_SPEED_1GHZ:
5268 link_speed = 1000;
5269 break;
5270 case LPFC_LINK_SPEED_2GHZ:
5271 link_speed = 2000;
5272 break;
5273 case LPFC_LINK_SPEED_4GHZ:
5274 link_speed = 4000;
5275 break;
5276 case LPFC_LINK_SPEED_8GHZ:
5277 link_speed = 8000;
5278 break;
5279 case LPFC_LINK_SPEED_10GHZ:
5280 link_speed = 10000;
5281 break;
5282 case LPFC_LINK_SPEED_16GHZ:
5283 link_speed = 16000;
5284 break;
5285 default:
5286 link_speed = 0;
5287 }
5288 } else {
5289 if (phba->sli4_hba.link_state.logical_speed)
5290 link_speed =
5291 phba->sli4_hba.link_state.logical_speed;
5292 else
5293 link_speed = phba->sli4_hba.link_state.speed;
5294 }
5295 return link_speed;
5296 }
5297
5298
5299
5300
5301
5302
5303
5304
5305
5306
5307
5308
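/**
 * lpfc_sli4_port_speed_parse - Parse an async event speed code to Mbps
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event trailer code (link or FC).
 * @speed_code: asynchronous event link speed code.
 *
 * Converts the speed code carried in a link or FC async event into a
 * port speed in Mbps; unknown codes map to 0.
 **/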
5309 static uint32_t
5310 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5311 uint8_t speed_code)
5312 {
5313 uint32_t port_speed;
5314
5315 switch (evt_code) {
5316 case LPFC_TRAILER_CODE_LINK:
5317 switch (speed_code) {
5318 case LPFC_ASYNC_LINK_SPEED_ZERO:
5319 port_speed = 0;
5320 break;
5321 case LPFC_ASYNC_LINK_SPEED_10MBPS:
5322 port_speed = 10;
5323 break;
5324 case LPFC_ASYNC_LINK_SPEED_100MBPS:
5325 port_speed = 100;
5326 break;
5327 case LPFC_ASYNC_LINK_SPEED_1GBPS:
5328 port_speed = 1000;
5329 break;
5330 case LPFC_ASYNC_LINK_SPEED_10GBPS:
5331 port_speed = 10000;
5332 break;
5333 case LPFC_ASYNC_LINK_SPEED_20GBPS:
5334 port_speed = 20000;
5335 break;
5336 case LPFC_ASYNC_LINK_SPEED_25GBPS:
5337 port_speed = 25000;
5338 break;
5339 case LPFC_ASYNC_LINK_SPEED_40GBPS:
5340 port_speed = 40000;
5341 break;
5342 case LPFC_ASYNC_LINK_SPEED_100GBPS:
5343 port_speed = 100000;
5344 break;
5345 default:
5346 port_speed = 0;
5347 }
5348 break;
5349 case LPFC_TRAILER_CODE_FC:
5350 switch (speed_code) {
5351 case LPFC_FC_LA_SPEED_UNKNOWN:
5352 port_speed = 0;
5353 break;
5354 case LPFC_FC_LA_SPEED_1G:
5355 port_speed = 1000;
5356 break;
5357 case LPFC_FC_LA_SPEED_2G:
5358 port_speed = 2000;
5359 break;
5360 case LPFC_FC_LA_SPEED_4G:
5361 port_speed = 4000;
5362 break;
5363 case LPFC_FC_LA_SPEED_8G:
5364 port_speed = 8000;
5365 break;
5366 case LPFC_FC_LA_SPEED_10G:
5367 port_speed = 10000;
5368 break;
5369 case LPFC_FC_LA_SPEED_16G:
5370 port_speed = 16000;
5371 break;
5372 case LPFC_FC_LA_SPEED_32G:
5373 port_speed = 32000;
5374 break;
5375 case LPFC_FC_LA_SPEED_64G:
5376 port_speed = 64000;
5377 break;
5378 case LPFC_FC_LA_SPEED_128G:
5379 port_speed = 128000;
5380 break;
5381 case LPFC_FC_LA_SPEED_256G:
5382 port_speed = 256000;
5383 break;
5384 default:
5385 port_speed = 0;
5386 }
5387 break;
5388 default:
5389 port_speed = 0;
5390 }
5391 return port_speed;
5392 }
5393
5394
5395
5396
5397
5398
5399
5400
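/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * Records the link state from the ACQE and issues a READ_TOPOLOGY
 * mailbox command for FC ports.  For FCoE ports the READ_TOPOLOGY
 * completion is emulated by filling in a fake mailbox result and
 * invoking the completion handler directly.
 **/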
5401 static void
5402 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5403 struct lpfc_acqe_link *acqe_link)
5404 {
5405 LPFC_MBOXQ_t *pmb;
5406 MAILBOX_t *mb;
5407 struct lpfc_mbx_read_top *la;
5408 uint8_t att_type;
5409 int rc;
5410
5411 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5412 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5413 return;
5414 phba->fcoe_eventtag = acqe_link->event_tag;
5415 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5416 if (!pmb) {
5417 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5418 "0395 The mboxq allocation failed\n");
5419 return;
5420 }
5421
5422 rc = lpfc_mbox_rsrc_prep(phba, pmb);
5423 if (rc) {
5424 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5425 "0396 mailbox allocation failed\n");
5426 goto out_free_pmb;
5427 }
5428
5429
5430 lpfc_els_flush_all_cmd(phba);
5431
5432
5433 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5434
5435
5436 phba->sli.slistat.link_event++;
5437
5438
5439 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
5440 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5441 pmb->vport = phba->pport;
5442
5443
5444 phba->sli4_hba.link_state.speed =
5445 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5446 bf_get(lpfc_acqe_link_speed, acqe_link));
5447 phba->sli4_hba.link_state.duplex =
5448 bf_get(lpfc_acqe_link_duplex, acqe_link);
5449 phba->sli4_hba.link_state.status =
5450 bf_get(lpfc_acqe_link_status, acqe_link);
5451 phba->sli4_hba.link_state.type =
5452 bf_get(lpfc_acqe_link_type, acqe_link);
5453 phba->sli4_hba.link_state.number =
5454 bf_get(lpfc_acqe_link_number, acqe_link);
5455 phba->sli4_hba.link_state.fault =
5456 bf_get(lpfc_acqe_link_fault, acqe_link);
5457 phba->sli4_hba.link_state.logical_speed =
5458 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5459
5460 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5461 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5462 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5463 "Logical speed:%dMbps Fault:%d\n",
5464 phba->sli4_hba.link_state.speed,
5465 			phba->sli4_hba.link_state.duplex,
5466 phba->sli4_hba.link_state.status,
5467 phba->sli4_hba.link_state.type,
5468 phba->sli4_hba.link_state.number,
5469 phba->sli4_hba.link_state.logical_speed,
5470 phba->sli4_hba.link_state.fault);
5471
5472
5473
5474
5475 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5476 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5477 if (rc == MBX_NOT_FINISHED)
5478 goto out_free_pmb;
5479 return;
5480 }
5481
5482
5483
5484
5485
5486
5487 mb = &pmb->u.mb;
5488 mb->mbxStatus = MBX_SUCCESS;
5489
5490
5491 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5492
5493
5494 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5495 la->eventTag = acqe_link->event_tag;
5496 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5497 bf_set(lpfc_mbx_read_top_link_spd, la,
5498 (bf_get(lpfc_acqe_link_speed, acqe_link)));
5499
5500
5501 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5502 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5503 bf_set(lpfc_mbx_read_top_il, la, 0);
5504 bf_set(lpfc_mbx_read_top_pb, la, 0);
5505 bf_set(lpfc_mbx_read_top_fa, la, 0);
5506 bf_set(lpfc_mbx_read_top_mm, la, 0);
5507
5508
5509 lpfc_mbx_cmpl_read_topology(phba, pmb);
5510
5511 return;
5512
5513 out_free_pmb:
5514 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5515 }
5516
5517
5518
5519
5520
5521
5522
5523
5524
5525
5526
5527
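/**
 * lpfc_async_link_speed_to_read_top - Parse async event link speed code
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * Maps the FC-LA speed code from an async event to the READ_TOPOLOGY
 * link speed encoding (LPFC_LINK_SPEED_*); unknown codes map to 0.
 **/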
5528 static uint8_t
5529 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5530 {
5531 uint8_t port_speed;
5532
5533 switch (speed_code) {
5534 case LPFC_FC_LA_SPEED_1G:
5535 port_speed = LPFC_LINK_SPEED_1GHZ;
5536 break;
5537 case LPFC_FC_LA_SPEED_2G:
5538 port_speed = LPFC_LINK_SPEED_2GHZ;
5539 break;
5540 case LPFC_FC_LA_SPEED_4G:
5541 port_speed = LPFC_LINK_SPEED_4GHZ;
5542 break;
5543 case LPFC_FC_LA_SPEED_8G:
5544 port_speed = LPFC_LINK_SPEED_8GHZ;
5545 break;
5546 case LPFC_FC_LA_SPEED_16G:
5547 port_speed = LPFC_LINK_SPEED_16GHZ;
5548 break;
5549 case LPFC_FC_LA_SPEED_32G:
5550 port_speed = LPFC_LINK_SPEED_32GHZ;
5551 break;
5552 case LPFC_FC_LA_SPEED_64G:
5553 port_speed = LPFC_LINK_SPEED_64GHZ;
5554 break;
5555 case LPFC_FC_LA_SPEED_128G:
5556 port_speed = LPFC_LINK_SPEED_128GHZ;
5557 break;
5558 case LPFC_FC_LA_SPEED_256G:
5559 port_speed = LPFC_LINK_SPEED_256GHZ;
5560 break;
5561 default:
5562 port_speed = 0;
5563 break;
5564 }
5565
5566 return port_speed;
5567 }
5568
5569 void
5570 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5571 {
5572 struct rxtable_entry *entry;
5573 int cnt = 0, head, tail, last, start;
5574
5575 head = atomic_read(&phba->rxtable_idx_head);
5576 tail = atomic_read(&phba->rxtable_idx_tail);
5577 if (!phba->rxtable || head == tail) {
5578 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
5579 "4411 Rxtable is empty\n");
5580 return;
5581 }
5582 last = tail;
5583 start = head;
5584
5585
5586 while (start != last) {
5587 if (start)
5588 start--;
5589 else
5590 start = LPFC_MAX_RXMONITOR_ENTRY - 1;
5591 entry = &phba->rxtable[start];
5592 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5593 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
5594 "Lat %lld ASz %lld Info %02d BWUtil %d "
5595 "Int %d slot %d\n",
5596 cnt, entry->max_bytes_per_interval,
5597 entry->total_bytes, entry->rcv_bytes,
5598 entry->avg_io_latency, entry->avg_io_size,
5599 entry->cmf_info, entry->timer_utilization,
5600 entry->timer_interval, start);
5601 cnt++;
5602 if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
5603 return;
5604 }
5605 }
5606
5607
5608
5609
5610
5611
5612
5613
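/**
 * lpfc_cgn_update_stat - Update congestion notification statistics
 * @phba: pointer to lpfc hba data structure.
 * @dtag: FPIN descriptor tag identifying the notification type.
 *
 * Increments the matching notification counter in the congestion info
 * buffer, records the event timestamp, and recomputes the buffer CRC.
 **/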
5614 void
5615 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5616 {
5617 struct lpfc_cgn_info *cp;
5618 struct tm broken;
5619 struct timespec64 cur_time;
5620 u32 cnt;
5621 u32 value;
5622
5623
5624 if (!phba->cgn_i)
5625 return;
5626 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5627 ktime_get_real_ts64(&cur_time);
5628 time64_to_tm(cur_time.tv_sec, 0, &broken);
5629
5630
5631 switch (dtag) {
5632 case ELS_DTAG_LNK_INTEGRITY:
5633 cnt = le32_to_cpu(cp->link_integ_notification);
5634 cnt++;
5635 cp->link_integ_notification = cpu_to_le32(cnt);
5636
5637 cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5638 cp->cgn_stat_lnk_day = broken.tm_mday;
5639 cp->cgn_stat_lnk_year = broken.tm_year - 100;
5640 cp->cgn_stat_lnk_hour = broken.tm_hour;
5641 cp->cgn_stat_lnk_min = broken.tm_min;
5642 cp->cgn_stat_lnk_sec = broken.tm_sec;
5643 break;
5644 case ELS_DTAG_DELIVERY:
5645 cnt = le32_to_cpu(cp->delivery_notification);
5646 cnt++;
5647 cp->delivery_notification = cpu_to_le32(cnt);
5648
5649 cp->cgn_stat_del_month = broken.tm_mon + 1;
5650 cp->cgn_stat_del_day = broken.tm_mday;
5651 cp->cgn_stat_del_year = broken.tm_year - 100;
5652 cp->cgn_stat_del_hour = broken.tm_hour;
5653 cp->cgn_stat_del_min = broken.tm_min;
5654 cp->cgn_stat_del_sec = broken.tm_sec;
5655 break;
5656 case ELS_DTAG_PEER_CONGEST:
5657 cnt = le32_to_cpu(cp->cgn_peer_notification);
5658 cnt++;
5659 cp->cgn_peer_notification = cpu_to_le32(cnt);
5660
5661 cp->cgn_stat_peer_month = broken.tm_mon + 1;
5662 cp->cgn_stat_peer_day = broken.tm_mday;
5663 cp->cgn_stat_peer_year = broken.tm_year - 100;
5664 cp->cgn_stat_peer_hour = broken.tm_hour;
5665 cp->cgn_stat_peer_min = broken.tm_min;
5666 cp->cgn_stat_peer_sec = broken.tm_sec;
5667 break;
5668 case ELS_DTAG_CONGESTION:
5669 cnt = le32_to_cpu(cp->cgn_notification);
5670 cnt++;
5671 cp->cgn_notification = cpu_to_le32(cnt);
5672
5673 cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5674 cp->cgn_stat_cgn_day = broken.tm_mday;
5675 cp->cgn_stat_cgn_year = broken.tm_year - 100;
5676 cp->cgn_stat_cgn_hour = broken.tm_hour;
5677 cp->cgn_stat_cgn_min = broken.tm_min;
5678 cp->cgn_stat_cgn_sec = broken.tm_sec;
5679 }
5680 if (phba->cgn_fpin_frequency &&
5681 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5682 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5683 cp->cgn_stat_npm = value;
5684 }
5685 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5686 LPFC_CGN_CRC32_SEED);
5687 cp->cgn_info_crc = cpu_to_le32(value);
5688 }
5689
5690
5691
5692
5693
5694
5695
5696
5697
5698
5699
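/**
 * lpfc_cgn_save_evt_cnt - Save congestion event counters every minute
 * @phba: pointer to lpfc hba data structure.
 *
 * Called from the CMF timer.  Once a minute it folds the per-interval
 * driver, warning, alarm, latency, and bandwidth counters into the
 * minute slots of the congestion info buffer, rolls the minute data
 * into hourly and daily slots when those periods complete, and
 * recomputes the buffer CRC.
 **/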
5700 static void
5701 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
5702 {
5703 struct lpfc_cgn_info *cp;
5704 struct tm broken;
5705 struct timespec64 cur_time;
5706 uint32_t i, index;
5707 uint16_t value, mvalue;
5708 uint64_t bps;
5709 uint32_t mbps;
5710 uint32_t dvalue, wvalue, lvalue, avalue;
5711 uint64_t latsum;
5712 __le16 *ptr;
5713 __le32 *lptr;
5714 __le16 *mptr;
5715
5716
5717 if (!phba->cgn_i)
5718 return;
5719 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5720
5721 if (time_before(jiffies, phba->cgn_evt_timestamp))
5722 return;
5723 phba->cgn_evt_timestamp = jiffies +
5724 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5725 phba->cgn_evt_minute++;
5726
5727
5728
5729 ktime_get_real_ts64(&cur_time);
5730 time64_to_tm(cur_time.tv_sec, 0, &broken);
5731
5732 if (phba->cgn_fpin_frequency &&
5733 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5734 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5735 cp->cgn_stat_npm = value;
5736 }
5737
5738
5739 lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5740 latsum = atomic64_read(&phba->cgn_latency_evt);
5741 atomic_set(&phba->cgn_latency_evt_cnt, 0);
5742 atomic64_set(&phba->cgn_latency_evt, 0);
5743
5744
5745
5746
5747
5748 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5749 phba->rx_block_cnt = 0;
5750 mvalue = bps / (1024 * 1024);
5751
5752
5753
5754 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5755 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5756 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5757 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5758
5759
5760 value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5761 cp->cgn_lunq = cpu_to_le16(value);
5762
5763
5764
5765
5766
5767
5768
5769 index = ++cp->cgn_index_minute;
5770 if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5771 cp->cgn_index_minute = 0;
5772 index = 0;
5773 }
5774
5775
5776 dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5777 atomic_set(&phba->cgn_driver_evt_cnt, 0);
5778
5779
5780 wvalue = 0;
5781 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5782 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5783 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5784 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5785 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5786
5787
5788 avalue = 0;
5789 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5790 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5791 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5792 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5793
5794
5795
5796
5797 ptr = &cp->cgn_drvr_min[index];
5798 value = (uint16_t)dvalue;
5799 *ptr = cpu_to_le16(value);
5800
5801 ptr = &cp->cgn_warn_min[index];
5802 value = (uint16_t)wvalue;
5803 *ptr = cpu_to_le16(value);
5804
5805 ptr = &cp->cgn_alarm_min[index];
5806 value = (uint16_t)avalue;
5807 *ptr = cpu_to_le16(value);
5808
5809 lptr = &cp->cgn_latency_min[index];
5810 if (lvalue) {
5811 lvalue = (uint32_t)div_u64(latsum, lvalue);
5812 *lptr = cpu_to_le32(lvalue);
5813 } else {
5814 *lptr = 0;
5815 }
5816
5817
5818 mptr = &cp->cgn_bw_min[index];
5819 *mptr = cpu_to_le16(mvalue);
5820
5821 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5822 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5823 index, dvalue, wvalue, *lptr, mvalue, avalue);
5824
5825
5826 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5827
5828
5829
5830 index = ++cp->cgn_index_hour;
5831 if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5832 cp->cgn_index_hour = 0;
5833 index = 0;
5834 }
5835
5836 dvalue = 0;
5837 wvalue = 0;
5838 lvalue = 0;
5839 avalue = 0;
5840 mvalue = 0;
5841 mbps = 0;
5842 for (i = 0; i < LPFC_MIN_HOUR; i++) {
5843 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5844 wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5845 lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5846 mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5847 avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5848 }
5849 if (lvalue)
5850 lvalue /= LPFC_MIN_HOUR;
5851 if (mbps)
5852 mvalue = mbps / LPFC_MIN_HOUR;
5853
5854 lptr = &cp->cgn_drvr_hr[index];
5855 *lptr = cpu_to_le32(dvalue);
5856 lptr = &cp->cgn_warn_hr[index];
5857 *lptr = cpu_to_le32(wvalue);
5858 lptr = &cp->cgn_latency_hr[index];
5859 *lptr = cpu_to_le32(lvalue);
5860 mptr = &cp->cgn_bw_hr[index];
5861 *mptr = cpu_to_le16(mvalue);
5862 lptr = &cp->cgn_alarm_hr[index];
5863 *lptr = cpu_to_le32(avalue);
5864
5865 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5866 "2419 Congestion Info - hour "
5867 "(%d): %d %d %d %d %d\n",
5868 index, dvalue, wvalue, lvalue, mvalue, avalue);
5869 }
5870
5871
5872 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5873
5874
5875
5876
5877 index = ++cp->cgn_index_day;
5878 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5879 cp->cgn_index_day = 0;
5880 index = 0;
5881 }
5882
5883
5884
5885
5886
5887
5888
5889 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
5890 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
5891
5892 cp->cgn_info_month = broken.tm_mon + 1;
5893 cp->cgn_info_day = broken.tm_mday;
5894 cp->cgn_info_year = broken.tm_year - 100;
5895 cp->cgn_info_hour = broken.tm_hour;
5896 cp->cgn_info_minute = broken.tm_min;
5897 cp->cgn_info_second = broken.tm_sec;
5898
5899 lpfc_printf_log
5900 (phba, KERN_INFO, LOG_CGN_MGMT,
5901 "2646 CGNInfo idx0 Start Time: "
5902 "%d/%d/%d %d:%d:%d\n",
5903 cp->cgn_info_day, cp->cgn_info_month,
5904 cp->cgn_info_year, cp->cgn_info_hour,
5905 cp->cgn_info_minute, cp->cgn_info_second);
5906 }
5907
5908 dvalue = 0;
5909 wvalue = 0;
5910 lvalue = 0;
5911 mvalue = 0;
5912 mbps = 0;
5913 avalue = 0;
5914 for (i = 0; i < LPFC_HOUR_DAY; i++) {
5915 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5916 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5917 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5918 mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5919 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5920 }
5921 if (lvalue)
5922 lvalue /= LPFC_HOUR_DAY;
5923 if (mbps)
5924 mvalue = mbps / LPFC_HOUR_DAY;
5925
5926 lptr = &cp->cgn_drvr_day[index];
5927 *lptr = cpu_to_le32(dvalue);
5928 lptr = &cp->cgn_warn_day[index];
5929 *lptr = cpu_to_le32(wvalue);
5930 lptr = &cp->cgn_latency_day[index];
5931 *lptr = cpu_to_le32(lvalue);
5932 mptr = &cp->cgn_bw_day[index];
5933 *mptr = cpu_to_le16(mvalue);
5934 lptr = &cp->cgn_alarm_day[index];
5935 *lptr = cpu_to_le32(avalue);
5936
5937 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5938 "2420 Congestion Info - daily (%d): "
5939 "%d %d %d %d %d\n",
5940 index, dvalue, wvalue, lvalue, mvalue, avalue);
5941
5942
5943
5944
5945
5946 if (index == (LPFC_MAX_CGN_DAYS - 1)) {
5947 phba->hba_flag |= HBA_CGN_DAY_WRAP;
5948 ktime_get_real_ts64(&phba->cgn_daily_ts);
5949 }
5950 }
5951
5952
5953 value = phba->cgn_fpin_frequency;
5954 cp->cgn_warn_freq = cpu_to_le16(value);
5955 cp->cgn_alarm_freq = cpu_to_le16(value);
5956
5957 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5958 LPFC_CGN_CRC32_SEED);
5959 cp->cgn_info_crc = cpu_to_le32(lvalue);
5960 }
5961
5962
5963
5964
5965
5966
5967
5968
5969
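/**
 * lpfc_calc_cmf_latency - Latency from the start of the CMF timer interval
 * @phba: pointer to lpfc hba data structure.
 *
 * Return: the number of milliseconds elapsed since cmf_latency was
 * stamped at the start of the current CMF timer interval.
 **/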
5970 uint32_t
5971 lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5972 {
5973 struct timespec64 cmpl_time;
5974 uint32_t msec = 0;
5975
5976 ktime_get_real_ts64(&cmpl_time);
5977
5978
5979
5980
5981 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5982 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5983 NSEC_PER_MSEC;
5984 } else {
5985 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5986 msec = (cmpl_time.tv_sec -
5987 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5988 msec += ((cmpl_time.tv_nsec -
5989 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5990 } else {
5991 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5992 1) * MSEC_PER_SEC;
5993 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5994 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
5995 }
5996 }
5997 return msec;
5998 }
5999
6000
6001
6002
6003
6004
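/**
 * lpfc_cmf_timer - CMF (congestion management) timer handler
 * @timer: the hrtimer embedded in the lpfc_hba structure.
 *
 * Fires every LPFC_CMF_INTERVAL.  Totals the per-CPU receive byte,
 * latency, and IO counters for the interval, issues a CMF_SYNC_WQE in
 * managed mode, updates the allowed bytes-per-interval, records an
 * rxtable entry for the rx monitor, and restarts itself.
 **/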
6005 static enum hrtimer_restart
6006 lpfc_cmf_timer(struct hrtimer *timer)
6007 {
6008 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
6009 cmf_timer);
6010 struct rxtable_entry *entry;
6011 uint32_t io_cnt;
6012 uint32_t head, tail;
6013 uint32_t busy, max_read;
6014 uint64_t total, rcv, lat, mbpi, extra, cnt;
6015 int timer_interval = LPFC_CMF_INTERVAL;
6016 uint32_t ms;
6017 struct lpfc_cgn_stat *cgs;
6018 int cpu;
6019
6020
6021 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6022 !phba->cmf_latency.tv_sec) {
6023 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
6024 "6224 CMF timer exit: %d %lld\n",
6025 phba->cmf_active_mode,
6026 (uint64_t)phba->cmf_latency.tv_sec);
6027 return HRTIMER_NORESTART;
6028 }
6029
6030
6031
6032
6033 if (!phba->pport)
6034 goto skip;
6035
6036
6037
6038
6039 atomic_set(&phba->cmf_stop_io, 1);
6040
6041
6042
6043
6044
6045
6046 ms = lpfc_calc_cmf_latency(phba);
6047
6048
6049
6050
6051
6052
6053 ktime_get_real_ts64(&phba->cmf_latency);
6054
6055 phba->cmf_link_byte_count =
6056 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
6057
6058
6059 total = 0;
6060 io_cnt = 0;
6061 lat = 0;
6062 rcv = 0;
6063 for_each_present_cpu(cpu) {
6064 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
6065 total += atomic64_xchg(&cgs->total_bytes, 0);
6066 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
6067 lat += atomic64_xchg(&cgs->rx_latency, 0);
6068 rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
6069 }
6070
6071
6072
6073
6074
6075
6076 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6077 phba->link_state != LPFC_LINK_DOWN &&
6078 phba->hba_flag & HBA_SETUP) {
6079 mbpi = phba->cmf_last_sync_bw;
6080 phba->cmf_last_sync_bw = 0;
6081 extra = 0;
6082
6083
6084
6085
6086
6087
6088 if (ms && ms < LPFC_CMF_INTERVAL) {
6089 cnt = div_u64(total, ms);
6090 cnt *= LPFC_CMF_INTERVAL;
6091
6092
6093
6094
6095 if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
6096 cnt = mbpi;
6097
6098 extra = cnt - total;
6099 }
6100 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
6101 } else {
6102
6103
6104
6105 mbpi = phba->cmf_link_byte_count;
6106 extra = 0;
6107 }
6108 phba->cmf_timer_cnt++;
6109
6110 if (io_cnt) {
6111
6112 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6113 atomic64_add(lat, &phba->cgn_latency_evt);
6114 }
6115 busy = atomic_xchg(&phba->cmf_busy, 0);
6116 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6117
6118
6119 if (mbpi) {
6120 if (mbpi > phba->cmf_link_byte_count ||
6121 phba->cmf_active_mode == LPFC_CFG_MONITOR)
6122 mbpi = phba->cmf_link_byte_count;
6123
6124
6125
6126
6127 if (mbpi != phba->cmf_max_bytes_per_interval)
6128 phba->cmf_max_bytes_per_interval = mbpi;
6129 }
6130
6131
6132 if (phba->rxtable) {
6133 head = atomic_xchg(&phba->rxtable_idx_head,
6134 LPFC_RXMONITOR_TABLE_IN_USE);
6135 entry = &phba->rxtable[head];
6136 entry->total_bytes = total;
6137 entry->cmf_bytes = total + extra;
6138 entry->rcv_bytes = rcv;
6139 entry->cmf_busy = busy;
6140 entry->cmf_info = phba->cmf_active_info;
6141 if (io_cnt) {
6142 entry->avg_io_latency = div_u64(lat, io_cnt);
6143 entry->avg_io_size = div_u64(rcv, io_cnt);
6144 } else {
6145 entry->avg_io_latency = 0;
6146 entry->avg_io_size = 0;
6147 }
6148 entry->max_read_cnt = max_read;
6149 entry->io_cnt = io_cnt;
6150 entry->max_bytes_per_interval = mbpi;
6151 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6152 entry->timer_utilization = phba->cmf_last_ts;
6153 else
6154 entry->timer_utilization = ms;
6155 entry->timer_interval = ms;
6156 phba->cmf_last_ts = 0;
6157
6158
6159 head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6160 tail = atomic_read(&phba->rxtable_idx_tail);
6161 if (head == tail) {
6162 tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6163 atomic_set(&phba->rxtable_idx_tail, tail);
6164 }
6165 atomic_set(&phba->rxtable_idx_head, head);
6166 }
6167
6168 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6169
6170
6171
6172 if (mbpi && total > mbpi)
6173 atomic_inc(&phba->cgn_driver_evt_cnt);
6174 }
6175 phba->rx_block_cnt += div_u64(rcv, 512);
6176
6177
6178 lpfc_cgn_save_evt_cnt(phba);
6179
6180 phba->hba_flag &= ~HBA_SHORT_CMF;
6181
6182
6183
6184
6185
6186 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
6187 phba->cgn_evt_timestamp)) {
6188 timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
6189 jiffies);
6190 if (timer_interval <= 0)
6191 timer_interval = LPFC_CMF_INTERVAL;
6192 else
6193 phba->hba_flag |= HBA_SHORT_CMF;
6194
6195
6196
6197
6198 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
6199 timer_interval, 1000);
6200 if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
6201 phba->cmf_max_bytes_per_interval =
6202 phba->cmf_link_byte_count;
6203 }
6204
6205
6206
6207
6208 if (atomic_xchg(&phba->cmf_bw_wait, 0))
6209 queue_work(phba->wq, &phba->unblock_request_work);
6210
6211
6212 atomic_set(&phba->cmf_stop_io, 0);
6213
6214 skip:
6215 hrtimer_forward_now(timer,
6216 ktime_set(0, timer_interval * NSEC_PER_MSEC));
6217 return HRTIMER_RESTART;
6218 }
6219
6220 #define trunk_link_status(__idx)\
6221 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6222 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6223 "Link up" : "Link down") : "NA"
6224
6225 #define trunk_port_fault(__idx)\
6226 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6227 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
6228
6229 static void
6230 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6231 struct lpfc_acqe_fc_la *acqe_fc)
6232 {
6233 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6234 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6235
6236 phba->sli4_hba.link_state.speed =
6237 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6238 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6239
6240 phba->sli4_hba.link_state.logical_speed =
6241 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6242
6243 phba->fc_linkspeed =
6244 lpfc_async_link_speed_to_read_top(
6245 phba,
6246 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6247
6248 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6249 phba->trunk_link.link0.state =
6250 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6251 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6252 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6253 }
6254 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6255 phba->trunk_link.link1.state =
6256 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6257 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6258 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6259 }
6260 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6261 phba->trunk_link.link2.state =
6262 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6263 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6264 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6265 }
6266 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6267 phba->trunk_link.link3.state =
6268 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6269 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6270 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6271 }
6272
6273 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6274 "2910 Async FC Trunking Event - Speed:%d\n"
6275 "\tLogical speed:%d "
6276 "port0: %s port1: %s port2: %s port3: %s\n",
6277 phba->sli4_hba.link_state.speed,
6278 phba->sli4_hba.link_state.logical_speed,
6279 trunk_link_status(0), trunk_link_status(1),
6280 trunk_link_status(2), trunk_link_status(3));
6281
6282 if (phba->cmf_active_mode != LPFC_CFG_OFF)
6283 lpfc_cmf_signal_init(phba);
6284
6285 if (port_fault)
6286 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6287 "3202 trunk error:0x%x (%s) seen on port0:%s "
6288 			/*
6289 			 * Trunk error codes are only defined up to 0xA at
6290 			 * present; anything larger is reported as
6291 			 * "UNDEFINED. update driver."
6292 			 */
6293 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6294 "UNDEFINED. update driver." : trunk_errmsg[err],
6295 trunk_port_fault(0), trunk_port_fault(1),
6296 trunk_port_fault(2), trunk_port_fault(3));
6297 }
6298
6299
6300
6301
6302
6303
6304
6305
6306
6307
6308
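/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async FC completion queue entry.
 *
 * Records the FC link state from the ACQE, handles trunking events,
 * and issues a READ_TOPOLOGY mailbox command.  For link-down style
 * attention types the READ_TOPOLOGY completion is invoked directly
 * with an emulated mailbox result.
 **/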
6309 static void
6310 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6311 {
6312 LPFC_MBOXQ_t *pmb;
6313 MAILBOX_t *mb;
6314 struct lpfc_mbx_read_top *la;
6315 int rc;
6316
6317 if (bf_get(lpfc_trailer_type, acqe_fc) !=
6318 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6319 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6320 				"2895 Non-FC link event detected (%d)\n",
6321 bf_get(lpfc_trailer_type, acqe_fc));
6322 return;
6323 }
6324
6325 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6326 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6327 lpfc_update_trunk_link_status(phba, acqe_fc);
6328 return;
6329 }
6330
6331
6332 phba->sli4_hba.link_state.speed =
6333 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6334 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6335 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6336 phba->sli4_hba.link_state.topology =
6337 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6338 phba->sli4_hba.link_state.status =
6339 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6340 phba->sli4_hba.link_state.type =
6341 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6342 phba->sli4_hba.link_state.number =
6343 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6344 phba->sli4_hba.link_state.fault =
6345 bf_get(lpfc_acqe_link_fault, acqe_fc);
6346
6347 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6348 LPFC_FC_LA_TYPE_LINK_DOWN)
6349 phba->sli4_hba.link_state.logical_speed = 0;
6350 else if (!phba->sli4_hba.conf_trunk)
6351 phba->sli4_hba.link_state.logical_speed =
6352 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6353
6354 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6355 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6356 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6357 "%dMbps Fault:%d\n",
6358 phba->sli4_hba.link_state.speed,
6359 phba->sli4_hba.link_state.topology,
6360 phba->sli4_hba.link_state.status,
6361 phba->sli4_hba.link_state.type,
6362 phba->sli4_hba.link_state.number,
6363 phba->sli4_hba.link_state.logical_speed,
6364 phba->sli4_hba.link_state.fault);
6365 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6366 if (!pmb) {
6367 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6368 "2897 The mboxq allocation failed\n");
6369 return;
6370 }
6371 rc = lpfc_mbox_rsrc_prep(phba, pmb);
6372 if (rc) {
6373 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6374 "2898 The mboxq prep failed\n");
6375 goto out_free_pmb;
6376 }
6377
6378
6379 lpfc_els_flush_all_cmd(phba);
6380
6381
6382 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6383
6384
6385 phba->sli.slistat.link_event++;
6386
6387
6388 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
6389 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6390 pmb->vport = phba->pport;
6391
6392 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6393 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6394
6395 switch (phba->sli4_hba.link_state.status) {
6396 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6397 phba->link_flag |= LS_MDS_LINK_DOWN;
6398 break;
6399 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6400 phba->link_flag |= LS_MDS_LOOPBACK;
6401 break;
6402 default:
6403 break;
6404 }
6405
6406
6407 mb = &pmb->u.mb;
6408 mb->mbxStatus = MBX_SUCCESS;
6409
6410
6411 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6412
6413
6414 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6415 la->eventTag = acqe_fc->event_tag;
6416
6417 if (phba->sli4_hba.link_state.status ==
6418 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6419 bf_set(lpfc_mbx_read_top_att_type, la,
6420 LPFC_FC_LA_TYPE_UNEXP_WWPN);
6421 } else {
6422 bf_set(lpfc_mbx_read_top_att_type, la,
6423 LPFC_FC_LA_TYPE_LINK_DOWN);
6424 }
6425
6426 lpfc_mbx_cmpl_read_topology(phba, pmb);
6427
6428 return;
6429 }
6430
6431 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6432 if (rc == MBX_NOT_FINISHED)
6433 goto out_free_pmb;
6434 return;
6435
6436 out_free_pmb:
6437 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6438 }
6439
6440
6441
6442
6443
6444
6445
6446
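/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * Handles SLI port events such as temperature thresholds, misconfigured
 * optics, remote D_Port tests, port parameter changes, FA-PWWN
 * misconfiguration, EEPROM failures, and congestion signals.
 **/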
6447 static void
6448 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6449 {
6450 char port_name;
6451 char message[128];
6452 uint8_t status;
6453 uint8_t evt_type;
6454 uint8_t operational = 0;
6455 struct temp_event temp_event_data;
6456 struct lpfc_acqe_misconfigured_event *misconfigured;
6457 struct lpfc_acqe_cgn_signal *cgn_signal;
6458 struct Scsi_Host *shost;
6459 struct lpfc_vport **vports;
6460 int rc, i, cnt;
6461
6462 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6463
6464 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6465 "2901 Async SLI event - Type:%d, Event Data: x%08x "
6466 "x%08x x%08x x%08x\n", evt_type,
6467 acqe_sli->event_data1, acqe_sli->event_data2,
6468 acqe_sli->reserved, acqe_sli->trailer);
6469
6470 port_name = phba->Port[0];
6471 if (port_name == 0x00)
6472 port_name = '?';
6473
6474 switch (evt_type) {
6475 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6476 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6477 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6478 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6479
6480 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6481 			"3190 Over Temperature:%d Celsius - Port Name %c\n",
6482 acqe_sli->event_data1, port_name);
6483
6484 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6485 shost = lpfc_shost_from_vport(phba->pport);
6486 fc_host_post_vendor_event(shost, fc_get_event_number(),
6487 sizeof(temp_event_data),
6488 (char *)&temp_event_data,
6489 SCSI_NL_VID_TYPE_PCI
6490 | PCI_VENDOR_ID_EMULEX);
6491 break;
6492 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6493 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6494 temp_event_data.event_code = LPFC_NORMAL_TEMP;
6495 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6496
6497 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6498 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6499 acqe_sli->event_data1, port_name);
6500
6501 shost = lpfc_shost_from_vport(phba->pport);
6502 fc_host_post_vendor_event(shost, fc_get_event_number(),
6503 sizeof(temp_event_data),
6504 (char *)&temp_event_data,
6505 SCSI_NL_VID_TYPE_PCI
6506 | PCI_VENDOR_ID_EMULEX);
6507 break;
6508 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6509 misconfigured = (struct lpfc_acqe_misconfigured_event *)
6510 &acqe_sli->event_data1;
6511
6512
6513 switch (phba->sli4_hba.lnk_info.lnk_no) {
6514 case LPFC_LINK_NUMBER_0:
6515 status = bf_get(lpfc_sli_misconfigured_port0_state,
6516 &misconfigured->theEvent);
6517 operational = bf_get(lpfc_sli_misconfigured_port0_op,
6518 &misconfigured->theEvent);
6519 break;
6520 case LPFC_LINK_NUMBER_1:
6521 status = bf_get(lpfc_sli_misconfigured_port1_state,
6522 &misconfigured->theEvent);
6523 operational = bf_get(lpfc_sli_misconfigured_port1_op,
6524 &misconfigured->theEvent);
6525 break;
6526 case LPFC_LINK_NUMBER_2:
6527 status = bf_get(lpfc_sli_misconfigured_port2_state,
6528 &misconfigured->theEvent);
6529 operational = bf_get(lpfc_sli_misconfigured_port2_op,
6530 &misconfigured->theEvent);
6531 break;
6532 case LPFC_LINK_NUMBER_3:
6533 status = bf_get(lpfc_sli_misconfigured_port3_state,
6534 &misconfigured->theEvent);
6535 operational = bf_get(lpfc_sli_misconfigured_port3_op,
6536 &misconfigured->theEvent);
6537 break;
6538 default:
6539 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6540 "3296 "
6541 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6542 "event: Invalid link %d",
6543 phba->sli4_hba.lnk_info.lnk_no);
6544 return;
6545 }
6546
6547
6548 if (phba->sli4_hba.lnk_info.optic_state == status)
6549 return;
6550
6551 switch (status) {
6552 case LPFC_SLI_EVENT_STATUS_VALID:
6553 sprintf(message, "Physical Link is functional");
6554 break;
6555 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6556 sprintf(message, "Optics faulted/incorrectly "
6557 "installed/not installed - Reseat optics, "
6558 "if issue not resolved, replace.");
6559 break;
6560 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6561 sprintf(message,
6562 "Optics of two types installed - Remove one "
6563 "optic or install matching pair of optics.");
6564 break;
6565 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6566 sprintf(message, "Incompatible optics - Replace with "
6567 "compatible optics for card to function.");
6568 break;
6569 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6570 sprintf(message, "Unqualified optics - Replace with "
6571 "Avago optics for Warranty and Technical "
6572 "Support - Link is%s operational",
6573 (operational) ? " not" : "");
6574 break;
6575 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6576 sprintf(message, "Uncertified optics - Replace with "
6577 "Avago-certified optics to enable link "
6578 "operation - Link is%s operational",
6579 (operational) ? " not" : "");
6580 break;
6581 default:
6582
6583 sprintf(message, "Unknown event status x%02x", status);
6584 break;
6585 }
6586
6587
6588 rc = lpfc_sli4_read_config(phba);
6589 if (rc) {
6590 phba->lmt = 0;
6591 lpfc_printf_log(phba, KERN_ERR,
6592 LOG_TRACE_EVENT,
6593 "3194 Unable to retrieve supported "
6594 "speeds, rc = 0x%x\n", rc);
6595 }
6596 rc = lpfc_sli4_refresh_params(phba);
6597 if (rc) {
6598 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6599 "3174 Unable to update pls support, "
6600 "rc x%x\n", rc);
6601 }
6602 vports = lpfc_create_vport_work_array(phba);
6603 if (vports != NULL) {
6604 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6605 i++) {
6606 shost = lpfc_shost_from_vport(vports[i]);
6607 lpfc_host_supported_speeds_set(shost);
6608 }
6609 }
6610 lpfc_destroy_vport_work_array(phba, vports);
6611
6612 phba->sli4_hba.lnk_info.optic_state = status;
6613 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6614 "3176 Port Name %c %s\n", port_name, message);
6615 break;
6616 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6617 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6618 "3192 Remote DPort Test Initiated - "
6619 "Event Data1:x%08x Event Data2: x%08x\n",
6620 acqe_sli->event_data1, acqe_sli->event_data2);
6621 break;
6622 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6623
6624 lpfc_sli4_cgn_parm_chg_evt(phba);
6625 break;
6626 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6627
6628
6629
6630
6631
6632 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
6633 "2699 Misconfigured FA-PWWN - Attached device "
6634 "does not support FA-PWWN\n");
6635 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
6636 memset(phba->pport->fc_portname.u.wwn, 0,
6637 sizeof(struct lpfc_name));
6638 break;
6639 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6640
6641 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6642 "2518 EEPROM failure - "
6643 "Event Data1: x%08x Event Data2: x%08x\n",
6644 acqe_sli->event_data1, acqe_sli->event_data2);
6645 break;
6646 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6647 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6648 break;
6649 cgn_signal = (struct lpfc_acqe_cgn_signal *)
6650 &acqe_sli->event_data1;
6651 phba->cgn_acqe_cnt++;
6652
6653 cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6654 atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6655 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6656
6657
6658
6659
6660 if (cgn_signal->alarm_cnt) {
6661 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6662
6663 atomic_add(cgn_signal->alarm_cnt,
6664 &phba->cgn_sync_alarm_cnt);
6665 }
6666 } else if (cnt) {
6667
6668 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6669 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6670
6671 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6672 }
6673 }
6674 break;
6675 default:
6676 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6677 "3193 Unrecognized SLI event, type: 0x%x",
6678 evt_type);
6679 break;
6680 }
6681 }
6682
6683
6684
6685
6686
6687
6688
6689
6690
6691
6692
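/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to lpfc virtual N_Port data structure.
 *
 * Marks the vport as having received a CVL, brings its link down, and
 * flushes pending mailbox commands.
 *
 * Return: the fabric ndlp on success, NULL if the CVL cannot be handled.
 **/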
6693 static struct lpfc_nodelist *
6694 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6695 {
6696 struct lpfc_nodelist *ndlp;
6697 struct Scsi_Host *shost;
6698 struct lpfc_hba *phba;
6699
6700 if (!vport)
6701 return NULL;
6702 phba = vport->phba;
6703 if (!phba)
6704 return NULL;
6705 ndlp = lpfc_findnode_did(vport, Fabric_DID);
6706 if (!ndlp) {
6707
6708 ndlp = lpfc_nlp_init(vport, Fabric_DID);
6709 if (!ndlp)
6710 return NULL;
6711
6712 ndlp->nlp_type |= NLP_FABRIC;
6713
6714 lpfc_enqueue_node(vport, ndlp);
6715 }
6716 if ((phba->pport->port_state < LPFC_FLOGI) &&
6717 (phba->pport->port_state != LPFC_VPORT_FAILED))
6718 return NULL;
6719
6720 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6721 && (vport->port_state != LPFC_VPORT_FAILED))
6722 return NULL;
6723 shost = lpfc_shost_from_vport(vport);
6724 if (!shost)
6725 return NULL;
6726 lpfc_linkdown_port(vport);
6727 lpfc_cleanup_pending_mbox(vport);
6728 spin_lock_irq(shost->host_lock);
6729 vport->fc_flag |= FC_VPORT_CVL_RCVD;
6730 spin_unlock_irq(shost->host_lock);
6731
6732 return ndlp;
6733 }
6734
6735
6736
6737
6738
6739
6740
6741
6742 static void
6743 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6744 {
6745 struct lpfc_vport **vports;
6746 int i;
6747
6748 vports = lpfc_create_vport_work_array(phba);
6749 if (vports)
6750 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6751 lpfc_sli4_perform_vport_cvl(vports[i]);
6752 lpfc_destroy_vport_work_array(phba, vports);
6753 }
6754
6755
6756
6757
6758
6759
6760
6761
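/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async FCoE FIP completion queue entry.
 *
 * Handles new-FCF, FCF parameter change, FCF table full, FCF dead, and
 * clear-virtual-link events, starting FCF table scans or fast failover
 * as required.
 **/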
6762 static void
6763 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6764 struct lpfc_acqe_fip *acqe_fip)
6765 {
6766 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6767 int rc;
6768 struct lpfc_vport *vport;
6769 struct lpfc_nodelist *ndlp;
6770 int active_vlink_present;
6771 struct lpfc_vport **vports;
6772 int i;
6773
6774 phba->fc_eventTag = acqe_fip->event_tag;
6775 phba->fcoe_eventtag = acqe_fip->event_tag;
6776 switch (event_type) {
6777 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6778 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6779 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6780 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6781 "2546 New FCF event, evt_tag:x%x, "
6782 "index:x%x\n",
6783 acqe_fip->event_tag,
6784 acqe_fip->index);
6785 else
6786 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6787 LOG_DISCOVERY,
6788 "2788 FCF param modified event, "
6789 "evt_tag:x%x, index:x%x\n",
6790 acqe_fip->event_tag,
6791 acqe_fip->index);
6792 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6793
6794
6795
6796
6797
6798 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6799 LOG_DISCOVERY,
6800 "2779 Read FCF (x%x) for updating "
6801 "roundrobin FCF failover bmask\n",
6802 acqe_fip->index);
6803 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6804 }
6805
6806
6807 spin_lock_irq(&phba->hbalock);
6808 if (phba->hba_flag & FCF_TS_INPROG) {
6809 spin_unlock_irq(&phba->hbalock);
6810 break;
6811 }
6812
6813 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6814 spin_unlock_irq(&phba->hbalock);
6815 break;
6816 }
6817
6818
6819 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6820 spin_unlock_irq(&phba->hbalock);
6821 break;
6822 }
6823 spin_unlock_irq(&phba->hbalock);
6824
6825
6826 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6827 "2770 Start FCF table scan per async FCF "
6828 "event, evt_tag:x%x, index:x%x\n",
6829 acqe_fip->event_tag, acqe_fip->index);
6830 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6831 LPFC_FCOE_FCF_GET_FIRST);
6832 if (rc)
6833 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6834 "2547 Issue FCF scan read FCF mailbox "
6835 "command failed (x%x)\n", rc);
6836 break;
6837
6838 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6839 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6840 "2548 FCF Table full count 0x%x tag 0x%x\n",
6841 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6842 acqe_fip->event_tag);
6843 break;
6844
6845 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6846 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6847 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6848 "2549 FCF (x%x) disconnected from network, "
6849 "tag:x%x\n", acqe_fip->index,
6850 acqe_fip->event_tag);
6851
6852
6853
6854
6855 spin_lock_irq(&phba->hbalock);
6856 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6857 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6858 spin_unlock_irq(&phba->hbalock);
6859
6860 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6861 break;
6862 }
6863 spin_unlock_irq(&phba->hbalock);
6864
6865
6866 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6867 break;
6868
6869
6870
6871
6872
6873
6874
6875 spin_lock_irq(&phba->hbalock);
6876
6877 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6878 spin_unlock_irq(&phba->hbalock);
6879
6880 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6881 "2771 Start FCF fast failover process due to "
6882 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6883 "\n", acqe_fip->event_tag, acqe_fip->index);
6884 rc = lpfc_sli4_redisc_fcf_table(phba);
6885 if (rc) {
6886 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6887 LOG_TRACE_EVENT,
6888 "2772 Issue FCF rediscover mailbox "
6889 "command failed, fail through to FCF "
6890 "dead event\n");
6891 spin_lock_irq(&phba->hbalock);
6892 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6893 spin_unlock_irq(&phba->hbalock);
6894
6895
6896
6897
6898 lpfc_sli4_fcf_dead_failthrough(phba);
6899 } else {
6900
6901 lpfc_sli4_clear_fcf_rr_bmask(phba);
6902
6903
6904
6905
6906 lpfc_sli4_perform_all_vport_cvl(phba);
6907 }
6908 break;
6909 case LPFC_FIP_EVENT_TYPE_CVL:
6910 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6911 lpfc_printf_log(phba, KERN_ERR,
6912 LOG_TRACE_EVENT,
6913 "2718 Clear Virtual Link Received for VPI 0x%x"
6914 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6915
6916 vport = lpfc_find_vport_by_vpid(phba,
6917 acqe_fip->index);
6918 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6919 if (!ndlp)
6920 break;
6921 active_vlink_present = 0;
6922
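/* Check whether any other vport still has an active virtual link,
 * i.e. it has not received a CVL and has progressed past FDISC.
 */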
6923 vports = lpfc_create_vport_work_array(phba);
6924 if (vports) {
6925 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6926 i++) {
6927 if ((!(vports[i]->fc_flag &
6928 FC_VPORT_CVL_RCVD)) &&
6929 (vports[i]->port_state > LPFC_FDISC)) {
6930 active_vlink_present = 1;
6931 break;
6932 }
6933 }
6934 lpfc_destroy_vport_work_array(phba, vports);
6935 }
6936
6937
6938
6939
6940
6941
6942 if (!(vport->load_flag & FC_UNLOADING) &&
6943 active_vlink_present) {
6944
6945
6946
6947
6948 mod_timer(&ndlp->nlp_delayfunc,
6949 jiffies + msecs_to_jiffies(1000));
6950 spin_lock_irq(&ndlp->lock);
6951 ndlp->nlp_flag |= NLP_DELAY_TMO;
6952 spin_unlock_irq(&ndlp->lock);
6953 ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6954 vport->port_state = LPFC_FDISC;
6955 } else {
6956
6957
6958
6959
6960
6961
6962
6963 spin_lock_irq(&phba->hbalock);
6964 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6965 spin_unlock_irq(&phba->hbalock);
6966 break;
6967 }
6968
6969 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6970 spin_unlock_irq(&phba->hbalock);
6971 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6972 LOG_DISCOVERY,
6973 "2773 Start FCF failover per CVL, "
6974 "evt_tag:x%x\n", acqe_fip->event_tag);
6975 rc = lpfc_sli4_redisc_fcf_table(phba);
6976 if (rc) {
6977 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6978 LOG_TRACE_EVENT,
6979 "2774 Issue FCF rediscover "
6980 "mailbox command failed, "
6981 "through to CVL event\n");
6982 spin_lock_irq(&phba->hbalock);
6983 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6984 spin_unlock_irq(&phba->hbalock);
6985
6986
6987
6988
6989 lpfc_retry_pport_discovery(phba);
6990 } else
6991
6992
6993
6994
6995 lpfc_sli4_clear_fcf_rr_bmask(phba);
6996 }
6997 break;
6998 default:
6999 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7000 "0288 Unknown FCoE event type 0x%x event tag "
7001 "0x%x\n", event_type, acqe_fip->event_tag);
7002 break;
7003 }
7004 }
7005
7006 /**
7007  * lpfc_sli4_async_dcbx_evt - Process the asynchronous DCBX event
7008  * @phba: pointer to lpfc hba data structure.
7009  * @acqe_dcbx: pointer to the async DCBX completion queue entry.
7010  *
7011  * DCBX events are not handled yet; the event is only logged.
7012  */
7013 static void
7014 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
7015 struct lpfc_acqe_dcbx *acqe_dcbx)
7016 {
7017 phba->fc_eventTag = acqe_dcbx->event_tag;
7018 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7019 "0290 The SLI4 DCBX asynchronous event is not "
7020 "handled yet\n");
7021 }
7022
7023
7024 /**
7025  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
7026  * @phba: pointer to lpfc hba data structure.
7027  * @acqe_grp5: pointer to the async grp5 completion queue entry.
7028  *
7029  * Group5 events report the logical link speed; record the new speed
7030  * (in Mbps) and log the transition.
7031  */
7032 static void
7033 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
7034 struct lpfc_acqe_grp5 *acqe_grp5)
7035 {
7036 uint16_t prev_ll_spd;
7037
7038 phba->fc_eventTag = acqe_grp5->event_tag;
7039 phba->fcoe_eventtag = acqe_grp5->event_tag;
7040 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
7041 phba->sli4_hba.link_state.logical_speed =
7042 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
7043 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7044 "2789 GRP5 Async Event: Updating logical link speed "
7045 "from %dMbps to %dMbps\n", prev_ll_spd,
7046 phba->sli4_hba.link_state.logical_speed);
7047 }
7048
7049
7050 /**
7051  * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
7052  * @phba: pointer to lpfc hba data structure.
7053  *
7054  * Reinitialize the congestion statistics when a cmstat event is received.
7055  */
7056 static void
7057 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7058 {
7059 if (!phba->cgn_i)
7060 return;
7061 lpfc_init_congestion_stat(phba);
7062 }
7063
7064
7065 /**
7066  * lpfc_cgn_params_val - Validate FW congestion parameters
7067  * @phba: pointer to lpfc hba data structure.
7068  * @p_cfg_param: pointer to FW provided congestion parameters.
7069  *
7070  * Range check the congestion mode and force it to OFF if it is out of range.
7071  */
7072 static void
7073 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7074 {
7075 spin_lock_irq(&phba->hbalock);
7076
7077 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7078 LPFC_CFG_MONITOR)) {
7079 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7080 "6225 CMF mode param out of range: %d\n",
7081 p_cfg_param->cgn_param_mode);
7082 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7083 }
7084
7085 spin_unlock_irq(&phba->hbalock);
7086 }
7087
7088
7089
7090
7091 /**
7092  * lpfc_cgn_params_parse - Process a FW congestion parameter buffer
7093  * @phba: pointer to lpfc hba data structure.
7094  * @p_cgn_param: pointer to the driver copy of the FW congestion parameters.
7095  * @len: the size of the parameter buffer in bytes.
7096  *
7097  * Validate the parameters, copy them into the driver's cgn_p, refresh the
7098  * registered congestion info buffer (and its CRC), and switch the active
7099  * CMF mode (off/managed/monitor) to match the new settings.
7100  */
7101 static void
7102 lpfc_cgn_params_parse(struct lpfc_hba *phba,
7103 struct lpfc_cgn_param *p_cgn_param, uint32_t len)
7104 {
7105 struct lpfc_cgn_info *cp;
7106 uint32_t crc, oldmode;
7107
7108
7109
7110
7111 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7112 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7113 "4668 FW cgn parm buffer data: "
7114 "magic 0x%x version %d mode %d "
7115 "level0 %d level1 %d "
7116 "level2 %d byte13 %d "
7117 "byte14 %d byte15 %d "
7118 "byte11 %d byte12 %d activeMode %d\n",
7119 p_cgn_param->cgn_param_magic,
7120 p_cgn_param->cgn_param_version,
7121 p_cgn_param->cgn_param_mode,
7122 p_cgn_param->cgn_param_level0,
7123 p_cgn_param->cgn_param_level1,
7124 p_cgn_param->cgn_param_level2,
7125 p_cgn_param->byte13,
7126 p_cgn_param->byte14,
7127 p_cgn_param->byte15,
7128 p_cgn_param->byte11,
7129 p_cgn_param->byte12,
7130 phba->cmf_active_mode);
7131
7132 oldmode = phba->cmf_active_mode;
7133
7134
7135
7136
7137 lpfc_cgn_params_val(phba, p_cgn_param);
7138
7139
7140 spin_lock_irq(&phba->hbalock);
7141 memcpy(&phba->cgn_p, p_cgn_param,
7142 sizeof(struct lpfc_cgn_param));
7143
7144
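/* If a congestion info buffer is registered, mirror the new mode and
 * levels into it and regenerate its CRC.
 */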
7145 if (phba->cgn_i) {
7146 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7147 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7148 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7149 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7150 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7151 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
7152 LPFC_CGN_CRC32_SEED);
7153 cp->cgn_info_crc = cpu_to_le32(crc);
7154 }
7155 spin_unlock_irq(&phba->hbalock);
7156
7157 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7158
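/* Act on the transition between the previous and the new CMF mode */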
7159 switch (oldmode) {
7160 case LPFC_CFG_OFF:
7161 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7162
7163 lpfc_cmf_start(phba);
7164
7165 if (phba->link_state >= LPFC_LINK_UP) {
7166 phba->cgn_reg_fpin =
7167 phba->cgn_init_reg_fpin;
7168 phba->cgn_reg_signal =
7169 phba->cgn_init_reg_signal;
7170 lpfc_issue_els_edc(phba->pport, 0);
7171 }
7172 }
7173 break;
7174 case LPFC_CFG_MANAGED:
7175 switch (phba->cgn_p.cgn_param_mode) {
7176 case LPFC_CFG_OFF:
7177
7178 lpfc_cmf_stop(phba);
7179 if (phba->link_state >= LPFC_LINK_UP)
7180 lpfc_issue_els_edc(phba->pport, 0);
7181 break;
7182 case LPFC_CFG_MONITOR:
7183 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7184 "4661 Switch from MANAGED to "
7185 "MONITOR mode\n");
7186 phba->cmf_max_bytes_per_interval =
7187 phba->cmf_link_byte_count;
7188
7189
7190 queue_work(phba->wq,
7191 &phba->unblock_request_work);
7192 break;
7193 }
7194 break;
7195 case LPFC_CFG_MONITOR:
7196 switch (phba->cgn_p.cgn_param_mode) {
7197 case LPFC_CFG_OFF:
7198
7199 lpfc_cmf_stop(phba);
7200 if (phba->link_state >= LPFC_LINK_UP)
7201 lpfc_issue_els_edc(phba->pport, 0);
7202 break;
7203 case LPFC_CFG_MANAGED:
7204 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7205 "4662 Switch from MONITOR to "
7206 "MANAGED mode\n");
7207 lpfc_cmf_signal_init(phba);
7208 break;
7209 }
7210 break;
7211 }
7212 } else {
7213 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7214 "4669 FW cgn parm buf wrong magic 0x%x "
7215 "version %d\n", p_cgn_param->cgn_param_magic,
7216 p_cgn_param->cgn_param_version);
7217 }
7218 }
7219
7220
7221
7222
7223 /**
7224  * lpfc_sli4_cgn_params_read - Read and validate FW congestion parameters
7225  * @phba: pointer to lpfc hba data structure.
7226  *
7227  * Issue a read object command for the port congestion configuration and,
7228  * if data was returned, hand the buffer to lpfc_cgn_params_parse().
7229  *
7230  * Returns the lpfc_read_object() result: 0 for no data, a negative value
7231  * on error, or the number of bytes copied.
7232  */
7233 int
7234 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7235 {
7236 int ret = 0;
7237 struct lpfc_cgn_param *p_cgn_param = NULL;
7238 u32 *pdata = NULL;
7239 u32 len = 0;
7240
7241
7242 len = sizeof(struct lpfc_cgn_param);
7243 pdata = kzalloc(len, GFP_KERNEL);
7244 if (!pdata)
7245 return -ENOMEM;
7246 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7247 pdata, len);
7248
7249 /* 0 means no data, negative means error, positive is a byte count */
7250 if (!ret) {
7251 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7252 "4670 CGN RD OBJ returns no data\n");
7253 goto rd_obj_err;
7254 } else if (ret < 0) {
7255
7256 goto rd_obj_err;
7257 }
7258
7259 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7260 "6234 READ CGN PARAMS Successful %d\n", len);
7261
7262
7263
7264
7265
7266 p_cgn_param = (struct lpfc_cgn_param *)pdata;
7267 lpfc_cgn_params_parse(phba, p_cgn_param, len);
7268
7269 rd_obj_err:
7270 kfree(pdata);
7271 return ret;
7272 }
7273
7274
7275
7276
7277
7278
7279
7280
7281
7282
7283 /**
7284  * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion parameter change event
7285  * @phba: pointer to lpfc hba data structure.
7286  *
7287  * The FW signals a congestion parameter change with an async event; in
7288  * response the driver re-reads the parameter object from the port.
7289  * Returns -EACCES if CMF is not supported on this port, otherwise the
7290  * result of lpfc_sli4_cgn_params_read().
7291  */
7292 static int
7293 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7294 {
7295 int ret = 0;
7296
7297 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7298 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7299 "4664 Cgn Evt when E2E off. Drop event\n");
7300 return -EACCES;
7301 }
7302
7303
7304
7305
7306
7307 ret = lpfc_sli4_cgn_params_read(phba);
7308 if (ret < 0) {
7309 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7310 "4667 Error reading Cgn Params (%d)\n",
7311 ret);
7312 } else if (!ret) {
7313 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7314 "4673 CGN Event empty object.\n");
7315 }
7316 return ret;
7317 }
7318
7319
7320 /**
7321  * lpfc_sli4_async_event_proc - Process all pending asynchronous events
7322  * @phba: pointer to lpfc hba data structure.
7323  *
7324  * Drain the slow-path async work queue and dispatch each event by its type.
7325  */
7326 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7327 {
7328 struct lpfc_cq_event *cq_event;
7329 unsigned long iflags;
7330
7331
7332 spin_lock_irqsave(&phba->hbalock, iflags);
7333 phba->hba_flag &= ~ASYNC_EVENT;
7334 spin_unlock_irqrestore(&phba->hbalock, iflags);
7335
7336
7337 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7338 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7339 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7340 cq_event, struct lpfc_cq_event, list);
7341 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7342 iflags);
7343
7344
7345 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7346 case LPFC_TRAILER_CODE_LINK:
7347 lpfc_sli4_async_link_evt(phba,
7348 &cq_event->cqe.acqe_link);
7349 break;
7350 case LPFC_TRAILER_CODE_FCOE:
7351 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7352 break;
7353 case LPFC_TRAILER_CODE_DCBX:
7354 lpfc_sli4_async_dcbx_evt(phba,
7355 &cq_event->cqe.acqe_dcbx);
7356 break;
7357 case LPFC_TRAILER_CODE_GRP5:
7358 lpfc_sli4_async_grp5_evt(phba,
7359 &cq_event->cqe.acqe_grp5);
7360 break;
7361 case LPFC_TRAILER_CODE_FC:
7362 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7363 break;
7364 case LPFC_TRAILER_CODE_SLI:
7365 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7366 break;
7367 case LPFC_TRAILER_CODE_CMSTAT:
7368 lpfc_sli4_async_cmstat_evt(phba);
7369 break;
7370 default:
7371 lpfc_printf_log(phba, KERN_ERR,
7372 LOG_TRACE_EVENT,
7373 "1804 Invalid asynchronous event code: "
7374 "x%x\n", bf_get(lpfc_trailer_code,
7375 &cq_event->cqe.mcqe_cmpl));
7376 break;
7377 }
7378
7379
7380 lpfc_sli4_cq_event_release(phba, cq_event);
7381 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7382 }
7383 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7384 }
7385
7386
7387 /**
7388  * lpfc_sli4_fcf_redisc_event_proc - Process the FCF rediscover event
7389  * @phba: pointer to lpfc hba data structure.
7390  *
7391  * Mark FCF fast failover pending and start a fresh FCF table scan.
7392  */
7393 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7394 {
7395 int rc;
7396
7397 spin_lock_irq(&phba->hbalock);
7398
7399 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7400
7401 phba->fcf.failover_rec.flag = 0;
7402
7403 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7404 spin_unlock_irq(&phba->hbalock);
7405
7406
7407 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7408 "2777 Start post-quiescent FCF table scan\n");
7409 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7410 if (rc)
7411 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7412 "2747 Issue FCF scan read FCF mailbox "
7413 "command failed 0x%x\n", rc);
7414 }
7415
7416
7417
7418 /**
7419  * lpfc_api_table_setup - Set up the device-group specific API jump tables
7420  * @phba: pointer to lpfc hba data structure.
7421  * @dev_grp: the HBA PCI-device group number.
7422  *
7423  * Select the SLI-3 or SLI-4 entry points for the init, scsi, sli and mbox
7424  * API tables.  Returns 0 on success or -ENODEV if any table setup fails.
7425  */
7426 int
7427 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7428 {
7429 int rc;
7430
7431
7432 phba->pci_dev_grp = dev_grp;
7433
7434
7435 if (dev_grp == LPFC_PCI_DEV_OC)
7436 phba->sli_rev = LPFC_SLI_REV4;
7437
7438
7439 rc = lpfc_init_api_table_setup(phba, dev_grp);
7440 if (rc)
7441 return -ENODEV;
7442
7443 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7444 if (rc)
7445 return -ENODEV;
7446
7447 rc = lpfc_sli_api_table_setup(phba, dev_grp);
7448 if (rc)
7449 return -ENODEV;
7450
7451 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7452 if (rc)
7453 return -ENODEV;
7454
7455 return 0;
7456 }
7457
7458
7459
7460
7461
7462
7463
7464
7465
7466 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7467 {
7468 switch (intr_mode) {
7469 case 0:
7470 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7471 "0470 Enable INTx interrupt mode.\n");
7472 break;
7473 case 1:
7474 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7475 "0481 Enabled MSI interrupt mode.\n");
7476 break;
7477 case 2:
7478 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7479 "0480 Enabled MSI-X interrupt mode.\n");
7480 break;
7481 default:
7482 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7483 "0482 Illegal interrupt mode.\n");
7484 break;
7485 }
7486 return;
7487 }
7488
7489
7490
7491
7492 /**
7493  * lpfc_enable_pci_dev - Enable the PCI device behind this HBA
7494  * @phba: pointer to lpfc hba data structure.
7495  *
7496  * Enable the device, claim its memory regions, set bus mastering and save
7497  * the PCI config space for a later restore.  Returns 0 on success or
7498  * -ENODEV if the device could not be enabled.
7499  */
7500 static int
7501 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7502 {
7503 struct pci_dev *pdev;
7504
7505
7506 if (!phba->pcidev)
7507 goto out_error;
7508 else
7509 pdev = phba->pcidev;
7510
7511 if (pci_enable_device_mem(pdev))
7512 goto out_error;
7513
7514 if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7515 goto out_disable_device;
7516
7517 pci_set_master(pdev);
7518 pci_try_set_mwi(pdev);
7519 pci_save_state(pdev);
7520
7521
7522 if (pci_is_pcie(pdev))
7523 pdev->needs_freset = 1;
7524
7525 return 0;
7526
7527 out_disable_device:
7528 pci_disable_device(pdev);
7529 out_error:
7530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7531 "1401 Failed to enable pci device\n");
7532 return -ENODEV;
7533 }
7534
7535
7536
7537
7538
7539
7540
7541
7542 static void
7543 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7544 {
7545 struct pci_dev *pdev;
7546
7547
7548 if (!phba->pcidev)
7549 return;
7550 else
7551 pdev = phba->pcidev;
7552
7553 pci_release_mem_regions(pdev);
7554 pci_disable_device(pdev);
7555
7556 return;
7557 }
7558
7559
7560
7561 /**
7562  * lpfc_reset_hba - Reset the HBA and bring it back online
7563  * @phba: pointer to lpfc hba data structure.
7564  *
7565  * Take the port offline (flushing the I/O rings if SLI is no longer
7566  * active), restart the board and bring it back online.
7567  */
7568 void
7569 lpfc_reset_hba(struct lpfc_hba *phba)
7570 {
7571
7572 if (!phba->cfg_enable_hba_reset) {
7573 phba->link_state = LPFC_HBA_ERROR;
7574 return;
7575 }
7576
7577
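/* If SLI is still active, wait for outstanding mailbox commands while
 * preparing for offline; otherwise do not wait and flush any I/O left
 * on the rings.
 */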
7578 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7579 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7580 } else {
7581 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7582 lpfc_sli_flush_io_rings(phba);
7583 }
7584 lpfc_offline(phba);
7585 lpfc_sli_brdrestart(phba);
7586 lpfc_online(phba);
7587 lpfc_unblock_mgmt_io(phba);
7588 }
7589
7590
7591
7592
7593
7594
7595
7596
7597
7598
7599
7600 uint16_t
7601 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7602 {
7603 struct pci_dev *pdev = phba->pcidev;
7604 uint16_t nr_virtfn;
7605 int pos;
7606
7607 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7608 if (pos == 0)
7609 return 0;
7610
7611 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7612 return nr_virtfn;
7613 }
7614
7615
7616
7617
7618
7619
7620
7621
7622
7623
7624
7625
7626 int
7627 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7628 {
7629 struct pci_dev *pdev = phba->pcidev;
7630 uint16_t max_nr_vfn;
7631 int rc;
7632
7633 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7634 if (nr_vfn > max_nr_vfn) {
7635 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7636 "3057 Requested vfs (%d) greater than "
7637 "supported vfs (%d)", nr_vfn, max_nr_vfn);
7638 return -EINVAL;
7639 }
7640
7641 rc = pci_enable_sriov(pdev, nr_vfn);
7642 if (rc) {
7643 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7644 "2806 Failed to enable sriov on this device "
7645 "with vfn number nr_vf:%d, rc:%d\n",
7646 nr_vfn, rc);
7647 } else
7648 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7649 "2807 Successful enable sriov on this device "
7650 "with vfn number nr_vf:%d\n", nr_vfn);
7651 return rc;
7652 }
7653
7654 static void
7655 lpfc_unblock_requests_work(struct work_struct *work)
7656 {
7657 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7658 unblock_request_work);
7659
7660 lpfc_unblock_requests(phba);
7661 }
7662
7663
7664
7665
7666
7667
7668
7669
7670
7671
7672
7673
7674 static int
7675 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7676 {
7677 struct lpfc_sli *psli = &phba->sli;
7678
7679
7680
7681
7682 atomic_set(&phba->fast_event_count, 0);
7683 atomic_set(&phba->dbg_log_idx, 0);
7684 atomic_set(&phba->dbg_log_cnt, 0);
7685 atomic_set(&phba->dbg_log_dmping, 0);
7686 spin_lock_init(&phba->hbalock);
7687
7688
7689 spin_lock_init(&phba->port_list_lock);
7690 INIT_LIST_HEAD(&phba->port_list);
7691
7692 INIT_LIST_HEAD(&phba->work_list);
7693
7694
7695 init_waitqueue_head(&phba->work_waitq);
7696
7697 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7698 "1403 Protocols supported %s %s %s\n",
7699 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7700 "SCSI" : " "),
7701 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7702 "NVME" : " "),
7703 (phba->nvmet_support ? "NVMET" : " "));
7704
7705
7706 spin_lock_init(&phba->scsi_buf_list_get_lock);
7707 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7708 spin_lock_init(&phba->scsi_buf_list_put_lock);
7709 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7710
7711
7712 INIT_LIST_HEAD(&phba->fabric_iocb_list);
7713
7714
7715 INIT_LIST_HEAD(&phba->elsbuf);
7716
7717
7718 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7719
7720
7721 spin_lock_init(&phba->devicelock);
7722 INIT_LIST_HEAD(&phba->luns);
7723
7724
7725 timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7726
7727 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7728
7729 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7730
7731 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7732
7733 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7734
7735 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7736 lpfc_idle_stat_delay_work);
7737 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7738 return 0;
7739 }
7740
7741
7742
7743
7744
7745
7746
7747
7748
7749
7750
7751
7752 static int
7753 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7754 {
7755 int rc, entry_sz;
7756
7757
7758
7759
7760
7761
7762 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7763
7764
7765 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7766 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7767
7768
7769 lpfc_get_cfgparam(phba);
7770
7771
7772 rc = lpfc_setup_driver_resource_phase1(phba);
7773 if (rc)
7774 return -ENODEV;
7775
7776 if (!phba->sli.sli3_ring)
7777 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7778 sizeof(struct lpfc_sli_ring),
7779 GFP_KERNEL);
7780 if (!phba->sli.sli3_ring)
7781 return -ENOMEM;
7782
7783
7784
7785
7786
7787
7788 if (phba->sli_rev == LPFC_SLI_REV4)
7789 entry_sz = sizeof(struct sli4_sge);
7790 else
7791 entry_sz = sizeof(struct ulp_bde64);
7792
7793
7794 if (phba->cfg_enable_bg) {
7795
7796
7797
7798
7799
7800
7801
7802
7803
7804 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7805 sizeof(struct fcp_rsp) +
7806 (LPFC_MAX_SG_SEG_CNT * entry_sz);
7807
7808 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7809 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7810
7811
7812 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7813 } else {
7814
7815
7816
7817
7818
7819 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7820 sizeof(struct fcp_rsp) +
7821 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7822
7823
7824 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7825 }
7826
7827 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7828 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7829 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7830 phba->cfg_total_seg_cnt);
7831
7832 phba->max_vpi = LPFC_MAX_VPI;
7833
7834 phba->max_vports = 0;
7835
7836
7837
7838
7839 lpfc_sli_setup(phba);
7840 lpfc_sli_queue_init(phba);
7841
7842
7843 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7844 return -ENOMEM;
7845
7846 phba->lpfc_sg_dma_buf_pool =
7847 dma_pool_create("lpfc_sg_dma_buf_pool",
7848 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7849 BPL_ALIGN_SZ, 0);
7850
7851 if (!phba->lpfc_sg_dma_buf_pool)
7852 goto fail_free_mem;
7853
7854 phba->lpfc_cmd_rsp_buf_pool =
7855 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7856 &phba->pcidev->dev,
7857 sizeof(struct fcp_cmnd) +
7858 sizeof(struct fcp_rsp),
7859 BPL_ALIGN_SZ, 0);
7860
7861 if (!phba->lpfc_cmd_rsp_buf_pool)
7862 goto fail_free_dma_buf_pool;
7863
7864
7865
7866
7867
7868 if (phba->cfg_sriov_nr_virtfn > 0) {
7869 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7870 phba->cfg_sriov_nr_virtfn);
7871 if (rc) {
7872 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7873 "2808 Requested number of SR-IOV "
7874 "virtual functions (%d) is not "
7875 "supported\n",
7876 phba->cfg_sriov_nr_virtfn);
7877 phba->cfg_sriov_nr_virtfn = 0;
7878 }
7879 }
7880
7881 return 0;
7882
7883 fail_free_dma_buf_pool:
7884 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7885 phba->lpfc_sg_dma_buf_pool = NULL;
7886 fail_free_mem:
7887 lpfc_mem_free(phba);
7888 return -ENOMEM;
7889 }
7890
7891
7892
7893
7894
7895
7896
7897
7898 static void
7899 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7900 {
7901
7902 lpfc_mem_free_all(phba);
7903
7904 return;
7905 }
7906
7907
7908
7909
7910 /**
7911  * lpfc_sli4_driver_resource_setup - Set up SLI-4 specific driver resources
7912  * @phba: pointer to lpfc hba data structure.
7913  *
7914  * Allocate the work queue, timers, locks, bootstrap mailbox, SGL lists,
7915  * per-CPU structures and DMA buffer pools required by an SLI-4 port.
7916  * Returns 0 on success or a negative errno on failure.
7917  */
7918 static int
7919 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7920 {
7921 LPFC_MBOXQ_t *mboxq;
7922 MAILBOX_t *mb;
7923 int rc, i, max_buf_size;
7924 int longs;
7925 int extra;
7926 uint64_t wwn;
7927 u32 if_type;
7928 u32 if_fam;
7929
7930 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7931 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7932 phba->sli4_hba.curr_disp_cpu = 0;
7933
7934
7935 lpfc_get_cfgparam(phba);
7936
7937
7938 rc = lpfc_setup_driver_resource_phase1(phba);
7939 if (rc)
7940 return -ENODEV;
7941
7942
7943 rc = lpfc_sli4_post_status_check(phba);
7944 if (rc)
7945 return -ENODEV;
7946
7947
7948
7949
7950 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7951 if (!phba->wq)
7952 return -ENOMEM;
7953
7954
7955
7956
7957
7958 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7959
7960
7961 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7962
7963
7964 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7965 phba->cmf_timer.function = lpfc_cmf_timer;
7966
7967
7968
7969
7970
7971 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7972 sizeof(struct lpfc_mbox_ext_buf_ctx));
7973 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7974
7975 phba->max_vpi = LPFC_MAX_VPI;
7976
7977
7978 phba->max_vports = 0;
7979
7980
7981 phba->valid_vlan = 0;
7982 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7983 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7984 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7985
7986
7987
7988
7989
7990
7991
7992
7993 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7994 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7995 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7996
7997
7998 if (lpfc_is_vmid_enabled(phba))
7999 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
8000
8001
8002
8003
8004
8005 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
8006 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
8007
8008 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8009
8010 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
8011 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8012 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
8013 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
8014 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8015 }
8016
8017
8018 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8019 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8020 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8021 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
8022
8023
8024
8025
8026
8027
8028 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
8029
8030 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
8031
8032 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
8033
8034 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
8035
8036 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
8037
8038
8039 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8040 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8041 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8042 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
8043
8044
8045
8046
8047 INIT_LIST_HEAD(&phba->sli.mboxq);
8048 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
8049
8050
8051 phba->sli4_hba.lnk_info.optic_state = 0xff;
8052
8053
8054 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8055 if (rc)
8056 goto out_destroy_workqueue;
8057
8058
8059 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8060 LPFC_SLI_INTF_IF_TYPE_2) {
8061 rc = lpfc_pci_function_reset(phba);
8062 if (unlikely(rc)) {
8063 rc = -ENODEV;
8064 goto out_free_mem;
8065 }
8066 phba->temp_sensor_support = 1;
8067 }
8068
8069
8070 rc = lpfc_create_bootstrap_mbox(phba);
8071 if (unlikely(rc))
8072 goto out_free_mem;
8073
8074
8075 rc = lpfc_setup_endian_order(phba);
8076 if (unlikely(rc))
8077 goto out_free_bsmbx;
8078
8079
8080 rc = lpfc_sli4_read_config(phba);
8081 if (unlikely(rc))
8082 goto out_free_bsmbx;
8083
8084 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
8085
8086
8087
8088
8089
8090
8091
8092 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8093 }
8094
8095 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8096 if (unlikely(rc))
8097 goto out_free_bsmbx;
8098
8099
8100 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8101 LPFC_SLI_INTF_IF_TYPE_0) {
8102 rc = lpfc_pci_function_reset(phba);
8103 if (unlikely(rc))
8104 goto out_free_bsmbx;
8105 }
8106
8107 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8108 GFP_KERNEL);
8109 if (!mboxq) {
8110 rc = -ENOMEM;
8111 goto out_free_bsmbx;
8112 }
8113
8114
8115 phba->nvmet_support = 0;
8116 if (lpfc_enable_nvmet_cnt) {
8117
8118
8119 lpfc_read_nv(phba, mboxq);
8120 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8121 if (rc != MBX_SUCCESS) {
8122 lpfc_printf_log(phba, KERN_ERR,
8123 LOG_TRACE_EVENT,
8124 "6016 Mailbox failed , mbxCmd x%x "
8125 "READ_NV, mbxStatus x%x\n",
8126 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8127 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8128 mempool_free(mboxq, phba->mbox_mem_pool);
8129 rc = -EIO;
8130 goto out_free_bsmbx;
8131 }
8132 mb = &mboxq->u.mb;
8133 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8134 sizeof(uint64_t));
8135 wwn = cpu_to_be64(wwn);
8136 phba->sli4_hba.wwnn.u.name = wwn;
8137 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8138 sizeof(uint64_t));
8139
8140 wwn = cpu_to_be64(wwn);
8141 phba->sli4_hba.wwpn.u.name = wwn;
8142
8143
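/* Compare this port's WWPN against the lpfc_enable_nvmet module
 * parameter list to decide whether the port runs as an NVME target.
 */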
8144 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
8145 if (wwn == lpfc_enable_nvmet[i]) {
8146 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8147 if (lpfc_nvmet_mem_alloc(phba))
8148 break;
8149
8150 phba->nvmet_support = 1;
8151
8152 lpfc_printf_log(phba, KERN_ERR,
8153 LOG_TRACE_EVENT,
8154 "6017 NVME Target %016llx\n",
8155 wwn);
8156 #else
8157 lpfc_printf_log(phba, KERN_ERR,
8158 LOG_TRACE_EVENT,
8159 "6021 Can't enable NVME Target."
8160 " NVME_TARGET_FC infrastructure"
8161 " is not in kernel\n");
8162 #endif
8163
8164 phba->cfg_xri_rebalancing = 0;
8165 if (phba->irq_chann_mode == NHT_MODE) {
8166 phba->cfg_irq_chann =
8167 phba->sli4_hba.num_present_cpu;
8168 phba->cfg_hdw_queue =
8169 phba->sli4_hba.num_present_cpu;
8170 phba->irq_chann_mode = NORMAL_MODE;
8171 }
8172 break;
8173 }
8174 }
8175 }
8176
8177 lpfc_nvme_mod_param_dep(phba);
8178
8179
8180
8181
8182
8183
8184 rc = lpfc_get_sli4_parameters(phba, mboxq);
8185 if (rc) {
8186 if_type = bf_get(lpfc_sli_intf_if_type,
8187 &phba->sli4_hba.sli_intf);
8188 if_fam = bf_get(lpfc_sli_intf_sli_family,
8189 &phba->sli4_hba.sli_intf);
8190 if (phba->sli4_hba.extents_in_use &&
8191 phba->sli4_hba.rpi_hdrs_in_use) {
8192 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8193 "2999 Unsupported SLI4 Parameters "
8194 "Extents and RPI headers enabled.\n");
8195 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8196 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
8197 mempool_free(mboxq, phba->mbox_mem_pool);
8198 rc = -EIO;
8199 goto out_free_bsmbx;
8200 }
8201 }
8202 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8203 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8204 mempool_free(mboxq, phba->mbox_mem_pool);
8205 rc = -EIO;
8206 goto out_free_bsmbx;
8207 }
8208 }
8209
8210
8211
8212
8213
8214 extra = 2;
8215 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8216 extra++;
8217
8218
8219
8220
8221
8222
8223 max_buf_size = (2 * SLI4_PAGE_SIZE);
8224
8225
8226
8227
8228
8229 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8230
8231
8232
8233
8234
8235
8236
8237
8238
8239
8240
8241 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8242 sizeof(struct fcp_rsp) + max_buf_size;
8243
8244
8245 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8246
8247
8248
8249
8250
8251 if (phba->cfg_enable_bg &&
8252 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8253 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8254 else
8255 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8256
8257 } else {
8258
8259
8260
8261
8262
8263 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8264 sizeof(struct fcp_rsp) +
8265 ((phba->cfg_sg_seg_cnt + extra) *
8266 sizeof(struct sli4_sge));
8267
8268
8269 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8270 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8271
8272
8273
8274
8275
8276 }
8277
8278 if (phba->cfg_xpsgl && !phba->nvmet_support)
8279 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8280 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8281 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8282 else
8283 phba->cfg_sg_dma_buf_size =
8284 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8285
8286 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8287 sizeof(struct sli4_sge);
8288
8289
8290 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8291 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8292 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8293 "6300 Reducing NVME sg segment "
8294 "cnt to %d\n",
8295 LPFC_MAX_NVME_SEG_CNT);
8296 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8297 } else
8298 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8299 }
8300
8301 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8302 "9087 sg_seg_cnt:%d dmabuf_size:%d "
8303 "total:%d scsi:%d nvme:%d\n",
8304 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8305 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
8306 phba->cfg_nvme_seg_cnt);
8307
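/* Align the SGL DMA buffer pool to the buffer size, capped at one
 * SLI4 page.
 */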
8308 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8309 i = phba->cfg_sg_dma_buf_size;
8310 else
8311 i = SLI4_PAGE_SIZE;
8312
8313 phba->lpfc_sg_dma_buf_pool =
8314 dma_pool_create("lpfc_sg_dma_buf_pool",
8315 &phba->pcidev->dev,
8316 phba->cfg_sg_dma_buf_size,
8317 i, 0);
8318 if (!phba->lpfc_sg_dma_buf_pool)
8319 goto out_free_bsmbx;
8320
8321 phba->lpfc_cmd_rsp_buf_pool =
8322 dma_pool_create("lpfc_cmd_rsp_buf_pool",
8323 &phba->pcidev->dev,
8324 sizeof(struct fcp_cmnd) +
8325 sizeof(struct fcp_rsp),
8326 i, 0);
8327 if (!phba->lpfc_cmd_rsp_buf_pool)
8328 goto out_free_sg_dma_buf;
8329
8330 mempool_free(mboxq, phba->mbox_mem_pool);
8331
8332
8333 lpfc_sli4_oas_verify(phba);
8334
8335
8336 lpfc_sli4_ras_init(phba);
8337
8338
8339 rc = lpfc_sli4_queue_verify(phba);
8340 if (rc)
8341 goto out_free_cmd_rsp_buf;
8342
8343
8344 rc = lpfc_sli4_cq_event_pool_create(phba);
8345 if (rc)
8346 goto out_free_cmd_rsp_buf;
8347
8348
8349 lpfc_init_sgl_list(phba);
8350
8351
8352 rc = lpfc_init_active_sgl_array(phba);
8353 if (rc) {
8354 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8355 "1430 Failed to initialize sgl list.\n");
8356 goto out_destroy_cq_event_pool;
8357 }
8358 rc = lpfc_sli4_init_rpi_hdrs(phba);
8359 if (rc) {
8360 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8361 "1432 Failed to initialize rpi headers.\n");
8362 goto out_free_active_sgl;
8363 }
8364
8365
8366 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
8367 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8368 GFP_KERNEL);
8369 if (!phba->fcf.fcf_rr_bmask) {
8370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8371 "2759 Failed allocate memory for FCF round "
8372 "robin failover bmask\n");
8373 rc = -ENOMEM;
8374 goto out_remove_rpi_hdrs;
8375 }
8376
8377 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8378 sizeof(struct lpfc_hba_eq_hdl),
8379 GFP_KERNEL);
8380 if (!phba->sli4_hba.hba_eq_hdl) {
8381 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8382 "2572 Failed allocate memory for "
8383 "fast-path per-EQ handle array\n");
8384 rc = -ENOMEM;
8385 goto out_free_fcf_rr_bmask;
8386 }
8387
8388 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8389 sizeof(struct lpfc_vector_map_info),
8390 GFP_KERNEL);
8391 if (!phba->sli4_hba.cpu_map) {
8392 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8393 "3327 Failed allocate memory for msi-x "
8394 "interrupt vector mapping\n");
8395 rc = -ENOMEM;
8396 goto out_free_hba_eq_hdl;
8397 }
8398
8399 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8400 if (!phba->sli4_hba.eq_info) {
8401 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8402 "3321 Failed allocation for per_cpu stats\n");
8403 rc = -ENOMEM;
8404 goto out_free_hba_cpu_map;
8405 }
8406
8407 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8408 sizeof(*phba->sli4_hba.idle_stat),
8409 GFP_KERNEL);
8410 if (!phba->sli4_hba.idle_stat) {
8411 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8412 "3390 Failed allocation for idle_stat\n");
8413 rc = -ENOMEM;
8414 goto out_free_hba_eq_info;
8415 }
8416
8417 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8418 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8419 if (!phba->sli4_hba.c_stat) {
8420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8421 "3332 Failed allocating per cpu hdwq stats\n");
8422 rc = -ENOMEM;
8423 goto out_free_hba_idle_stat;
8424 }
8425 #endif
8426
8427 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8428 if (!phba->cmf_stat) {
8429 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8430 "3331 Failed allocating per cpu cgn stats\n");
8431 rc = -ENOMEM;
8432 goto out_free_hba_hdwq_info;
8433 }
8434
8435
8436
8437
8438
8439 if (phba->cfg_sriov_nr_virtfn > 0) {
8440 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8441 phba->cfg_sriov_nr_virtfn);
8442 if (rc) {
8443 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8444 "3020 Requested number of SR-IOV "
8445 "virtual functions (%d) is not "
8446 "supported\n",
8447 phba->cfg_sriov_nr_virtfn);
8448 phba->cfg_sriov_nr_virtfn = 0;
8449 }
8450 }
8451
8452 return 0;
8453
8454 out_free_hba_hdwq_info:
8455 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8456 free_percpu(phba->sli4_hba.c_stat);
8457 out_free_hba_idle_stat:
8458 #endif
8459 kfree(phba->sli4_hba.idle_stat);
8460 out_free_hba_eq_info:
8461 free_percpu(phba->sli4_hba.eq_info);
8462 out_free_hba_cpu_map:
8463 kfree(phba->sli4_hba.cpu_map);
8464 out_free_hba_eq_hdl:
8465 kfree(phba->sli4_hba.hba_eq_hdl);
8466 out_free_fcf_rr_bmask:
8467 kfree(phba->fcf.fcf_rr_bmask);
8468 out_remove_rpi_hdrs:
8469 lpfc_sli4_remove_rpi_hdrs(phba);
8470 out_free_active_sgl:
8471 lpfc_free_active_sgl(phba);
8472 out_destroy_cq_event_pool:
8473 lpfc_sli4_cq_event_pool_destroy(phba);
8474 out_free_cmd_rsp_buf:
8475 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8476 phba->lpfc_cmd_rsp_buf_pool = NULL;
8477 out_free_sg_dma_buf:
8478 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8479 phba->lpfc_sg_dma_buf_pool = NULL;
8480 out_free_bsmbx:
8481 lpfc_destroy_bootstrap_mbox(phba);
8482 out_free_mem:
8483 lpfc_mem_free(phba);
8484 out_destroy_workqueue:
8485 destroy_workqueue(phba->wq);
8486 phba->wq = NULL;
8487 return rc;
8488 }
8489
8490
8491
8492
8493
8494
8495
8496
8497 static void
8498 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8499 {
8500 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8501
8502 free_percpu(phba->sli4_hba.eq_info);
8503 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8504 free_percpu(phba->sli4_hba.c_stat);
8505 #endif
8506 free_percpu(phba->cmf_stat);
8507 kfree(phba->sli4_hba.idle_stat);
8508
8509
8510 kfree(phba->sli4_hba.cpu_map);
8511 phba->sli4_hba.num_possible_cpu = 0;
8512 phba->sli4_hba.num_present_cpu = 0;
8513 phba->sli4_hba.curr_disp_cpu = 0;
8514 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8515
8516
8517 kfree(phba->sli4_hba.hba_eq_hdl);
8518
8519
8520 lpfc_sli4_remove_rpi_hdrs(phba);
8521 lpfc_sli4_remove_rpis(phba);
8522
8523
8524 kfree(phba->fcf.fcf_rr_bmask);
8525
8526
8527 lpfc_free_active_sgl(phba);
8528 lpfc_free_els_sgl_list(phba);
8529 lpfc_free_nvmet_sgl_list(phba);
8530
8531
8532 lpfc_sli4_cq_event_release_all(phba);
8533 lpfc_sli4_cq_event_pool_destroy(phba);
8534
8535
8536 lpfc_sli4_dealloc_resource_identifiers(phba);
8537
8538
8539 lpfc_destroy_bootstrap_mbox(phba);
8540
8541
8542 lpfc_mem_free_all(phba);
8543
8544
8545 list_for_each_entry_safe(conn_entry, next_conn_entry,
8546 &phba->fcf_conn_rec_list, list) {
8547 list_del_init(&conn_entry->list);
8548 kfree(conn_entry);
8549 }
8550
8551 return;
8552 }
8553
8554
8555
8556
8557
8558
8559
8560
8561
8562
8563
8564 int
8565 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8566 {
8567 phba->lpfc_hba_init_link = lpfc_hba_init_link;
8568 phba->lpfc_hba_down_link = lpfc_hba_down_link;
8569 phba->lpfc_selective_reset = lpfc_selective_reset;
8570 switch (dev_grp) {
8571 case LPFC_PCI_DEV_LP:
8572 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8573 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8574 phba->lpfc_stop_port = lpfc_stop_port_s3;
8575 break;
8576 case LPFC_PCI_DEV_OC:
8577 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8578 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8579 phba->lpfc_stop_port = lpfc_stop_port_s4;
8580 break;
8581 default:
8582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8583 "1431 Invalid HBA PCI-device group: 0x%x\n",
8584 dev_grp);
8585 return -ENODEV;
8586 }
8587 return 0;
8588 }
8589
8590
8591
8592
8593
8594
8595
8596
8597
8598
8599
8600
8601 static int
8602 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8603 {
8604 int error;
8605
8606
8607 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8608 "lpfc_worker_%d", phba->brd_no);
8609 if (IS_ERR(phba->worker_thread)) {
8610 error = PTR_ERR(phba->worker_thread);
8611 return error;
8612 }
8613
8614 return 0;
8615 }
8616
8617
8618
8619
8620
8621
8622
8623
8624
8625 static void
8626 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8627 {
8628 if (phba->wq) {
8629 destroy_workqueue(phba->wq);
8630 phba->wq = NULL;
8631 }
8632
8633
8634 if (phba->worker_thread)
8635 kthread_stop(phba->worker_thread);
8636 }
8637
8638
8639
8640
8641
8642
8643
8644 void
8645 lpfc_free_iocb_list(struct lpfc_hba *phba)
8646 {
8647 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8648
8649 spin_lock_irq(&phba->hbalock);
8650 list_for_each_entry_safe(iocbq_entry, iocbq_next,
8651 &phba->lpfc_iocb_list, list) {
8652 list_del(&iocbq_entry->list);
8653 kfree(iocbq_entry);
8654 phba->total_iocbq_bufs--;
8655 }
8656 spin_unlock_irq(&phba->hbalock);
8657
8658 return;
8659 }
8660
8661
8662
8663
8664
8665
8666
8667
8668
8669
8670
8671
8672
8673 int
8674 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8675 {
8676 struct lpfc_iocbq *iocbq_entry = NULL;
8677 uint16_t iotag;
8678 int i;
8679
8680
8681 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8682 for (i = 0; i < iocb_count; i++) {
8683 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8684 if (iocbq_entry == NULL) {
8685 printk(KERN_ERR "%s: only allocated %d iocbs of "
8686 "expected %d count. Unloading driver.\n",
8687 __func__, i, iocb_count);
8688 goto out_free_iocbq;
8689 }
8690
8691 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8692 if (iotag == 0) {
8693 kfree(iocbq_entry);
8694 printk(KERN_ERR "%s: failed to allocate IOTAG. "
8695 "Unloading driver.\n", __func__);
8696 goto out_free_iocbq;
8697 }
8698 iocbq_entry->sli4_lxritag = NO_XRI;
8699 iocbq_entry->sli4_xritag = NO_XRI;
8700
8701 spin_lock_irq(&phba->hbalock);
8702 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8703 phba->total_iocbq_bufs++;
8704 spin_unlock_irq(&phba->hbalock);
8705 }
8706
8707 return 0;
8708
8709 out_free_iocbq:
8710 lpfc_free_iocb_list(phba);
8711
8712 return -ENOMEM;
8713 }
8714
8715
8716
8717
8718
8719
8720
8721
8722 void
8723 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8724 {
8725 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8726
8727 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8728 list_del(&sglq_entry->list);
8729 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8730 kfree(sglq_entry);
8731 }
8732 }
8733
8734
8735
8736
8737
8738
8739
8740 static void
8741 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8742 {
8743 LIST_HEAD(sglq_list);
8744
8745
8746 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8747 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8748 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8749
8750
8751 lpfc_free_sgl_list(phba, &sglq_list);
8752 }
8753
8754
8755
8756
8757
8758
8759
8760 static void
8761 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8762 {
8763 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8764 LIST_HEAD(sglq_list);
8765
8766
8767 spin_lock_irq(&phba->hbalock);
8768 spin_lock(&phba->sli4_hba.sgl_list_lock);
8769 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8770 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8771 spin_unlock_irq(&phba->hbalock);
8772
8773
8774 list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8775 list_del(&sglq_entry->list);
8776 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8777 kfree(sglq_entry);
8778 }
8779
8780
8781
8782
8783
8784 phba->sli4_hba.nvmet_xri_cnt = 0;
8785 }
8786
8787
8788
8789
8790
8791
8792
8793
8794 static int
8795 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8796 {
8797 int size;
8798 size = sizeof(struct lpfc_sglq *);
8799 size *= phba->sli4_hba.max_cfg_param.max_xri;
8800
8801 phba->sli4_hba.lpfc_sglq_active_list =
8802 kzalloc(size, GFP_KERNEL);
8803 if (!phba->sli4_hba.lpfc_sglq_active_list)
8804 return -ENOMEM;
8805 return 0;
8806 }
8807
8808
8809
8810
8811
8812
8813
8814
8815
8816 static void
8817 lpfc_free_active_sgl(struct lpfc_hba *phba)
8818 {
8819 kfree(phba->sli4_hba.lpfc_sglq_active_list);
8820 }
8821
8822
8823
8824
8825
8826
8827
8828
8829
8830 static void
8831 lpfc_init_sgl_list(struct lpfc_hba *phba)
8832 {
8833
8834 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8835 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8836 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8837 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8838
8839
8840 phba->sli4_hba.els_xri_cnt = 0;
8841
8842
8843 phba->sli4_hba.io_xri_cnt = 0;
8844 }
8845
8846
8847
8848
8849
8850
8851
8852
8853
8854
8855
8856
8857
8858
8859
8860 int
8861 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8862 {
8863 int rc = 0;
8864 struct lpfc_rpi_hdr *rpi_hdr;
8865
8866 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8867 if (!phba->sli4_hba.rpi_hdrs_in_use)
8868 return rc;
8869 if (phba->sli4_hba.extents_in_use)
8870 return -EIO;
8871
8872 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8873 if (!rpi_hdr) {
8874 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8875 "0391 Error during rpi post operation\n");
8876 lpfc_sli4_remove_rpis(phba);
8877 rc = -ENODEV;
8878 }
8879
8880 return rc;
8881 }
8882
8883
8884
8885
8886
8887
8888
8889
8890
8891
8892
8893
8894
8895
8896 struct lpfc_rpi_hdr *
8897 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8898 {
8899 uint16_t rpi_limit, curr_rpi_range;
8900 struct lpfc_dmabuf *dmabuf;
8901 struct lpfc_rpi_hdr *rpi_hdr;
8902
8903
8904
8905
8906
8907
8908 if (!phba->sli4_hba.rpi_hdrs_in_use)
8909 return NULL;
8910 if (phba->sli4_hba.extents_in_use)
8911 return NULL;
8912
8913
8914 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8915
8916 spin_lock_irq(&phba->hbalock);
8917
8918
8919
8920
8921
8922 curr_rpi_range = phba->sli4_hba.next_rpi;
8923 spin_unlock_irq(&phba->hbalock);
8924
8925
8926 if (curr_rpi_range == rpi_limit)
8927 return NULL;
8928
8929
8930
8931
8932
8933 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8934 if (!dmabuf)
8935 return NULL;
8936
8937 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8938 LPFC_HDR_TEMPLATE_SIZE,
8939 &dmabuf->phys, GFP_KERNEL);
8940 if (!dmabuf->virt) {
8941 rpi_hdr = NULL;
8942 goto err_free_dmabuf;
8943 }
8944
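/* The RPI header template must be aligned to its own size */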
8945 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8946 rpi_hdr = NULL;
8947 goto err_free_coherent;
8948 }
8949
8950
8951 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8952 if (!rpi_hdr)
8953 goto err_free_coherent;
8954
8955 rpi_hdr->dmabuf = dmabuf;
8956 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8957 rpi_hdr->page_count = 1;
8958 spin_lock_irq(&phba->hbalock);
8959
8960
8961 rpi_hdr->start_rpi = curr_rpi_range;
8962 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8963 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8964
8965 spin_unlock_irq(&phba->hbalock);
8966 return rpi_hdr;
8967
8968 err_free_coherent:
8969 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8970 dmabuf->virt, dmabuf->phys);
8971 err_free_dmabuf:
8972 kfree(dmabuf);
8973 return NULL;
8974 }
8975
8976
8977
8978
8979
8980
8981
8982
8983
8984
8985 void
8986 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8987 {
8988 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8989
8990 if (!phba->sli4_hba.rpi_hdrs_in_use)
8991 goto exit;
8992
8993 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8994 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
8995 list_del(&rpi_hdr->list);
8996 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
8997 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
8998 kfree(rpi_hdr->dmabuf);
8999 kfree(rpi_hdr);
9000 }
9001 exit:
9002
9003 phba->sli4_hba.next_rpi = 0;
9004 }
9005
9006
9007
9008
9009
9010
9011
9012
9013
9014
9015
9016
9017
9018 static struct lpfc_hba *
9019 lpfc_hba_alloc(struct pci_dev *pdev)
9020 {
9021 struct lpfc_hba *phba;
9022
9023
9024 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
9025 if (!phba) {
9026 dev_err(&pdev->dev, "failed to allocate hba struct\n");
9027 return NULL;
9028 }
9029
9030
9031 phba->pcidev = pdev;
9032
9033
9034 phba->brd_no = lpfc_get_instance();
9035 if (phba->brd_no < 0) {
9036 kfree(phba);
9037 return NULL;
9038 }
9039 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9040
9041 spin_lock_init(&phba->ct_ev_lock);
9042 INIT_LIST_HEAD(&phba->ct_ev_waiters);
9043
9044 return phba;
9045 }
9046
9047
9048
9049
9050
9051
9052
9053
9054 static void
9055 lpfc_hba_free(struct lpfc_hba *phba)
9056 {
9057 if (phba->sli_rev == LPFC_SLI_REV4)
9058 kfree(phba->sli4_hba.hdwq);
9059
9060
9061 idr_remove(&lpfc_hba_index, phba->brd_no);
9062
9063
9064 kfree(phba->sli.sli3_ring);
9065 phba->sli.sli3_ring = NULL;
9066
9067 kfree(phba);
9068 return;
9069 }
9070
9071
9072
9073
9074
9075
9076
9077
9078
9079
9080 void
9081 lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
9082 {
9083 struct lpfc_hba *phba = vport->phba;
9084
9085 vport->load_flag |= FC_ALLOW_FDMI;
9086 if (phba->cfg_enable_SmartSAN ||
9087 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9088
9089 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9090 if (phba->cfg_enable_SmartSAN)
9091 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9092 else
9093 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9094 }
9095
9096 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
9097 "6077 Setup FDMI mask: hba x%x port x%x\n",
9098 vport->fdmi_hba_mask, vport->fdmi_port_mask);
9099 }
9100
9101
9102
9103
9104
9105
9106
9107
9108
9109
9110
9111
9112 static int
9113 lpfc_create_shost(struct lpfc_hba *phba)
9114 {
9115 struct lpfc_vport *vport;
9116 struct Scsi_Host *shost;
9117
9118
9119 phba->fc_edtov = FF_DEF_EDTOV;
9120 phba->fc_ratov = FF_DEF_RATOV;
9121 phba->fc_altov = FF_DEF_ALTOV;
9122 phba->fc_arbtov = FF_DEF_ARBTOV;
9123
9124 atomic_set(&phba->sdev_cnt, 0);
9125 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9126 if (!vport)
9127 return -ENODEV;
9128
9129 shost = lpfc_shost_from_vport(vport);
9130 phba->pport = vport;
9131
9132 if (phba->nvmet_support) {
9133
9134 phba->targetport = NULL;
9135 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9136 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9137 "6076 NVME Target Found\n");
9138 }
9139
9140 lpfc_debugfs_initialize(vport);
9141
9142 pci_set_drvdata(phba->pcidev, shost);
9143
9144 lpfc_setup_fdmi_mask(vport);
9145
9146
9147
9148
9149
9150 return 0;
9151 }
9152
9153
9154
9155
9156
9157
9158
9159
9160 static void
9161 lpfc_destroy_shost(struct lpfc_hba *phba)
9162 {
9163 struct lpfc_vport *vport = phba->pport;
9164
9165
9166 destroy_port(vport);
9167
9168 return;
9169 }
9170
9171
9172
9173
9174
9175
9176
9177
9178
9179 static void
9180 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9181 {
9182 uint32_t old_mask;
9183 uint32_t old_guard;
9184
9185 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9186 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9187 "1478 Registering BlockGuard with the "
9188 "SCSI layer\n");
9189
9190 old_mask = phba->cfg_prot_mask;
9191 old_guard = phba->cfg_prot_guard;
9192
9193
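/* Only DIF Type 1 and DIX Type 0/1 protection with IP or CRC guard are
 * supported; mask off anything else that was requested.
 */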
9194 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9195 SHOST_DIX_TYPE0_PROTECTION |
9196 SHOST_DIX_TYPE1_PROTECTION);
9197 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9198 SHOST_DIX_GUARD_CRC);
9199
9200
9201 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9202 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9203
9204 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9205 if ((old_mask != phba->cfg_prot_mask) ||
9206 (old_guard != phba->cfg_prot_guard))
9207 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9208 "1475 Registering BlockGuard with the "
9209 "SCSI layer: mask %d guard %d\n",
9210 phba->cfg_prot_mask,
9211 phba->cfg_prot_guard);
9212
9213 scsi_host_set_prot(shost, phba->cfg_prot_mask);
9214 scsi_host_set_guard(shost, phba->cfg_prot_guard);
9215 } else
9216 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9217 "1479 Not Registering BlockGuard with the SCSI "
9218 "layer, Bad protection parameters: %d %d\n",
9219 old_mask, old_guard);
9220 }
9221 }
9222
9223
9224
9225
9226
9227
9228
9229
9230 static void
9231 lpfc_post_init_setup(struct lpfc_hba *phba)
9232 {
9233 struct Scsi_Host *shost;
9234 struct lpfc_adapter_event_header adapter_event;
9235
9236
9237 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9238
9239
9240
9241
9242
9243 shost = pci_get_drvdata(phba->pcidev);
9244 shost->can_queue = phba->cfg_hba_queue_depth - 10;
9245
9246 lpfc_host_attrib_init(shost);
9247
9248 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9249 spin_lock_irq(shost->host_lock);
9250 lpfc_poll_start_timer(phba);
9251 spin_unlock_irq(shost->host_lock);
9252 }
9253
9254 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9255 "0428 Perform SCSI scan\n");
9256
9257 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9258 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9259 fc_host_post_vendor_event(shost, fc_get_event_number(),
9260 sizeof(adapter_event),
9261 (char *) &adapter_event,
9262 LPFC_NL_VENDOR_ID);
9263 return;
9264 }
9265
9266
9267
9268
9269
9270
9271
9272
9273
9274
9275
9276
9277 static int
9278 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9279 {
9280 struct pci_dev *pdev = phba->pcidev;
9281 unsigned long bar0map_len, bar2map_len;
9282 int i, hbq_count;
9283 void *ptr;
9284 int error;
9285
9286 if (!pdev)
9287 return -ENODEV;
9288
9289
9290 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9291 if (error)
9292 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9293 if (error)
9294 return error;
9295 error = -ENODEV;
9296
9297
9298
9299
9300 phba->pci_bar0_map = pci_resource_start(pdev, 0);
9301 bar0map_len = pci_resource_len(pdev, 0);
9302
9303 phba->pci_bar2_map = pci_resource_start(pdev, 2);
9304 bar2map_len = pci_resource_len(pdev, 2);
9305
9306
9307 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9308 if (!phba->slim_memmap_p) {
9309 dev_printk(KERN_ERR, &pdev->dev,
9310 "ioremap failed for SLIM memory.\n");
9311 goto out;
9312 }
9313
9314
9315 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9316 if (!phba->ctrl_regs_memmap_p) {
9317 dev_printk(KERN_ERR, &pdev->dev,
9318 "ioremap failed for HBA control registers.\n");
9319 goto out_iounmap_slim;
9320 }
9321
9322
9323 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9324 &phba->slim2p.phys, GFP_KERNEL);
9325 if (!phba->slim2p.virt)
9326 goto out_iounmap;
9327
9328 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9329 phba->mbox_ext = (phba->slim2p.virt +
9330 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9331 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9332 phba->IOCBs = (phba->slim2p.virt +
9333 offsetof(struct lpfc_sli2_slim, IOCBs));
9334
9335 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9336 lpfc_sli_hbq_size(),
9337 &phba->hbqslimp.phys,
9338 GFP_KERNEL);
9339 if (!phba->hbqslimp.virt)
9340 goto out_free_slim;
9341
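/* Carve the HBQ slim area into the per-HBQ entry arrays */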
9342 hbq_count = lpfc_sli_hbq_count();
9343 ptr = phba->hbqslimp.virt;
9344 for (i = 0; i < hbq_count; ++i) {
9345 phba->hbqs[i].hbq_virt = ptr;
9346 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9347 ptr += (lpfc_hbq_defs[i]->entry_count *
9348 sizeof(struct lpfc_hbq_entry));
9349 }
9350 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9351 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9352
9353 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9354
9355 phba->MBslimaddr = phba->slim_memmap_p;
9356 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9357 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9358 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9359 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9360
9361 return 0;
9362
9363 out_free_slim:
9364 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9365 phba->slim2p.virt, phba->slim2p.phys);
9366 out_iounmap:
9367 iounmap(phba->ctrl_regs_memmap_p);
9368 out_iounmap_slim:
9369 iounmap(phba->slim_memmap_p);
9370 out:
9371 return error;
9372 }
9373
9374
9375
9376
9377
9378
9379
9380
9381 static void
9382 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9383 {
9384 struct pci_dev *pdev;
9385
9386
9387 if (!phba->pcidev)
9388 return;
9389 else
9390 pdev = phba->pcidev;
9391
9392
9393 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9394 phba->hbqslimp.virt, phba->hbqslimp.phys);
9395 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9396 phba->slim2p.virt, phba->slim2p.phys);
9397
9398
9399 iounmap(phba->ctrl_regs_memmap_p);
9400 iounmap(phba->slim_memmap_p);
9401
9402 return;
9403 }
9404
9405
9406
9407 /**
9408  * lpfc_sli4_post_status_check - Check the SLI-4 device POST status
9409  * @phba: pointer to lpfc hba data structure.
9410  *
9411  * Poll the port status/semaphore registers until POST completes and report
9412  * any unrecoverable error state.  Returns 0 when the port is ready.
9413  */
9414 int
9415 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9416 {
9417 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9418 struct lpfc_register reg_data;
9419 int i, port_error = 0;
9420 uint32_t if_type;
9421
9422 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9423 memset(&reg_data, 0, sizeof(reg_data));
9424 if (!phba->sli4_hba.PSMPHRregaddr)
9425 return -ENODEV;
9426
9427
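/* Wait up to 30 seconds (3000 x 10ms) for the port to finish POST */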
9428 for (i = 0; i < 3000; i++) {
9429 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9430 &portsmphr_reg.word0) ||
9431 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9432
9433 port_error = -ENODEV;
9434 break;
9435 }
9436 if (LPFC_POST_STAGE_PORT_READY ==
9437 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9438 break;
9439 msleep(10);
9440 }
9441
9442
9443
9444
9445
9446 if (port_error) {
9447 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9448 "1408 Port Failed POST - portsmphr=0x%x, "
9449 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9450 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9451 portsmphr_reg.word0,
9452 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9453 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9454 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9455 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9456 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9457 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9458 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9459 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9460 } else {
9461 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9462 "2534 Device Info: SLIFamily=0x%x, "
9463 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9464 "SLIHint_2=0x%x, FT=0x%x\n",
9465 bf_get(lpfc_sli_intf_sli_family,
9466 &phba->sli4_hba.sli_intf),
9467 bf_get(lpfc_sli_intf_slirev,
9468 &phba->sli4_hba.sli_intf),
9469 bf_get(lpfc_sli_intf_if_type,
9470 &phba->sli4_hba.sli_intf),
9471 bf_get(lpfc_sli_intf_sli_hint1,
9472 &phba->sli4_hba.sli_intf),
9473 bf_get(lpfc_sli_intf_sli_hint2,
9474 &phba->sli4_hba.sli_intf),
9475 bf_get(lpfc_sli_intf_func_type,
9476 &phba->sli4_hba.sli_intf));
9477
9478
9479
9480
9481
9482 if_type = bf_get(lpfc_sli_intf_if_type,
9483 &phba->sli4_hba.sli_intf);
9484 switch (if_type) {
9485 case LPFC_SLI_INTF_IF_TYPE_0:
9486 phba->sli4_hba.ue_mask_lo =
9487 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9488 phba->sli4_hba.ue_mask_hi =
9489 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9490 uerrlo_reg.word0 =
9491 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9492 uerrhi_reg.word0 =
9493 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9494 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9495 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9496 lpfc_printf_log(phba, KERN_ERR,
9497 LOG_TRACE_EVENT,
9498 "1422 Unrecoverable Error "
9499 "Detected during POST "
9500 "uerr_lo_reg=0x%x, "
9501 "uerr_hi_reg=0x%x, "
9502 "ue_mask_lo_reg=0x%x, "
9503 "ue_mask_hi_reg=0x%x\n",
9504 uerrlo_reg.word0,
9505 uerrhi_reg.word0,
9506 phba->sli4_hba.ue_mask_lo,
9507 phba->sli4_hba.ue_mask_hi);
9508 port_error = -ENODEV;
9509 }
9510 break;
9511 case LPFC_SLI_INTF_IF_TYPE_2:
9512 case LPFC_SLI_INTF_IF_TYPE_6:
9513
9514 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9515 		    &reg_data.word0) ||
9516 		    (bf_get(lpfc_sliport_status_err, &reg_data) &&
9517 		     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9518 phba->work_status[0] =
9519 readl(phba->sli4_hba.u.if_type2.
9520 ERR1regaddr);
9521 phba->work_status[1] =
9522 readl(phba->sli4_hba.u.if_type2.
9523 ERR2regaddr);
9524 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9525 "2888 Unrecoverable port error "
9526 "following POST: port status reg "
9527 "0x%x, port_smphr reg 0x%x, "
9528 "error 1=0x%x, error 2=0x%x\n",
9529 reg_data.word0,
9530 portsmphr_reg.word0,
9531 phba->work_status[0],
9532 phba->work_status[1]);
9533 port_error = -ENODEV;
9534 break;
9535 }
9536
9537 if (lpfc_pldv_detect &&
9538 bf_get(lpfc_sli_intf_sli_family,
9539 &phba->sli4_hba.sli_intf) ==
9540 LPFC_SLI_INTF_FAMILY_G6)
9541 pci_write_config_byte(phba->pcidev,
9542 LPFC_SLI_INTF, CFG_PLD);
9543 break;
9544 case LPFC_SLI_INTF_IF_TYPE_1:
9545 default:
9546 break;
9547 }
9548 }
9549 return port_error;
9550 }
9551
9552
9553
9554
9555
9556
9557
9558
9559
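/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map
 * @phba: pointer to lpfc hba data structure.
 * @if_type: sli interface type of the device.
 *
 * Sets up the kernel virtual addresses of the SLI4 registers located in
 * the device configuration (BAR0) region, according to the interface type.
 **/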
9560 static void
9561 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9562 {
9563 switch (if_type) {
9564 case LPFC_SLI_INTF_IF_TYPE_0:
9565 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9566 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9567 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9568 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9569 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9570 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9571 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9572 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9573 phba->sli4_hba.SLIINTFregaddr =
9574 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9575 break;
9576 case LPFC_SLI_INTF_IF_TYPE_2:
9577 phba->sli4_hba.u.if_type2.EQDregaddr =
9578 phba->sli4_hba.conf_regs_memmap_p +
9579 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9580 phba->sli4_hba.u.if_type2.ERR1regaddr =
9581 phba->sli4_hba.conf_regs_memmap_p +
9582 LPFC_CTL_PORT_ER1_OFFSET;
9583 phba->sli4_hba.u.if_type2.ERR2regaddr =
9584 phba->sli4_hba.conf_regs_memmap_p +
9585 LPFC_CTL_PORT_ER2_OFFSET;
9586 phba->sli4_hba.u.if_type2.CTRLregaddr =
9587 phba->sli4_hba.conf_regs_memmap_p +
9588 LPFC_CTL_PORT_CTL_OFFSET;
9589 phba->sli4_hba.u.if_type2.STATUSregaddr =
9590 phba->sli4_hba.conf_regs_memmap_p +
9591 LPFC_CTL_PORT_STA_OFFSET;
9592 phba->sli4_hba.SLIINTFregaddr =
9593 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9594 phba->sli4_hba.PSMPHRregaddr =
9595 phba->sli4_hba.conf_regs_memmap_p +
9596 LPFC_CTL_PORT_SEM_OFFSET;
9597 phba->sli4_hba.RQDBregaddr =
9598 phba->sli4_hba.conf_regs_memmap_p +
9599 LPFC_ULP0_RQ_DOORBELL;
9600 phba->sli4_hba.WQDBregaddr =
9601 phba->sli4_hba.conf_regs_memmap_p +
9602 LPFC_ULP0_WQ_DOORBELL;
9603 phba->sli4_hba.CQDBregaddr =
9604 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9605 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9606 phba->sli4_hba.MQDBregaddr =
9607 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9608 phba->sli4_hba.BMBXregaddr =
9609 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9610 break;
9611 case LPFC_SLI_INTF_IF_TYPE_6:
9612 phba->sli4_hba.u.if_type2.EQDregaddr =
9613 phba->sli4_hba.conf_regs_memmap_p +
9614 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9615 phba->sli4_hba.u.if_type2.ERR1regaddr =
9616 phba->sli4_hba.conf_regs_memmap_p +
9617 LPFC_CTL_PORT_ER1_OFFSET;
9618 phba->sli4_hba.u.if_type2.ERR2regaddr =
9619 phba->sli4_hba.conf_regs_memmap_p +
9620 LPFC_CTL_PORT_ER2_OFFSET;
9621 phba->sli4_hba.u.if_type2.CTRLregaddr =
9622 phba->sli4_hba.conf_regs_memmap_p +
9623 LPFC_CTL_PORT_CTL_OFFSET;
9624 phba->sli4_hba.u.if_type2.STATUSregaddr =
9625 phba->sli4_hba.conf_regs_memmap_p +
9626 LPFC_CTL_PORT_STA_OFFSET;
9627 phba->sli4_hba.PSMPHRregaddr =
9628 phba->sli4_hba.conf_regs_memmap_p +
9629 LPFC_CTL_PORT_SEM_OFFSET;
9630 phba->sli4_hba.BMBXregaddr =
9631 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9632 break;
9633 case LPFC_SLI_INTF_IF_TYPE_1:
9634 default:
9635 dev_printk(KERN_ERR, &phba->pcidev->dev,
9636 "FATAL - unsupported SLI4 interface type - %d\n",
9637 if_type);
9638 break;
9639 }
9640 }
9641
9642
9643
9644
9645
9646
9647
9648
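/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map
 * @phba: pointer to lpfc hba data structure.
 * @if_type: sli interface type of the device.
 *
 * Sets up the kernel virtual addresses of the control and doorbell
 * registers mapped through BAR1 for the given interface type.
 **/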
9649 static void
9650 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9651 {
9652 switch (if_type) {
9653 case LPFC_SLI_INTF_IF_TYPE_0:
9654 phba->sli4_hba.PSMPHRregaddr =
9655 phba->sli4_hba.ctrl_regs_memmap_p +
9656 LPFC_SLIPORT_IF0_SMPHR;
9657 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9658 LPFC_HST_ISR0;
9659 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9660 LPFC_HST_IMR0;
9661 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9662 LPFC_HST_ISCR0;
9663 break;
9664 case LPFC_SLI_INTF_IF_TYPE_6:
9665 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9666 LPFC_IF6_RQ_DOORBELL;
9667 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9668 LPFC_IF6_WQ_DOORBELL;
9669 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9670 LPFC_IF6_CQ_DOORBELL;
9671 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9672 LPFC_IF6_EQ_DOORBELL;
9673 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9674 LPFC_IF6_MQ_DOORBELL;
9675 break;
9676 case LPFC_SLI_INTF_IF_TYPE_2:
9677 case LPFC_SLI_INTF_IF_TYPE_1:
9678 default:
9679 dev_err(&phba->pcidev->dev,
9680 "FATAL - unsupported SLI4 interface type - %d\n",
9681 if_type);
9682 break;
9683 }
9684 }
9685
9686
9687
9688
9689
9690
9691
9692
9693
9694
9695
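/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number.
 *
 * Sets up the doorbell and bootstrap mailbox register addresses for the
 * given virtual function within the BAR2 doorbell register region.
 *
 * Return: 0 on success, -ENODEV if @vf exceeds LPFC_VIR_FUNC_MAX.
 **/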
9696 static int
9697 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9698 {
9699 if (vf > LPFC_VIR_FUNC_MAX)
9700 return -ENODEV;
9701
9702 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9703 vf * LPFC_VFR_PAGE_SIZE +
9704 LPFC_ULP0_RQ_DOORBELL);
9705 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9706 vf * LPFC_VFR_PAGE_SIZE +
9707 LPFC_ULP0_WQ_DOORBELL);
9708 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9709 vf * LPFC_VFR_PAGE_SIZE +
9710 LPFC_EQCQ_DOORBELL);
9711 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9712 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9713 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9714 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9715 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9716 return 0;
9717 }
9718
9719
9720
9721
9722
9723
9724
9725
9726
9727
9728
9729
9730
9731
9732
9733
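/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates a 16-byte aligned DMA coherent region for the bootstrap
 * mailbox and records the aligned virtual/physical addresses and the
 * split high/low DMA address words used to post the mailbox to the port.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 **/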
9734 static int
9735 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9736 {
9737 uint32_t bmbx_size;
9738 struct lpfc_dmabuf *dmabuf;
9739 struct dma_address *dma_address;
9740 uint32_t pa_addr;
9741 uint64_t phys_addr;
9742
9743 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9744 if (!dmabuf)
9745 return -ENOMEM;
9746
9747
9748
9749
9750
9751 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9752 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9753 &dmabuf->phys, GFP_KERNEL);
9754 if (!dmabuf->virt) {
9755 kfree(dmabuf);
9756 return -ENOMEM;
9757 }
9758
9759
9760
9761
9762
9763
9764
9765
9766 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9767 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9768
9769 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9770 LPFC_ALIGN_16_BYTE);
9771 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9772 LPFC_ALIGN_16_BYTE);
9773
9774
9775
9776
9777
9778
9779
9780
9781
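	/*
	 * The aligned 64-bit bootstrap mailbox address is posted to the
	 * port as two 30-bit words (bits 63:34 and 33:4), each shifted
	 * left by two and tagged with a high/low address marker bit.
	 */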
9782 dma_address = &phba->sli4_hba.bmbx.dma_address;
9783 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9784 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9785 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9786 LPFC_BMBX_BIT1_ADDR_HI);
9787
9788 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9789 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9790 LPFC_BMBX_BIT1_ADDR_LO);
9791 return 0;
9792 }
9793
9794
9795
9796
9797
9798
9799
9800
9801
9802
9803
9804
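/**
 * lpfc_destroy_bootstrap_mbox - Destroy the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the DMA coherent memory backing the bootstrap mailbox and clears
 * the bootstrap mailbox context in the SLI4 HBA structure.
 **/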
9805 static void
9806 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9807 {
9808 dma_free_coherent(&phba->pcidev->dev,
9809 phba->sli4_hba.bmbx.bmbx_size,
9810 phba->sli4_hba.bmbx.dmabuf->virt,
9811 phba->sli4_hba.bmbx.dmabuf->phys);
9812
9813 kfree(phba->sli4_hba.bmbx.dmabuf);
9814 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9815 }
9816
9817 static const char * const lpfc_topo_to_str[] = {
9818 "Loop then P2P",
9819 "Loopback",
9820 "P2P Only",
9821 "Unsupported",
9822 "Loop Only",
9823 "Unsupported",
9824 "P2P then Loop",
9825 };
9826
9827 #define LINK_FLAGS_DEF 0x0
9828 #define LINK_FLAGS_P2P 0x1
9829 #define LINK_FLAGS_LOOP 0x2
9830
9831
9832
9833
9834
9835
9836
9837
9838
9839
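/**
 * lpfc_map_topology - Map the persistent topology read from the adapter
 * @phba: pointer to lpfc hba data structure.
 * @rd_config: pointer to the READ_CONFIG mailbox response.
 *
 * Translates the ptv/tf/pt bits returned by READ_CONFIG into the driver
 * topology setting.  If the firmware does not support persistent topology,
 * or reports invalid values, the user-configured topology is retained.
 **/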
9840 static void
9841 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9842 {
9843 u8 ptv, tf, pt;
9844
9845 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9846 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9847 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9848
9849 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9850 "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
9851 ptv, tf, pt);
9852 if (!ptv) {
9853 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9854 "2019 FW does not support persistent topology "
9855 "Using driver parameter defined value [%s]",
9856 lpfc_topo_to_str[phba->cfg_topology]);
9857 return;
9858 }
9859
9860 phba->hba_flag |= HBA_PERSISTENT_TOPO;
9861
9862
9863 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9864 LPFC_SLI_INTF_IF_TYPE_6) ||
9865 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9866 LPFC_SLI_INTF_FAMILY_G6)) {
9867 if (!tf) {
9868 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9869 ? FLAGS_TOPOLOGY_MODE_LOOP
9870 : FLAGS_TOPOLOGY_MODE_PT_PT);
9871 } else {
9872 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9873 }
9874 } else {
9875 if (tf) {
9876
9877 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9878 FLAGS_TOPOLOGY_MODE_LOOP_PT);
9879 } else {
9880 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9881 ? FLAGS_TOPOLOGY_MODE_PT_PT
9882 : FLAGS_TOPOLOGY_MODE_LOOP);
9883 }
9884 }
9885 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9886 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9887 "2020 Using persistent topology value [%s]",
9888 lpfc_topo_to_str[phba->cfg_topology]);
9889 } else {
9890 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9891 "2021 Invalid topology values from FW "
9892 "Using driver parameter defined value [%s]",
9893 lpfc_topo_to_str[phba->cfg_topology]);
9894 }
9895 }
9896
9897
9898
9899
9900
9901
9902
9903
9904
9905
9906
9907
9908
9909
9910
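/**
 * lpfc_sli4_read_config - Get the SLI4 device configuration parameters
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues the READ_CONFIG mailbox command to retrieve the port's resource
 * limits (XRI/VPI/RPI/VFI/FCFI counts and bases, EQ/CQ/WQ/RQ counts),
 * congestion signalling capabilities, persistent topology and forced link
 * speed, then trims the driver queue and queue depth configuration to fit.
 *
 * Return: 0 on success, -ENOMEM or -EIO on failure.
 **/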
9911 int
9912 lpfc_sli4_read_config(struct lpfc_hba *phba)
9913 {
9914 LPFC_MBOXQ_t *pmb;
9915 struct lpfc_mbx_read_config *rd_config;
9916 union lpfc_sli4_cfg_shdr *shdr;
9917 uint32_t shdr_status, shdr_add_status;
9918 struct lpfc_mbx_get_func_cfg *get_func_cfg;
9919 struct lpfc_rsrc_desc_fcfcoe *desc;
9920 char *pdesc_0;
9921 uint16_t forced_link_speed;
9922 uint32_t if_type, qmin, fawwpn;
9923 int length, i, rc = 0, rc2;
9924
9925 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9926 if (!pmb) {
9927 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9928 "2011 Unable to allocate memory for issuing "
9929 "SLI_CONFIG_SPECIAL mailbox command\n");
9930 return -ENOMEM;
9931 }
9932
9933 lpfc_read_config(phba, pmb);
9934
9935 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9936 if (rc != MBX_SUCCESS) {
9937 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9938 				"2012 Mailbox failed, mbxCmd x%x "
9939 "READ_CONFIG, mbxStatus x%x\n",
9940 bf_get(lpfc_mqe_command, &pmb->u.mqe),
9941 bf_get(lpfc_mqe_status, &pmb->u.mqe));
9942 rc = -EIO;
9943 } else {
9944 rd_config = &pmb->u.mqe.un.rd_config;
9945 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9946 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9947 phba->sli4_hba.lnk_info.lnk_tp =
9948 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9949 phba->sli4_hba.lnk_info.lnk_no =
9950 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9951 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9952 "3081 lnk_type:%d, lnk_numb:%d\n",
9953 phba->sli4_hba.lnk_info.lnk_tp,
9954 phba->sli4_hba.lnk_info.lnk_no);
9955 } else
9956 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9957 "3082 Mailbox (x%x) returned ldv:x0\n",
9958 bf_get(lpfc_mqe_command, &pmb->u.mqe));
9959 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9960 phba->bbcredit_support = 1;
9961 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9962 }
9963
9964 fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
9965
9966 if (fawwpn) {
9967 lpfc_printf_log(phba, KERN_INFO,
9968 LOG_INIT | LOG_DISCOVERY,
9969 "2702 READ_CONFIG: FA-PWWN is "
9970 "configured on\n");
9971 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
9972 } else {
9973
9974 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
9975 }
9976
9977 phba->sli4_hba.conf_trunk =
9978 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9979 phba->sli4_hba.extents_in_use =
9980 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9981
9982 phba->sli4_hba.max_cfg_param.max_xri =
9983 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9984
9985 if (is_kdump_kernel() &&
9986 phba->sli4_hba.max_cfg_param.max_xri > 512)
9987 phba->sli4_hba.max_cfg_param.max_xri = 512;
9988 phba->sli4_hba.max_cfg_param.xri_base =
9989 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9990 phba->sli4_hba.max_cfg_param.max_vpi =
9991 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9992
9993 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9994 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9995 phba->sli4_hba.max_cfg_param.vpi_base =
9996 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
9997 phba->sli4_hba.max_cfg_param.max_rpi =
9998 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
9999 phba->sli4_hba.max_cfg_param.rpi_base =
10000 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
10001 phba->sli4_hba.max_cfg_param.max_vfi =
10002 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
10003 phba->sli4_hba.max_cfg_param.vfi_base =
10004 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
10005 phba->sli4_hba.max_cfg_param.max_fcfi =
10006 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
10007 phba->sli4_hba.max_cfg_param.max_eq =
10008 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
10009 phba->sli4_hba.max_cfg_param.max_rq =
10010 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
10011 phba->sli4_hba.max_cfg_param.max_wq =
10012 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
10013 phba->sli4_hba.max_cfg_param.max_cq =
10014 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
10015 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
10016 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
10017 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
10018 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
10019 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
10020 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
10021 phba->max_vports = phba->max_vpi;
10022
10023
10024
10025
10026
10027
10028
10029
10030
10031
10032 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10033 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10034 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10035
10036 if (lpfc_use_cgn_signal) {
10037 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
10038 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10039 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10040 }
10041 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
10042
10043
10044
10045 if (phba->cgn_reg_signal !=
10046 EDC_CG_SIG_WARN_ONLY) {
10047
10048 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10049 phba->cgn_reg_signal =
10050 EDC_CG_SIG_NOTSUPPORTED;
10051 } else {
10052 phba->cgn_reg_signal =
10053 EDC_CG_SIG_WARN_ALARM;
10054 phba->cgn_reg_fpin =
10055 LPFC_CGN_FPIN_NONE;
10056 }
10057 }
10058 }
10059
10060
10061 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10062 phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10063
10064 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
10065 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
10066 phba->cgn_reg_signal, phba->cgn_reg_fpin);
10067
10068 lpfc_map_topology(phba, rd_config);
10069 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10070 "2003 cfg params Extents? %d "
10071 "XRI(B:%d M:%d), "
10072 "VPI(B:%d M:%d) "
10073 "VFI(B:%d M:%d) "
10074 "RPI(B:%d M:%d) "
10075 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
10076 phba->sli4_hba.extents_in_use,
10077 phba->sli4_hba.max_cfg_param.xri_base,
10078 phba->sli4_hba.max_cfg_param.max_xri,
10079 phba->sli4_hba.max_cfg_param.vpi_base,
10080 phba->sli4_hba.max_cfg_param.max_vpi,
10081 phba->sli4_hba.max_cfg_param.vfi_base,
10082 phba->sli4_hba.max_cfg_param.max_vfi,
10083 phba->sli4_hba.max_cfg_param.rpi_base,
10084 phba->sli4_hba.max_cfg_param.max_rpi,
10085 phba->sli4_hba.max_cfg_param.max_fcfi,
10086 phba->sli4_hba.max_cfg_param.max_eq,
10087 phba->sli4_hba.max_cfg_param.max_cq,
10088 phba->sli4_hba.max_cfg_param.max_wq,
10089 phba->sli4_hba.max_cfg_param.max_rq,
10090 phba->lmt);
10091
10092
10093
10094
10095
10096 qmin = phba->sli4_hba.max_cfg_param.max_wq;
10097 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10098 qmin = phba->sli4_hba.max_cfg_param.max_cq;
10099 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10100 qmin = phba->sli4_hba.max_cfg_param.max_eq;
10101
10102
10103
10104
10105
10106
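		/*
		 * Reserve four of the minimum EQ/CQ/WQ count for slow-path
		 * use (mailbox, ELS, NVME LS); the remainder caps the
		 * fast-path IRQ and hardware queue counts.
		 */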
10107 qmin -= 4;
10108
10109
10110 if ((phba->cfg_irq_chann > qmin) ||
10111 (phba->cfg_hdw_queue > qmin)) {
10112 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10113 "2005 Reducing Queues - "
10114 "FW resource limitation: "
10115 "WQ %d CQ %d EQ %d: min %d: "
10116 "IRQ %d HDWQ %d\n",
10117 phba->sli4_hba.max_cfg_param.max_wq,
10118 phba->sli4_hba.max_cfg_param.max_cq,
10119 phba->sli4_hba.max_cfg_param.max_eq,
10120 qmin, phba->cfg_irq_chann,
10121 phba->cfg_hdw_queue);
10122
10123 if (phba->cfg_irq_chann > qmin)
10124 phba->cfg_irq_chann = qmin;
10125 if (phba->cfg_hdw_queue > qmin)
10126 phba->cfg_hdw_queue = qmin;
10127 }
10128 }
10129
10130 if (rc)
10131 goto read_cfg_out;
10132
10133
10134 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10135 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10136 forced_link_speed =
10137 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
10138 if (forced_link_speed) {
10139 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
10140
10141 switch (forced_link_speed) {
10142 case LINK_SPEED_1G:
10143 phba->cfg_link_speed =
10144 LPFC_USER_LINK_SPEED_1G;
10145 break;
10146 case LINK_SPEED_2G:
10147 phba->cfg_link_speed =
10148 LPFC_USER_LINK_SPEED_2G;
10149 break;
10150 case LINK_SPEED_4G:
10151 phba->cfg_link_speed =
10152 LPFC_USER_LINK_SPEED_4G;
10153 break;
10154 case LINK_SPEED_8G:
10155 phba->cfg_link_speed =
10156 LPFC_USER_LINK_SPEED_8G;
10157 break;
10158 case LINK_SPEED_10G:
10159 phba->cfg_link_speed =
10160 LPFC_USER_LINK_SPEED_10G;
10161 break;
10162 case LINK_SPEED_16G:
10163 phba->cfg_link_speed =
10164 LPFC_USER_LINK_SPEED_16G;
10165 break;
10166 case LINK_SPEED_32G:
10167 phba->cfg_link_speed =
10168 LPFC_USER_LINK_SPEED_32G;
10169 break;
10170 case LINK_SPEED_64G:
10171 phba->cfg_link_speed =
10172 LPFC_USER_LINK_SPEED_64G;
10173 break;
10174 case 0xffff:
10175 phba->cfg_link_speed =
10176 LPFC_USER_LINK_SPEED_AUTO;
10177 break;
10178 default:
10179 lpfc_printf_log(phba, KERN_ERR,
10180 LOG_TRACE_EVENT,
10181 "0047 Unrecognized link "
10182 "speed : %d\n",
10183 forced_link_speed);
10184 phba->cfg_link_speed =
10185 LPFC_USER_LINK_SPEED_AUTO;
10186 }
10187 }
10188 }
10189
10190
10191 length = phba->sli4_hba.max_cfg_param.max_xri -
10192 lpfc_sli4_get_els_iocb_cnt(phba);
10193 if (phba->cfg_hba_queue_depth > length) {
10194 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10195 "3361 HBA queue depth changed from %d to %d\n",
10196 phba->cfg_hba_queue_depth, length);
10197 phba->cfg_hba_queue_depth = length;
10198 }
10199
10200 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10201 LPFC_SLI_INTF_IF_TYPE_2)
10202 goto read_cfg_out;
10203
10204
10205 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10206 sizeof(struct lpfc_sli4_cfg_mhdr));
10207 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10208 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
10209 length, LPFC_SLI4_MBX_EMBED);
10210
10211 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10212 shdr = (union lpfc_sli4_cfg_shdr *)
10213 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10214 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10215 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10216 if (rc2 || shdr_status || shdr_add_status) {
10217 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10218 			"3026 Mailbox failed, mbxCmd x%x "
10219 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10220 bf_get(lpfc_mqe_command, &pmb->u.mqe),
10221 bf_get(lpfc_mqe_status, &pmb->u.mqe));
10222 goto read_cfg_out;
10223 }
10224
10225
10226 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10227
10228 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10229 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10230 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10231 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10232 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10233 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10234 goto read_cfg_out;
10235
10236 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10237 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10238 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10239 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10240 phba->sli4_hba.iov.pf_number =
10241 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10242 phba->sli4_hba.iov.vf_number =
10243 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10244 break;
10245 }
10246 }
10247
10248 if (i < LPFC_RSRC_DESC_MAX_NUM)
10249 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10250 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10251 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10252 phba->sli4_hba.iov.vf_number);
10253 else
10254 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10255 "3028 GET_FUNCTION_CONFIG: failed to find "
10256 "Resource Descriptor:x%x\n",
10257 LPFC_RSRC_DESC_TYPE_FCFCOE);
10258
10259 read_cfg_out:
10260 mempool_free(pmb, phba->mbox_mem_pool);
10261 return rc;
10262 }
10263
10264
10265
10266
10267
10268
10269
10270
10271
10272
10273
10274
10275
10276
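/**
 * lpfc_setup_endian_order - Write the host endian order to the device
 * @phba: pointer to lpfc hba data structure.
 *
 * For if_type 0 devices, issues the SLI_CONFIG_SPECIAL mailbox command
 * carrying the host endian words so the port can detect the host byte
 * order.  Other interface types do not require this step.
 *
 * Return: 0 on success, -ENOMEM or -EIO on failure.
 **/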
10277 static int
10278 lpfc_setup_endian_order(struct lpfc_hba *phba)
10279 {
10280 LPFC_MBOXQ_t *mboxq;
10281 uint32_t if_type, rc = 0;
10282 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10283 HOST_ENDIAN_HIGH_WORD1};
10284
10285 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10286 switch (if_type) {
10287 case LPFC_SLI_INTF_IF_TYPE_0:
10288 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10289 GFP_KERNEL);
10290 if (!mboxq) {
10291 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10292 "0492 Unable to allocate memory for "
10293 "issuing SLI_CONFIG_SPECIAL mailbox "
10294 "command\n");
10295 return -ENOMEM;
10296 }
10297
10298
10299
10300
10301
10302 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10303 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10304 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10305 if (rc != MBX_SUCCESS) {
10306 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10307 "0493 SLI_CONFIG_SPECIAL mailbox "
10308 "failed with status x%x\n",
10309 rc);
10310 rc = -EIO;
10311 }
10312 mempool_free(mboxq, phba->mbox_mem_pool);
10313 break;
10314 case LPFC_SLI_INTF_IF_TYPE_6:
10315 case LPFC_SLI_INTF_IF_TYPE_2:
10316 case LPFC_SLI_INTF_IF_TYPE_1:
10317 default:
10318 break;
10319 }
10320 return rc;
10321 }
10322
10323
10324
10325
10326
10327
10328
10329
10330
10331
10332
10333
10334
10335
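/**
 * lpfc_sli4_queue_verify - Verify and update the SLI4 queue configuration
 * @phba: pointer to lpfc hba data structure.
 *
 * Clamps the NVMET MRQ count to the hardware queue count and the MRQ
 * maximum, logs the resulting IO channel configuration and sets the
 * default EQ/CQ entry sizes and counts.
 *
 * Return: always 0.
 **/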
10336 static int
10337 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10338 {
10339
10340
10341
10342
10343
10344 if (phba->nvmet_support) {
10345 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10346 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10347 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10348 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10349 }
10350
10351 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10352 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10353 phba->cfg_hdw_queue, phba->cfg_irq_chann,
10354 phba->cfg_nvmet_mrq);
10355
10356
10357 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10358 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10359
10360
10361 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10362 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10363 return 0;
10364 }
10365
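/*
 * lpfc_alloc_io_wq_cq - Allocate one fast-path IO CQ/WQ pair for the given
 * hardware queue index, using expanded page sizes when enabled, and bind
 * the pair to the CPU associated with that hardware queue.
 */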
10366 static int
10367 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10368 {
10369 struct lpfc_queue *qdesc;
10370 u32 wqesize;
10371 int cpu;
10372
10373 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10374
10375 if (phba->enab_exp_wqcq_pages)
10376
10377 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10378 phba->sli4_hba.cq_esize,
10379 LPFC_CQE_EXP_COUNT, cpu);
10380
10381 else
10382 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10383 phba->sli4_hba.cq_esize,
10384 phba->sli4_hba.cq_ecount, cpu);
10385 if (!qdesc) {
10386 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10387 "0499 Failed allocate fast-path IO CQ (%d)\n",
10388 idx);
10389 return 1;
10390 }
10391 qdesc->qe_valid = 1;
10392 qdesc->hdwq = idx;
10393 qdesc->chann = cpu;
10394 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10395
10396
10397 if (phba->enab_exp_wqcq_pages) {
10398
10399 wqesize = (phba->fcp_embed_io) ?
10400 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10401 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10402 wqesize,
10403 LPFC_WQE_EXP_COUNT, cpu);
10404 } else
10405 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10406 phba->sli4_hba.wq_esize,
10407 phba->sli4_hba.wq_ecount, cpu);
10408
10409 if (!qdesc) {
10410 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10411 "0503 Failed allocate fast-path IO WQ (%d)\n",
10412 idx);
10413 return 1;
10414 }
10415 qdesc->hdwq = idx;
10416 qdesc->chann = cpu;
10417 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10418 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10419 return 0;
10420 }
10421
10422
10423
10424
10425
10426
10427
10428
10429
10430
10431
10432
10433
10434
10435
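/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates the hardware queue array, the per-CPU EQs, the fast-path IO
 * CQ/WQ pairs, the slow-path mailbox/ELS/NVME LS CQs and WQs, the
 * unsolicited receive queues and, when NVMET is enabled, the MRQ sets.
 *
 * Return: 0 on success, -ENOMEM on any allocation failure, in which case
 * all queues allocated so far are destroyed.
 **/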
10436 int
10437 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10438 {
10439 struct lpfc_queue *qdesc;
10440 int idx, cpu, eqcpu;
10441 struct lpfc_sli4_hdw_queue *qp;
10442 struct lpfc_vector_map_info *cpup;
10443 struct lpfc_vector_map_info *eqcpup;
10444 struct lpfc_eq_intr_info *eqi;
10445
10446
10447
10448
10449
10450 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10451 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10452 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10453 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10454 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10455 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10456 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10457 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10458 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10459 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10460
10461 if (!phba->sli4_hba.hdwq) {
10462 phba->sli4_hba.hdwq = kcalloc(
10463 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10464 GFP_KERNEL);
10465 if (!phba->sli4_hba.hdwq) {
10466 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10467 "6427 Failed allocate memory for "
10468 "fast-path Hardware Queue array\n");
10469 goto out_error;
10470 }
10471
10472 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10473 qp = &phba->sli4_hba.hdwq[idx];
10474 spin_lock_init(&qp->io_buf_list_get_lock);
10475 spin_lock_init(&qp->io_buf_list_put_lock);
10476 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10477 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10478 qp->get_io_bufs = 0;
10479 qp->put_io_bufs = 0;
10480 qp->total_io_bufs = 0;
10481 spin_lock_init(&qp->abts_io_buf_list_lock);
10482 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10483 qp->abts_scsi_io_bufs = 0;
10484 qp->abts_nvme_io_bufs = 0;
10485 INIT_LIST_HEAD(&qp->sgl_list);
10486 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10487 spin_lock_init(&qp->hdwq_lock);
10488 }
10489 }
10490
10491 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10492 if (phba->nvmet_support) {
10493 phba->sli4_hba.nvmet_cqset = kcalloc(
10494 phba->cfg_nvmet_mrq,
10495 sizeof(struct lpfc_queue *),
10496 GFP_KERNEL);
10497 if (!phba->sli4_hba.nvmet_cqset) {
10498 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10499 "3121 Fail allocate memory for "
10500 "fast-path CQ set array\n");
10501 goto out_error;
10502 }
10503 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10504 phba->cfg_nvmet_mrq,
10505 sizeof(struct lpfc_queue *),
10506 GFP_KERNEL);
10507 if (!phba->sli4_hba.nvmet_mrq_hdr) {
10508 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10509 "3122 Fail allocate memory for "
10510 "fast-path RQ set hdr array\n");
10511 goto out_error;
10512 }
10513 phba->sli4_hba.nvmet_mrq_data = kcalloc(
10514 phba->cfg_nvmet_mrq,
10515 sizeof(struct lpfc_queue *),
10516 GFP_KERNEL);
10517 if (!phba->sli4_hba.nvmet_mrq_data) {
10518 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10519 "3124 Fail allocate memory for "
10520 "fast-path RQ set data array\n");
10521 goto out_error;
10522 }
10523 }
10524 }
10525
10526 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10527
10528
10529 for_each_present_cpu(cpu) {
10530
10531
10532
10533
10534 cpup = &phba->sli4_hba.cpu_map[cpu];
10535 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10536 continue;
10537
10538
10539 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10540
10541
10542 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10543 phba->sli4_hba.eq_esize,
10544 phba->sli4_hba.eq_ecount, cpu);
10545 if (!qdesc) {
10546 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10547 "0497 Failed allocate EQ (%d)\n",
10548 cpup->hdwq);
10549 goto out_error;
10550 }
10551 qdesc->qe_valid = 1;
10552 qdesc->hdwq = cpup->hdwq;
10553 qdesc->chann = cpu;
10554 qdesc->last_cpu = qdesc->chann;
10555
10556
10557 qp->hba_eq = qdesc;
10558
10559 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10560 list_add(&qdesc->cpu_list, &eqi->list);
10561 }
10562
10563
10564
10565
10566 for_each_present_cpu(cpu) {
10567 cpup = &phba->sli4_hba.cpu_map[cpu];
10568
10569
10570 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10571 continue;
10572
10573
10574 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10575 if (qp->hba_eq)
10576 continue;
10577
10578
10579 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10580 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10581 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10582 }
10583
10584
10585 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10586 if (lpfc_alloc_io_wq_cq(phba, idx))
10587 goto out_error;
10588 }
10589
10590 if (phba->nvmet_support) {
10591 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10592 cpu = lpfc_find_cpu_handle(phba, idx,
10593 LPFC_FIND_BY_HDWQ);
10594 qdesc = lpfc_sli4_queue_alloc(phba,
10595 LPFC_DEFAULT_PAGE_SIZE,
10596 phba->sli4_hba.cq_esize,
10597 phba->sli4_hba.cq_ecount,
10598 cpu);
10599 if (!qdesc) {
10600 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10601 "3142 Failed allocate NVME "
10602 "CQ Set (%d)\n", idx);
10603 goto out_error;
10604 }
10605 qdesc->qe_valid = 1;
10606 qdesc->hdwq = idx;
10607 qdesc->chann = cpu;
10608 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10609 }
10610 }
10611
10612
10613
10614
10615
10616 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10617
10618 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10619 phba->sli4_hba.cq_esize,
10620 phba->sli4_hba.cq_ecount, cpu);
10621 if (!qdesc) {
10622 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10623 "0500 Failed allocate slow-path mailbox CQ\n");
10624 goto out_error;
10625 }
10626 qdesc->qe_valid = 1;
10627 phba->sli4_hba.mbx_cq = qdesc;
10628
10629
10630 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10631 phba->sli4_hba.cq_esize,
10632 phba->sli4_hba.cq_ecount, cpu);
10633 if (!qdesc) {
10634 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10635 "0501 Failed allocate slow-path ELS CQ\n");
10636 goto out_error;
10637 }
10638 qdesc->qe_valid = 1;
10639 qdesc->chann = cpu;
10640 phba->sli4_hba.els_cq = qdesc;
10641
10642
10643
10644
10645
10646
10647
10648
10649 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10650 phba->sli4_hba.mq_esize,
10651 phba->sli4_hba.mq_ecount, cpu);
10652 if (!qdesc) {
10653 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10654 "0505 Failed allocate slow-path MQ\n");
10655 goto out_error;
10656 }
10657 qdesc->chann = cpu;
10658 phba->sli4_hba.mbx_wq = qdesc;
10659
10660
10661
10662
10663
10664
10665 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10666 phba->sli4_hba.wq_esize,
10667 phba->sli4_hba.wq_ecount, cpu);
10668 if (!qdesc) {
10669 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10670 "0504 Failed allocate slow-path ELS WQ\n");
10671 goto out_error;
10672 }
10673 qdesc->chann = cpu;
10674 phba->sli4_hba.els_wq = qdesc;
10675 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10676
10677 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10678
10679 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10680 phba->sli4_hba.cq_esize,
10681 phba->sli4_hba.cq_ecount, cpu);
10682 if (!qdesc) {
10683 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10684 "6079 Failed allocate NVME LS CQ\n");
10685 goto out_error;
10686 }
10687 qdesc->chann = cpu;
10688 qdesc->qe_valid = 1;
10689 phba->sli4_hba.nvmels_cq = qdesc;
10690
10691
10692 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10693 phba->sli4_hba.wq_esize,
10694 phba->sli4_hba.wq_ecount, cpu);
10695 if (!qdesc) {
10696 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10697 "6080 Failed allocate NVME LS WQ\n");
10698 goto out_error;
10699 }
10700 qdesc->chann = cpu;
10701 phba->sli4_hba.nvmels_wq = qdesc;
10702 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10703 }
10704
10705
10706
10707
10708
10709
10710 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10711 phba->sli4_hba.rq_esize,
10712 phba->sli4_hba.rq_ecount, cpu);
10713 if (!qdesc) {
10714 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10715 "0506 Failed allocate receive HRQ\n");
10716 goto out_error;
10717 }
10718 phba->sli4_hba.hdr_rq = qdesc;
10719
10720
10721 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10722 phba->sli4_hba.rq_esize,
10723 phba->sli4_hba.rq_ecount, cpu);
10724 if (!qdesc) {
10725 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10726 "0507 Failed allocate receive DRQ\n");
10727 goto out_error;
10728 }
10729 phba->sli4_hba.dat_rq = qdesc;
10730
10731 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10732 phba->nvmet_support) {
10733 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10734 cpu = lpfc_find_cpu_handle(phba, idx,
10735 LPFC_FIND_BY_HDWQ);
10736
10737 qdesc = lpfc_sli4_queue_alloc(phba,
10738 LPFC_DEFAULT_PAGE_SIZE,
10739 phba->sli4_hba.rq_esize,
10740 LPFC_NVMET_RQE_DEF_COUNT,
10741 cpu);
10742 if (!qdesc) {
10743 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10744 "3146 Failed allocate "
10745 "receive HRQ\n");
10746 goto out_error;
10747 }
10748 qdesc->hdwq = idx;
10749 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10750
10751
10752 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10753 GFP_KERNEL,
10754 cpu_to_node(cpu));
10755 if (qdesc->rqbp == NULL) {
10756 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10757 "6131 Failed allocate "
10758 "Header RQBP\n");
10759 goto out_error;
10760 }
10761
10762
10763 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10764
10765
10766 qdesc = lpfc_sli4_queue_alloc(phba,
10767 LPFC_DEFAULT_PAGE_SIZE,
10768 phba->sli4_hba.rq_esize,
10769 LPFC_NVMET_RQE_DEF_COUNT,
10770 cpu);
10771 if (!qdesc) {
10772 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10773 "3156 Failed allocate "
10774 "receive DRQ\n");
10775 goto out_error;
10776 }
10777 qdesc->hdwq = idx;
10778 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10779 }
10780 }
10781
10782
10783 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10784 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10785 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10786 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10787 }
10788 }
10789
10790
10791 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10792 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10793 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10794 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10795 }
10796 }
10797
10798 return 0;
10799
10800 out_error:
10801 lpfc_sli4_queue_destroy(phba);
10802 return -ENOMEM;
10803 }
10804
10805 static inline void
10806 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
10807 {
10808 if (*qp != NULL) {
10809 lpfc_sli4_queue_free(*qp);
10810 *qp = NULL;
10811 }
10812 }
10813
10814 static inline void
10815 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10816 {
10817 int idx;
10818
10819 if (*qs == NULL)
10820 return;
10821
10822 for (idx = 0; idx < max; idx++)
10823 __lpfc_sli4_release_queue(&(*qs)[idx]);
10824
10825 kfree(*qs);
10826 *qs = NULL;
10827 }
10828
10829 static inline void
10830 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10831 {
10832 struct lpfc_sli4_hdw_queue *hdwq;
10833 struct lpfc_queue *eq;
10834 uint32_t idx;
10835
10836 hdwq = phba->sli4_hba.hdwq;
10837
10838
10839 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10840
10841 lpfc_sli4_queue_free(hdwq[idx].io_cq);
10842 lpfc_sli4_queue_free(hdwq[idx].io_wq);
10843 hdwq[idx].hba_eq = NULL;
10844 hdwq[idx].io_cq = NULL;
10845 hdwq[idx].io_wq = NULL;
10846 if (phba->cfg_xpsgl && !phba->nvmet_support)
10847 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10848 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10849 }
10850
10851 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10852
10853 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10854 lpfc_sli4_queue_free(eq);
10855 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10856 }
10857 }
10858
10859
10860
10861
10862
10863
10864
10865
10866
10867
10868
10869
10870
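/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Waits for any in-progress queue processing to drain, then releases the
 * hardware queue array, the NVMET queue sets and all slow-path work,
 * receive and completion queues allocated by lpfc_sli4_queue_create().
 **/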
10871 void
10872 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10873 {
10874
10875
10876
10877
10878
10879 spin_lock_irq(&phba->hbalock);
10880 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10881 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10882 spin_unlock_irq(&phba->hbalock);
10883 msleep(20);
10884 spin_lock_irq(&phba->hbalock);
10885 }
10886 spin_unlock_irq(&phba->hbalock);
10887
10888 lpfc_sli4_cleanup_poll_list(phba);
10889
10890
10891 if (phba->sli4_hba.hdwq)
10892 lpfc_sli4_release_hdwq(phba);
10893
10894 if (phba->nvmet_support) {
10895 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10896 phba->cfg_nvmet_mrq);
10897
10898 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10899 phba->cfg_nvmet_mrq);
10900 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10901 phba->cfg_nvmet_mrq);
10902 }
10903
10904
10905 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10906
10907
10908 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10909
10910
10911 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10912
10913
10914 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10915 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10916
10917
10918 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10919
10920
10921 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10922
10923
10924 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10925
10926
10927 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10928
10929
10930 spin_lock_irq(&phba->hbalock);
10931 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10932 spin_unlock_irq(&phba->hbalock);
10933 }
10934
10935 int
10936 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10937 {
10938 struct lpfc_rqb *rqbp;
10939 struct lpfc_dmabuf *h_buf;
10940 struct rqb_dmabuf *rqb_buffer;
10941
10942 rqbp = rq->rqbp;
10943 while (!list_empty(&rqbp->rqb_buffer_list)) {
10944 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10945 struct lpfc_dmabuf, list);
10946
10947 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10948 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10949 rqbp->buffer_count--;
10950 }
10951 return 1;
10952 }
10953
10954 static int
10955 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10956 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10957 int qidx, uint32_t qtype)
10958 {
10959 struct lpfc_sli_ring *pring;
10960 int rc;
10961
10962 if (!eq || !cq || !wq) {
10963 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10964 "6085 Fast-path %s (%d) not allocated\n",
10965 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10966 return -ENOMEM;
10967 }
10968
10969
10970 rc = lpfc_cq_create(phba, cq, eq,
10971 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10972 if (rc) {
10973 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10974 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
10975 qidx, (uint32_t)rc);
10976 return rc;
10977 }
10978
10979 if (qtype != LPFC_MBOX) {
10980
10981 if (cq_map)
10982 *cq_map = cq->queue_id;
10983
10984 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10985 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10986 qidx, cq->queue_id, qidx, eq->queue_id);
10987
10988
10989 rc = lpfc_wq_create(phba, wq, cq, qtype);
10990 if (rc) {
10991 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10992 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
10993 qidx, (uint32_t)rc);
10994
10995 return rc;
10996 }
10997
10998
10999 pring = wq->pring;
11000 pring->sli.sli4.wqp = (void *)wq;
11001 cq->pring = pring;
11002
11003 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11004 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
11005 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
11006 } else {
11007 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
11008 if (rc) {
11009 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11010 "0539 Failed setup of slow-path MQ: "
11011 "rc = 0x%x\n", rc);
11012
11013 return rc;
11014 }
11015
11016 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11017 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
11018 phba->sli4_hba.mbx_wq->queue_id,
11019 phba->sli4_hba.mbx_cq->queue_id);
11020 }
11021
11022 return 0;
11023 }
11024
11025
11026
11027
11028
11029
11030
11031
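/*
 * lpfc_setup_cq_lookup - Rebuild the CQ lookup table, mapping each IO
 * completion queue id found on the EQ child lists to its queue structure
 * for fast lookup at interrupt time.
 */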
11032 static void
11033 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
11034 {
11035 struct lpfc_queue *eq, *childq;
11036 int qidx;
11037
11038 memset(phba->sli4_hba.cq_lookup, 0,
11039 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
11040
11041 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11042
11043 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11044 if (!eq)
11045 continue;
11046
11047 list_for_each_entry(childq, &eq->child_list, list) {
11048 if (childq->queue_id > phba->sli4_hba.cq_max)
11049 continue;
11050 if (childq->subtype == LPFC_IO)
11051 phba->sli4_hba.cq_lookup[childq->queue_id] =
11052 childq;
11053 }
11054 }
11055 }
11056
11057
11058
11059
11060
11061
11062
11063
11064
11065
11066
11067
11068
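/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues on the device
 * @phba: pointer to lpfc hba data structure.
 *
 * Queries the firmware function configuration, then creates the EQs, the
 * fast-path IO CQ/WQ pairs, the mailbox, ELS and NVME LS queues, the
 * NVMET CQ/MRQ sets and the unsolicited receive queue on the port,
 * programs the EQ delay and builds the CQ lookup table.
 *
 * Return: 0 on success, negative errno on failure, in which case any
 * queues already set up are unset.
 **/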
11069 int
11070 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
11071 {
11072 uint32_t shdr_status, shdr_add_status;
11073 union lpfc_sli4_cfg_shdr *shdr;
11074 struct lpfc_vector_map_info *cpup;
11075 struct lpfc_sli4_hdw_queue *qp;
11076 LPFC_MBOXQ_t *mboxq;
11077 int qidx, cpu;
11078 uint32_t length, usdelay;
11079 int rc = -ENOMEM;
11080
11081
11082 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11083 if (!mboxq) {
11084 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11085 "3249 Unable to allocate memory for "
11086 "QUERY_FW_CFG mailbox command\n");
11087 return -ENOMEM;
11088 }
11089 length = (sizeof(struct lpfc_mbx_query_fw_config) -
11090 sizeof(struct lpfc_sli4_cfg_mhdr));
11091 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11092 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
11093 length, LPFC_SLI4_MBX_EMBED);
11094
11095 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11096
11097 shdr = (union lpfc_sli4_cfg_shdr *)
11098 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11099 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11100 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11101 if (shdr_status || shdr_add_status || rc) {
11102 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11103 "3250 QUERY_FW_CFG mailbox failed with status "
11104 "x%x add_status x%x, mbx status x%x\n",
11105 shdr_status, shdr_add_status, rc);
11106 mempool_free(mboxq, phba->mbox_mem_pool);
11107 rc = -ENXIO;
11108 goto out_error;
11109 }
11110
11111 phba->sli4_hba.fw_func_mode =
11112 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
11113 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
11114 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
11115 phba->sli4_hba.physical_port =
11116 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
11117 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11118 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
11119 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
11120 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
11121
11122 mempool_free(mboxq, phba->mbox_mem_pool);
11123
11124
11125
11126
11127 qp = phba->sli4_hba.hdwq;
11128
11129
11130 if (!qp) {
11131 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11132 "3147 Fast-path EQs not allocated\n");
11133 rc = -ENOMEM;
11134 goto out_error;
11135 }
11136
11137
11138 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11139
11140 for_each_present_cpu(cpu) {
11141 cpup = &phba->sli4_hba.cpu_map[cpu];
11142
11143
11144
11145
11146 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11147 continue;
11148 if (qidx != cpup->eq)
11149 continue;
11150
11151
11152 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11153 phba->cfg_fcp_imax);
11154 if (rc) {
11155 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11156 "0523 Failed setup of fast-path"
11157 " EQ (%d), rc = 0x%x\n",
11158 cpup->eq, (uint32_t)rc);
11159 goto out_destroy;
11160 }
11161
11162
11163 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11164 qp[cpup->hdwq].hba_eq;
11165
11166 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11167 "2584 HBA EQ setup: queue[%d]-id=%d\n",
11168 cpup->eq,
11169 qp[cpup->hdwq].hba_eq->queue_id);
11170 }
11171 }
11172
11173
11174 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11175 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
11176 cpup = &phba->sli4_hba.cpu_map[cpu];
11177
11178
11179 rc = lpfc_create_wq_cq(phba,
11180 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11181 qp[qidx].io_cq,
11182 qp[qidx].io_wq,
11183 &phba->sli4_hba.hdwq[qidx].io_cq_map,
11184 qidx,
11185 LPFC_IO);
11186 if (rc) {
11187 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11188 "0535 Failed to setup fastpath "
11189 "IO WQ/CQ (%d), rc = 0x%x\n",
11190 qidx, (uint32_t)rc);
11191 goto out_destroy;
11192 }
11193 }
11194
11195
11196
11197
11198
11199
11200
11201 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11202 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11203 "0528 %s not allocated\n",
11204 phba->sli4_hba.mbx_cq ?
11205 "Mailbox WQ" : "Mailbox CQ");
11206 rc = -ENOMEM;
11207 goto out_destroy;
11208 }
11209
11210 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11211 phba->sli4_hba.mbx_cq,
11212 phba->sli4_hba.mbx_wq,
11213 NULL, 0, LPFC_MBOX);
11214 if (rc) {
11215 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11216 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11217 (uint32_t)rc);
11218 goto out_destroy;
11219 }
11220 if (phba->nvmet_support) {
11221 if (!phba->sli4_hba.nvmet_cqset) {
11222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11223 "3165 Fast-path NVME CQ Set "
11224 "array not allocated\n");
11225 rc = -ENOMEM;
11226 goto out_destroy;
11227 }
11228 if (phba->cfg_nvmet_mrq > 1) {
11229 rc = lpfc_cq_create_set(phba,
11230 phba->sli4_hba.nvmet_cqset,
11231 qp,
11232 LPFC_WCQ, LPFC_NVMET);
11233 if (rc) {
11234 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11235 "3164 Failed setup of NVME CQ "
11236 "Set, rc = 0x%x\n",
11237 (uint32_t)rc);
11238 goto out_destroy;
11239 }
11240 } else {
11241
11242 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11243 qp[0].hba_eq,
11244 LPFC_WCQ, LPFC_NVMET);
11245 if (rc) {
11246 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11247 "6089 Failed setup NVMET CQ: "
11248 "rc = 0x%x\n", (uint32_t)rc);
11249 goto out_destroy;
11250 }
11251 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11252
11253 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11254 "6090 NVMET CQ setup: cq-id=%d, "
11255 "parent eq-id=%d\n",
11256 phba->sli4_hba.nvmet_cqset[0]->queue_id,
11257 qp[0].hba_eq->queue_id);
11258 }
11259 }
11260
11261
11262 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11263 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11264 "0530 ELS %s not allocated\n",
11265 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11266 rc = -ENOMEM;
11267 goto out_destroy;
11268 }
11269 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11270 phba->sli4_hba.els_cq,
11271 phba->sli4_hba.els_wq,
11272 NULL, 0, LPFC_ELS);
11273 if (rc) {
11274 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11275 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11276 (uint32_t)rc);
11277 goto out_destroy;
11278 }
11279 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11280 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11281 phba->sli4_hba.els_wq->queue_id,
11282 phba->sli4_hba.els_cq->queue_id);
11283
11284 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11285
11286 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11287 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11288 "6091 LS %s not allocated\n",
11289 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11290 rc = -ENOMEM;
11291 goto out_destroy;
11292 }
11293 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11294 phba->sli4_hba.nvmels_cq,
11295 phba->sli4_hba.nvmels_wq,
11296 NULL, 0, LPFC_NVME_LS);
11297 if (rc) {
11298 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11299 				"0526 Failed setup of NVME LS WQ/CQ: "
11300 "rc = 0x%x\n", (uint32_t)rc);
11301 goto out_destroy;
11302 }
11303
11304 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11305 "6096 ELS WQ setup: wq-id=%d, "
11306 "parent cq-id=%d\n",
11307 phba->sli4_hba.nvmels_wq->queue_id,
11308 phba->sli4_hba.nvmels_cq->queue_id);
11309 }
11310
11311
11312
11313
11314 if (phba->nvmet_support) {
11315 if ((!phba->sli4_hba.nvmet_cqset) ||
11316 (!phba->sli4_hba.nvmet_mrq_hdr) ||
11317 (!phba->sli4_hba.nvmet_mrq_data)) {
11318 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11319 "6130 MRQ CQ Queues not "
11320 "allocated\n");
11321 rc = -ENOMEM;
11322 goto out_destroy;
11323 }
11324 if (phba->cfg_nvmet_mrq > 1) {
11325 rc = lpfc_mrq_create(phba,
11326 phba->sli4_hba.nvmet_mrq_hdr,
11327 phba->sli4_hba.nvmet_mrq_data,
11328 phba->sli4_hba.nvmet_cqset,
11329 LPFC_NVMET);
11330 if (rc) {
11331 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11332 "6098 Failed setup of NVMET "
11333 "MRQ: rc = 0x%x\n",
11334 (uint32_t)rc);
11335 goto out_destroy;
11336 }
11337
11338 } else {
11339 rc = lpfc_rq_create(phba,
11340 phba->sli4_hba.nvmet_mrq_hdr[0],
11341 phba->sli4_hba.nvmet_mrq_data[0],
11342 phba->sli4_hba.nvmet_cqset[0],
11343 LPFC_NVMET);
11344 if (rc) {
11345 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11346 "6057 Failed setup of NVMET "
11347 "Receive Queue: rc = 0x%x\n",
11348 (uint32_t)rc);
11349 goto out_destroy;
11350 }
11351
11352 lpfc_printf_log(
11353 phba, KERN_INFO, LOG_INIT,
11354 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11355 "dat-rq-id=%d parent cq-id=%d\n",
11356 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11357 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11358 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11359
11360 }
11361 }
11362
11363 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11364 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11365 "0540 Receive Queue not allocated\n");
11366 rc = -ENOMEM;
11367 goto out_destroy;
11368 }
11369
11370 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11371 phba->sli4_hba.els_cq, LPFC_USOL);
11372 if (rc) {
11373 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11374 "0541 Failed setup of Receive Queue: "
11375 "rc = 0x%x\n", (uint32_t)rc);
11376 goto out_destroy;
11377 }
11378
11379 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11380 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11381 "parent cq-id=%d\n",
11382 phba->sli4_hba.hdr_rq->queue_id,
11383 phba->sli4_hba.dat_rq->queue_id,
11384 phba->sli4_hba.els_cq->queue_id);
11385
11386 if (phba->cfg_fcp_imax)
11387 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11388 else
11389 usdelay = 0;
11390
11391 for (qidx = 0; qidx < phba->cfg_irq_chann;
11392 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11393 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11394 usdelay);
11395
11396 if (phba->sli4_hba.cq_max) {
11397 kfree(phba->sli4_hba.cq_lookup);
11398 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11399 sizeof(struct lpfc_queue *), GFP_KERNEL);
11400 if (!phba->sli4_hba.cq_lookup) {
11401 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11402 "0549 Failed setup of CQ Lookup table: "
11403 "size 0x%x\n", phba->sli4_hba.cq_max);
11404 rc = -ENOMEM;
11405 goto out_destroy;
11406 }
11407 lpfc_setup_cq_lookup(phba);
11408 }
11409 return 0;
11410
11411 out_destroy:
11412 lpfc_sli4_queue_unset(phba);
11413 out_error:
11414 return rc;
11415 }
11416
11417
11418
11419
11420
11421
11422
11423
11424
11425
11426
11427
11428
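/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues on the device
 * @phba: pointer to lpfc hba data structure.
 *
 * Destroys, roughly in the reverse order of setup, the mailbox, NVME LS,
 * ELS, receive, NVMET and fast-path IO/EQ queues on the port and frees
 * the CQ lookup table.
 **/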
11429 void
11430 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11431 {
11432 struct lpfc_sli4_hdw_queue *qp;
11433 struct lpfc_queue *eq;
11434 int qidx;
11435
11436
11437 if (phba->sli4_hba.mbx_wq)
11438 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11439
11440
11441 if (phba->sli4_hba.nvmels_wq)
11442 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11443
11444
11445 if (phba->sli4_hba.els_wq)
11446 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11447
11448
11449 if (phba->sli4_hba.hdr_rq)
11450 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11451 phba->sli4_hba.dat_rq);
11452
11453
11454 if (phba->sli4_hba.mbx_cq)
11455 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11456
11457
11458 if (phba->sli4_hba.els_cq)
11459 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11460
11461
11462 if (phba->sli4_hba.nvmels_cq)
11463 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11464
11465 if (phba->nvmet_support) {
11466
11467 if (phba->sli4_hba.nvmet_mrq_hdr) {
11468 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11469 lpfc_rq_destroy(
11470 phba,
11471 phba->sli4_hba.nvmet_mrq_hdr[qidx],
11472 phba->sli4_hba.nvmet_mrq_data[qidx]);
11473 }
11474
11475
11476 if (phba->sli4_hba.nvmet_cqset) {
11477 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11478 lpfc_cq_destroy(
11479 phba, phba->sli4_hba.nvmet_cqset[qidx]);
11480 }
11481 }
11482
11483
11484 if (phba->sli4_hba.hdwq) {
11485
11486 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11487
11488 qp = &phba->sli4_hba.hdwq[qidx];
11489 lpfc_wq_destroy(phba, qp->io_wq);
11490 lpfc_cq_destroy(phba, qp->io_cq);
11491 }
11492
11493 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11494
11495 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11496 lpfc_eq_destroy(phba, eq);
11497 }
11498 }
11499
11500 kfree(phba->sli4_hba.cq_lookup);
11501 phba->sli4_hba.cq_lookup = NULL;
11502 phba->sli4_hba.cq_max = 0;
11503 }
11504
11505
11506
11507
11508
11509
11510
11511
11512
11513
11514
11515
11516
11517
11518
11519
11520
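/**
 * lpfc_sli4_cq_event_pool_create - Create the completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Pre-allocates completion-queue event entries (four times the CQ entry
 * count) and places them on the slow-path CQ event free pool.
 *
 * Return: 0 on success, -ENOMEM if any allocation fails (the partially
 * built pool is destroyed).
 **/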
11521 static int
11522 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11523 {
11524 struct lpfc_cq_event *cq_event;
11525 int i;
11526
11527 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11528 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11529 if (!cq_event)
11530 goto out_pool_create_fail;
11531 list_add_tail(&cq_event->list,
11532 &phba->sli4_hba.sp_cqe_event_pool);
11533 }
11534 return 0;
11535
11536 out_pool_create_fail:
11537 lpfc_sli4_cq_event_pool_destroy(phba);
11538 return -ENOMEM;
11539 }
11540
11541
11542
11543
11544
11545
11546
11547
11548
11549
11550
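/**
 * lpfc_sli4_cq_event_pool_destroy - Free the completion-queue event pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Removes and frees every entry remaining on the slow-path CQ event
 * free pool.
 **/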
11551 static void
11552 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11553 {
11554 struct lpfc_cq_event *cq_event, *next_cq_event;
11555
11556 list_for_each_entry_safe(cq_event, next_cq_event,
11557 &phba->sli4_hba.sp_cqe_event_pool, list) {
11558 list_del(&cq_event->list);
11559 kfree(cq_event);
11560 }
11561 }
11562
11563
11564
11565
11566
11567
11568
11569
11570
11571
11572
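/**
 * __lpfc_sli4_cq_event_alloc - Allocate a CQ event from the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Lock-free variant; the caller must hold the hbalock.
 *
 * Return: pointer to a CQ event entry, or NULL if the pool is empty.
 **/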
11573 struct lpfc_cq_event *
11574 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11575 {
11576 struct lpfc_cq_event *cq_event = NULL;
11577
11578 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11579 struct lpfc_cq_event, list);
11580 return cq_event;
11581 }
11582
11583
11584
11585
11586
11587
11588
11589
11590
11591
11592
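/**
 * lpfc_sli4_cq_event_alloc - Allocate a CQ event from the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the hbalock and allocates a completion-queue event entry from
 * the free pool.
 *
 * Return: pointer to a CQ event entry, or NULL if the pool is empty.
 **/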
11593 struct lpfc_cq_event *
11594 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11595 {
11596 struct lpfc_cq_event *cq_event;
11597 unsigned long iflags;
11598
11599 spin_lock_irqsave(&phba->hbalock, iflags);
11600 cq_event = __lpfc_sli4_cq_event_alloc(phba);
11601 spin_unlock_irqrestore(&phba->hbalock, iflags);
11602 return cq_event;
11603 }
11604
11605
11606
11607
11608
11609
11610
11611
11612
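/**
 * __lpfc_sli4_cq_event_release - Return a CQ event to the free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion-queue event being released.
 *
 * Lock-free variant; the caller must hold the hbalock.
 **/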
11613 void
11614 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11615 struct lpfc_cq_event *cq_event)
11616 {
11617 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11618 }
11619
11620
11621
11622
11623
11624
11625
11626
11627
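/**
 * lpfc_sli4_cq_event_release - Return a CQ event to the free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion-queue event being released.
 *
 * Takes the hbalock and returns the completion-queue event entry to the
 * free pool.
 **/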
11628 void
11629 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11630 struct lpfc_cq_event *cq_event)
11631 {
11632 unsigned long iflags;
11633 spin_lock_irqsave(&phba->hbalock, iflags);
11634 __lpfc_sli4_cq_event_release(phba, cq_event);
11635 spin_unlock_irqrestore(&phba->hbalock, iflags);
11636 }
11637
11638
11639
11640
11641
11642
11643
11644
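/**
 * lpfc_sli4_cq_event_release_all - Release all pending CQ events
 * @phba: pointer to lpfc hba data structure.
 *
 * Splices the pending ELS XRI abort and asynchronous event work queues
 * onto a local list and returns every entry to the CQ event free pool.
 **/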
11645 static void
11646 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11647 {
11648 LIST_HEAD(cq_event_list);
11649 struct lpfc_cq_event *cq_event;
11650 unsigned long iflags;
11651
11652
11653
11654
11655 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11656 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11657 &cq_event_list);
11658 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11659
11660
11661 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11662 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11663 &cq_event_list);
11664 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11665
11666 while (!list_empty(&cq_event_list)) {
11667 list_remove_head(&cq_event_list, cq_event,
11668 struct lpfc_cq_event, list);
11669 lpfc_sli4_cq_event_release(phba, cq_event);
11670 }
11671 }
11672
11673
11674
11675
11676
11677
11678
11679
11680
11681
11682
11683
11684
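/**
 * lpfc_pci_function_reset - Reset the PCI function of the SLI4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * For if_type 0 ports the reset is requested through the
 * SLI_FUNCTION_RESET mailbox command.  For if_type 2 and 6 ports the
 * routine waits for the port to report ready, writes the INIT_PORT
 * request to the control register and waits for readiness again.
 *
 * Return: 0 on success, -ENOMEM, -ENXIO or -ENODEV on failure.
 **/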
11685 int
11686 lpfc_pci_function_reset(struct lpfc_hba *phba)
11687 {
11688 LPFC_MBOXQ_t *mboxq;
11689 uint32_t rc = 0, if_type;
11690 uint32_t shdr_status, shdr_add_status;
11691 uint32_t rdy_chk;
11692 uint32_t port_reset = 0;
11693 union lpfc_sli4_cfg_shdr *shdr;
11694 struct lpfc_register reg_data;
11695 uint16_t devid;
11696
11697 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11698 switch (if_type) {
11699 case LPFC_SLI_INTF_IF_TYPE_0:
11700 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11701 GFP_KERNEL);
11702 if (!mboxq) {
11703 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11704 "0494 Unable to allocate memory for "
11705 "issuing SLI_FUNCTION_RESET mailbox "
11706 "command\n");
11707 return -ENOMEM;
11708 }
11709
11710
11711 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11712 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11713 LPFC_SLI4_MBX_EMBED);
11714 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11715 shdr = (union lpfc_sli4_cfg_shdr *)
11716 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11717 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11718 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11719 &shdr->response);
11720 mempool_free(mboxq, phba->mbox_mem_pool);
11721 if (shdr_status || shdr_add_status || rc) {
11722 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11723 "0495 SLI_FUNCTION_RESET mailbox "
11724 "failed with status x%x add_status x%x,"
11725 " mbx status x%x\n",
11726 shdr_status, shdr_add_status, rc);
11727 rc = -ENXIO;
11728 }
11729 break;
11730 case LPFC_SLI_INTF_IF_TYPE_2:
11731 case LPFC_SLI_INTF_IF_TYPE_6:
11732 wait:
11733
11734
11735
11736
11737
11738 for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11739 if (lpfc_readl(phba->sli4_hba.u.if_type2.
11740 STATUSregaddr, &reg_data.word0)) {
11741 rc = -ENODEV;
11742 goto out;
11743 }
11744 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11745 break;
11746 msleep(20);
11747 }
11748
11749 if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11750 phba->work_status[0] = readl(
11751 phba->sli4_hba.u.if_type2.ERR1regaddr);
11752 phba->work_status[1] = readl(
11753 phba->sli4_hba.u.if_type2.ERR2regaddr);
11754 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11755 "2890 Port not ready, port status reg "
11756 "0x%x error 1=0x%x, error 2=0x%x\n",
11757 reg_data.word0,
11758 phba->work_status[0],
11759 phba->work_status[1]);
11760 rc = -ENODEV;
11761 goto out;
11762 }
11763
11764 if (bf_get(lpfc_sliport_status_pldv, &reg_data))
11765 lpfc_pldv_detect = true;
11766
11767 if (!port_reset) {
11768
11769
11770
11771 reg_data.word0 = 0;
11772 bf_set(lpfc_sliport_ctrl_end, &reg_data,
11773 LPFC_SLIPORT_LITTLE_ENDIAN);
11774 bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11775 LPFC_SLIPORT_INIT_PORT);
11776 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11777 CTRLregaddr);
11778
11779 pci_read_config_word(phba->pcidev,
11780 PCI_DEVICE_ID, &devid);
11781
11782 port_reset = 1;
11783 msleep(20);
11784 goto wait;
11785 } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11786 rc = -ENODEV;
11787 goto out;
11788 }
11789 break;
11790
11791 case LPFC_SLI_INTF_IF_TYPE_1:
11792 default:
11793 break;
11794 }
11795
11796 out:
11797
11798 if (rc) {
11799 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11800 "3317 HBA not functional: IP Reset Failed "
11801 "try: echo fw_reset > board_mode\n");
11802 rc = -ENODEV;
11803 }
11804
11805 return rc;
11806 }
11807
11808
11809
11810
11811
11812
11813
11814
11815
11816
11817
11818
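/**
 * lpfc_sli4_pci_mem_setup - Set up the SLI4 PCI memory-mapped BARs
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets the DMA mask (64-bit with a 32-bit fallback), validates the
 * SLI_INTF register, ioremaps the configuration, control and doorbell
 * (or DPP) BARs appropriate to the interface type, and selects the EQ/CQ
 * doorbell access routines.
 *
 * Return: 0 on success, a negative errno otherwise.
 **/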
11819 static int
11820 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11821 {
11822 struct pci_dev *pdev = phba->pcidev;
11823 unsigned long bar0map_len, bar1map_len, bar2map_len;
11824 int error;
11825 uint32_t if_type;
11826
11827 if (!pdev)
11828 return -ENODEV;
11829
11830
11831 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11832 if (error)
11833 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11834 if (error)
11835 return error;
11836
11837
11838
11839
11840
11841 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11842 &phba->sli4_hba.sli_intf.word0)) {
11843 return -ENODEV;
11844 }
11845
11846
11847 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11848 LPFC_SLI_INTF_VALID) {
11849 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11850 "2894 SLI_INTF reg contents invalid "
11851 "sli_intf reg 0x%x\n",
11852 phba->sli4_hba.sli_intf.word0);
11853 return -ENODEV;
11854 }
11855
11856 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11857
11858
11859
11860
11861
11862
11863 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11864 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11865 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11866
11867
11868
11869
11870
11871 phba->sli4_hba.conf_regs_memmap_p =
11872 ioremap(phba->pci_bar0_map, bar0map_len);
11873 if (!phba->sli4_hba.conf_regs_memmap_p) {
11874 dev_printk(KERN_ERR, &pdev->dev,
11875 "ioremap failed for SLI4 PCI config "
11876 "registers.\n");
11877 return -ENODEV;
11878 }
11879 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11880
11881 lpfc_sli4_bar0_register_memmap(phba, if_type);
11882 } else {
11883 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11884 bar0map_len = pci_resource_len(pdev, 1);
11885 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11886 dev_printk(KERN_ERR, &pdev->dev,
11887 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11888 return -ENODEV;
11889 }
11890 phba->sli4_hba.conf_regs_memmap_p =
11891 ioremap(phba->pci_bar0_map, bar0map_len);
11892 if (!phba->sli4_hba.conf_regs_memmap_p) {
11893 dev_printk(KERN_ERR, &pdev->dev,
11894 "ioremap failed for SLI4 PCI config "
11895 "registers.\n");
11896 return -ENODEV;
11897 }
11898 lpfc_sli4_bar0_register_memmap(phba, if_type);
11899 }
11900
11901 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11902 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11903
11904
11905
11906
11907 phba->pci_bar1_map = pci_resource_start(pdev,
11908 PCI_64BIT_BAR2);
11909 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11910 phba->sli4_hba.ctrl_regs_memmap_p =
11911 ioremap(phba->pci_bar1_map,
11912 bar1map_len);
11913 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11914 dev_err(&pdev->dev,
11915 "ioremap failed for SLI4 HBA "
11916 "control registers.\n");
11917 error = -ENOMEM;
11918 goto out_iounmap_conf;
11919 }
11920 phba->pci_bar2_memmap_p =
11921 phba->sli4_hba.ctrl_regs_memmap_p;
11922 lpfc_sli4_bar1_register_memmap(phba, if_type);
11923 } else {
11924 error = -ENOMEM;
11925 goto out_iounmap_conf;
11926 }
11927 }
11928
11929 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11930 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11931
11932
11933
11934
11935 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11936 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11937 phba->sli4_hba.drbl_regs_memmap_p =
11938 ioremap(phba->pci_bar1_map, bar1map_len);
11939 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11940 dev_err(&pdev->dev,
11941 "ioremap failed for SLI4 HBA doorbell registers.\n");
11942 error = -ENOMEM;
11943 goto out_iounmap_conf;
11944 }
11945 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11946 lpfc_sli4_bar1_register_memmap(phba, if_type);
11947 }
11948
11949 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11950 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11951
11952
11953
11954
11955 phba->pci_bar2_map = pci_resource_start(pdev,
11956 PCI_64BIT_BAR4);
11957 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11958 phba->sli4_hba.drbl_regs_memmap_p =
11959 ioremap(phba->pci_bar2_map,
11960 bar2map_len);
11961 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11962 dev_err(&pdev->dev,
11963 "ioremap failed for SLI4 HBA"
11964 " doorbell registers.\n");
11965 error = -ENOMEM;
11966 goto out_iounmap_ctrl;
11967 }
11968 phba->pci_bar4_memmap_p =
11969 phba->sli4_hba.drbl_regs_memmap_p;
11970 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11971 if (error)
11972 goto out_iounmap_all;
11973 } else {
11974 error = -ENOMEM;
11975 goto out_iounmap_all;
11976 }
11977 }
11978
11979 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11980 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11981
11982
11983
11984
11985 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11986 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11987 phba->sli4_hba.dpp_regs_memmap_p =
11988 ioremap(phba->pci_bar2_map, bar2map_len);
11989 if (!phba->sli4_hba.dpp_regs_memmap_p) {
11990 dev_err(&pdev->dev,
11991 "ioremap failed for SLI4 HBA dpp registers.\n");
11992 error = -ENOMEM;
11993 goto out_iounmap_ctrl;
11994 }
11995 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11996 }
11997
11998
11999 switch (if_type) {
12000 case LPFC_SLI_INTF_IF_TYPE_0:
12001 case LPFC_SLI_INTF_IF_TYPE_2:
12002 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
12003 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
12004 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
12005 break;
12006 case LPFC_SLI_INTF_IF_TYPE_6:
12007 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
12008 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
12009 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
12010 break;
12011 default:
12012 break;
12013 }
12014
12015 return 0;
12016
12017 out_iounmap_all:
12018 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12019 out_iounmap_ctrl:
12020 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12021 out_iounmap_conf:
12022 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12023
12024 return error;
12025 }
12026
12027
12028
12029
12030
12031
12032
12033
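/**
 * lpfc_sli4_pci_mem_unset - Unmap the SLI4 PCI memory-mapped BARs
 * @phba: pointer to lpfc hba data structure.
 *
 * Unmaps the BARs that were mapped by lpfc_sli4_pci_mem_setup() for the
 * detected interface type.
 **/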
12034 static void
12035 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
12036 {
12037 uint32_t if_type;
12038 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12039
12040 switch (if_type) {
12041 case LPFC_SLI_INTF_IF_TYPE_0:
12042 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12043 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12044 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12045 break;
12046 case LPFC_SLI_INTF_IF_TYPE_2:
12047 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12048 break;
12049 case LPFC_SLI_INTF_IF_TYPE_6:
12050 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12051 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12052 if (phba->sli4_hba.dpp_regs_memmap_p)
12053 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
12054 break;
12055 case LPFC_SLI_INTF_IF_TYPE_1:
12056 default:
12057 dev_printk(KERN_ERR, &phba->pcidev->dev,
12058 "FATAL - unsupported SLI4 interface type - %d\n",
12059 if_type);
12060 break;
12061 }
12062 }
12063
12064
12065
12066
12067
12068
12069
12070
12071
12072
12073
12074
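/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI3 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates the two SLI3 MSI-X vectors, requests the slow-path and
 * fast-path interrupt handlers, and issues the CONFIG_MSI mailbox
 * command to route events to the vectors.
 *
 * Return: 0 on success, a negative errno or mailbox status otherwise.
 **/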
12075 static int
12076 lpfc_sli_enable_msix(struct lpfc_hba *phba)
12077 {
12078 int rc;
12079 LPFC_MBOXQ_t *pmb;
12080
12081
12082 rc = pci_alloc_irq_vectors(phba->pcidev,
12083 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
12084 if (rc < 0) {
12085 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12086 "0420 PCI enable MSI-X failed (%d)\n", rc);
12087 goto vec_fail_out;
12088 }
12089
12090
12091
12092
12093
12094
12095 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12096 &lpfc_sli_sp_intr_handler, 0,
12097 LPFC_SP_DRIVER_HANDLER_NAME, phba);
12098 if (rc) {
12099 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12100 "0421 MSI-X slow-path request_irq failed "
12101 "(%d)\n", rc);
12102 goto msi_fail_out;
12103 }
12104
12105
12106 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12107 &lpfc_sli_fp_intr_handler, 0,
12108 LPFC_FP_DRIVER_HANDLER_NAME, phba);
12109
12110 if (rc) {
12111 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12112 "0429 MSI-X fast-path request_irq failed "
12113 "(%d)\n", rc);
12114 goto irq_fail_out;
12115 }
12116
12117
12118
12119
12120 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12121
12122 if (!pmb) {
12123 rc = -ENOMEM;
12124 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12125 "0474 Unable to allocate memory for issuing "
12126 "MBOX_CONFIG_MSI command\n");
12127 goto mem_fail_out;
12128 }
12129 rc = lpfc_config_msi(phba, pmb);
12130 if (rc)
12131 goto mbx_fail_out;
12132 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12133 if (rc != MBX_SUCCESS) {
12134 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12135 "0351 Config MSI mailbox command failed, "
12136 "mbxCmd x%x, mbxStatus x%x\n",
12137 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12138 goto mbx_fail_out;
12139 }
12140
12141
12142 mempool_free(pmb, phba->mbox_mem_pool);
12143 return rc;
12144
12145 mbx_fail_out:
12146
12147 mempool_free(pmb, phba->mbox_mem_pool);
12148
12149 mem_fail_out:
12150
12151 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12152
12153 irq_fail_out:
12154
12155 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12156
12157 msi_fail_out:
12158
12159 pci_free_irq_vectors(phba->pcidev);
12160
12161 vec_fail_out:
12162 return rc;
12163 }
12164
12165
12166
12167
12168
12169
12170
12171
12172
12173
12174
12175
12176
12177
12178
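/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI3 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * Enables a single MSI vector and requests the SLI3 interrupt handler.
 * MSI is disabled again if request_irq() fails.
 *
 * Return: 0 on success, a negative errno otherwise.
 **/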
12179 static int
12180 lpfc_sli_enable_msi(struct lpfc_hba *phba)
12181 {
12182 int rc;
12183
12184 rc = pci_enable_msi(phba->pcidev);
12185 if (!rc)
12186 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12187 "0012 PCI enable MSI mode success.\n");
12188 else {
12189 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12190 "0471 PCI enable MSI mode failed (%d)\n", rc);
12191 return rc;
12192 }
12193
12194 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12195 0, LPFC_DRIVER_NAME, phba);
12196 if (rc) {
12197 pci_disable_msi(phba->pcidev);
12198 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12199 "0478 MSI request_irq failed (%d)\n", rc);
12200 }
12201 return rc;
12202 }
12203
12204
12205
12206
12207
12208
12209
12210
12211
12212
12213
12214
12215
12216
12217
12218
12219
12220
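/**
 * lpfc_sli_enable_intr - Enable device interrupts for SLI3 devices
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: configured interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx).
 *
 * Configures the port for SLI3 and then tries MSI-X, MSI and INTx in
 * turn, stopping at the first mode that can be enabled.
 *
 * Return: the interrupt mode actually enabled, or LPFC_INTR_ERROR.
 **/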
12221 static uint32_t
12222 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12223 {
12224 uint32_t intr_mode = LPFC_INTR_ERROR;
12225 int retval;
12226
12227
12228 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12229 if (retval)
12230 return intr_mode;
12231 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12232
12233 if (cfg_mode == 2) {
12234
12235 retval = lpfc_sli_enable_msix(phba);
12236 if (!retval) {
12237
12238 phba->intr_type = MSIX;
12239 intr_mode = 2;
12240 }
12241 }
12242
12243
12244 if (cfg_mode >= 1 && phba->intr_type == NONE) {
12245 retval = lpfc_sli_enable_msi(phba);
12246 if (!retval) {
12247
12248 phba->intr_type = MSI;
12249 intr_mode = 1;
12250 }
12251 }
12252
12253
12254 if (phba->intr_type == NONE) {
12255 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12256 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12257 if (!retval) {
12258
12259 phba->intr_type = INTx;
12260 intr_mode = 0;
12261 }
12262 }
12263 return intr_mode;
12264 }
12265
12266
12267
12268
12269
12270
12271
12272
12273
12274
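/**
 * lpfc_sli_disable_intr - Disable device interrupts for SLI3 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the registered IRQ(s), releases the interrupt vectors and resets
 * the driver's interrupt bookkeeping.
 **/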
12275 static void
12276 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12277 {
12278 int nr_irqs, i;
12279
12280 if (phba->intr_type == MSIX)
12281 nr_irqs = LPFC_MSIX_VECTORS;
12282 else
12283 nr_irqs = 1;
12284
12285 for (i = 0; i < nr_irqs; i++)
12286 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12287 pci_free_irq_vectors(phba->pcidev);
12288
12289
12290 phba->intr_type = NONE;
12291 phba->sli.slistat.sli_intr = 0;
12292 }
12293
12294
12295
12296
12297
12298
12299
12300
12301
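/**
 * lpfc_find_cpu_handle - Find the CPU mapped to an EQ or hardware queue
 * @phba: pointer to lpfc hba data structure.
 * @id: EQ index or hardware queue index to look up.
 * @match: LPFC_FIND_BY_EQ or LPFC_FIND_BY_HDWQ.
 *
 * Return: the first present CPU mapped to the given EQ (first-IRQ CPUs
 * only) or hardware queue; 0 if no match is found.
 **/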
12302 static uint16_t
12303 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12304 {
12305 struct lpfc_vector_map_info *cpup;
12306 int cpu;
12307
12308
12309 for_each_present_cpu(cpu) {
12310 cpup = &phba->sli4_hba.cpu_map[cpu];
12311
12312
12313
12314
12315
12316 if ((match == LPFC_FIND_BY_EQ) &&
12317 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12318 (cpup->eq == id))
12319 return cpu;
12320
12321
12322 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12323 return cpu;
12324 }
12325 return 0;
12326 }
12327
12328 #ifdef CONFIG_X86
12329
12330
12331
12332
12333
12334
12335
12336 static int
12337 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12338 uint16_t phys_id, uint16_t core_id)
12339 {
12340 struct lpfc_vector_map_info *cpup;
12341 int idx;
12342
12343 for_each_present_cpu(idx) {
12344 cpup = &phba->sli4_hba.cpu_map[idx];
12345
12346 if ((cpup->phys_id == phys_id) &&
12347 (cpup->core_id == core_id) &&
12348 (cpu != idx))
12349 return 1;
12350 }
12351 return 0;
12352 }
12353 #endif
12354
12355
12356
12357
12358
12359
12360
12361
12362
12363
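/**
 * lpfc_assign_eq_map_info - Record the EQ assigned to a CPU
 * @phba: pointer to lpfc hba data structure.
 * @eqidx: EQ index being assigned.
 * @flag: CPU map flag(s) to set, e.g. LPFC_CPU_FIRST_IRQ.
 * @cpu: CPU receiving the assignment.
 *
 * Updates the per-CPU map entry with the EQ index and flag and logs the
 * affinity assignment.
 **/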
12364 static inline void
12365 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12366 unsigned int cpu)
12367 {
12368 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12369 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12370
12371 cpup->eq = eqidx;
12372 cpup->flag |= flag;
12373
12374 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12375 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12376 cpu, eqhdl->irq, cpup->eq, cpup->flag);
12377 }
12378
12379
12380
12381
12382
12383
12384
12385 static void
12386 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12387 {
12388 struct lpfc_vector_map_info *cpup;
12389 struct lpfc_eq_intr_info *eqi;
12390 int cpu;
12391
12392 for_each_possible_cpu(cpu) {
12393 cpup = &phba->sli4_hba.cpu_map[cpu];
12394 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12395 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12396 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12397 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12398 cpup->flag = 0;
12399 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12400 INIT_LIST_HEAD(&eqi->list);
12401 eqi->icnt = 0;
12402 }
12403 }
12404
12405
12406
12407
12408
12409
12410
12411 static void
12412 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12413 {
12414 struct lpfc_hba_eq_hdl *eqhdl;
12415 int i;
12416
12417 for (i = 0; i < phba->cfg_irq_chann; i++) {
12418 eqhdl = lpfc_get_eq_hdl(i);
12419 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
12420 eqhdl->phba = phba;
12421 }
12422 }
12423
12424
12425
12426
12427
12428
12429
12430
12431
12432
12433
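/**
 * lpfc_cpu_affinity_check - Build the CPU to EQ/hardware queue mapping
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of MSI-X vectors allocated.
 *
 * Records the physical and core id of every present CPU, assigns an EQ
 * to any CPU that did not receive one directly (preferring a peer on the
 * same physical socket), and then distributes hardware queues so that
 * first-IRQ CPUs get their own queue and the remaining CPUs share.
 **/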
12434 static void
12435 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12436 {
12437 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12438 int max_phys_id, min_phys_id;
12439 int max_core_id, min_core_id;
12440 struct lpfc_vector_map_info *cpup;
12441 struct lpfc_vector_map_info *new_cpup;
12442 #ifdef CONFIG_X86
12443 struct cpuinfo_x86 *cpuinfo;
12444 #endif
12445 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12446 struct lpfc_hdwq_stat *c_stat;
12447 #endif
12448
12449 max_phys_id = 0;
12450 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12451 max_core_id = 0;
12452 min_core_id = LPFC_VECTOR_MAP_EMPTY;
12453
12454
12455 for_each_present_cpu(cpu) {
12456 cpup = &phba->sli4_hba.cpu_map[cpu];
12457 #ifdef CONFIG_X86
12458 cpuinfo = &cpu_data(cpu);
12459 cpup->phys_id = cpuinfo->phys_proc_id;
12460 cpup->core_id = cpuinfo->cpu_core_id;
12461 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12462 cpup->flag |= LPFC_CPU_MAP_HYPER;
12463 #else
12464
12465 cpup->phys_id = 0;
12466 cpup->core_id = cpu;
12467 #endif
12468
12469 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12470 "3328 CPU %d physid %d coreid %d flag x%x\n",
12471 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12472
12473 if (cpup->phys_id > max_phys_id)
12474 max_phys_id = cpup->phys_id;
12475 if (cpup->phys_id < min_phys_id)
12476 min_phys_id = cpup->phys_id;
12477
12478 if (cpup->core_id > max_core_id)
12479 max_core_id = cpup->core_id;
12480 if (cpup->core_id < min_core_id)
12481 min_core_id = cpup->core_id;
12482 }
12483
12484
12485
12486
12487
12488
12489 first_cpu = cpumask_first(cpu_present_mask);
12490 start_cpu = first_cpu;
12491
12492 for_each_present_cpu(cpu) {
12493 cpup = &phba->sli4_hba.cpu_map[cpu];
12494
12495
12496 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12497
12498 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12499
12500
12501
12502
12503
12504
12505 new_cpu = start_cpu;
12506 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12507 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12508 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12509 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12510 (new_cpup->phys_id == cpup->phys_id))
12511 goto found_same;
12512 new_cpu = cpumask_next(
12513 new_cpu, cpu_present_mask);
12514 if (new_cpu == nr_cpumask_bits)
12515 new_cpu = first_cpu;
12516 }
12517
12518 continue;
12519 found_same:
12520
12521 cpup->eq = new_cpup->eq;
12522
12523
12524
12525
12526
12527 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12528 if (start_cpu == nr_cpumask_bits)
12529 start_cpu = first_cpu;
12530
12531 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12532 "3337 Set Affinity: CPU %d "
12533 "eq %d from peer cpu %d same "
12534 "phys_id (%d)\n",
12535 cpu, cpup->eq, new_cpu,
12536 cpup->phys_id);
12537 }
12538 }
12539
12540
12541 start_cpu = first_cpu;
12542
12543 for_each_present_cpu(cpu) {
12544 cpup = &phba->sli4_hba.cpu_map[cpu];
12545
12546
12547 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12548
12549 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12550
12551
12552
12553
12554
12555
12556 new_cpu = start_cpu;
12557 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12558 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12559 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12560 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12561 goto found_any;
12562 new_cpu = cpumask_next(
12563 new_cpu, cpu_present_mask);
12564 if (new_cpu == nr_cpumask_bits)
12565 new_cpu = first_cpu;
12566 }
12567
12568 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12569 "3339 Set Affinity: CPU %d "
12570 "eq %d UNASSIGNED\n",
12571 cpu, cpup->eq);
12572 continue;
12573 found_any:
12574
12575 cpup->eq = new_cpup->eq;
12576
12577
12578
12579
12580
12581 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12582 if (start_cpu == nr_cpumask_bits)
12583 start_cpu = first_cpu;
12584
12585 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12586 "3338 Set Affinity: CPU %d "
12587 "eq %d from peer cpu %d (%d/%d)\n",
12588 cpu, cpup->eq, new_cpu,
12589 new_cpup->phys_id, new_cpup->core_id);
12590 }
12591 }
12592
12593
12594
12595
12596 idx = 0;
12597 for_each_present_cpu(cpu) {
12598 cpup = &phba->sli4_hba.cpu_map[cpu];
12599
12600
12601 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12602 continue;
12603
12604
12605 cpup->hdwq = idx;
12606 idx++;
12607 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12608 "3333 Set Affinity: CPU %d (phys %d core %d): "
12609 "hdwq %d eq %d flg x%x\n",
12610 cpu, cpup->phys_id, cpup->core_id,
12611 cpup->hdwq, cpup->eq, cpup->flag);
12612 }
12613
12614
12615
12616
12617
12618
12619
12620
12621 next_idx = idx;
12622 start_cpu = 0;
12623 idx = 0;
12624 for_each_present_cpu(cpu) {
12625 cpup = &phba->sli4_hba.cpu_map[cpu];
12626
12627
12628 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12629 continue;
12630
12631
12632
12633
12634
12635 if (next_idx < phba->cfg_hdw_queue) {
12636 cpup->hdwq = next_idx;
12637 next_idx++;
12638 continue;
12639 }
12640
12641
12642
12643
12644
12645
12646 new_cpu = start_cpu;
12647 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12648 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12649 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12650 new_cpup->phys_id == cpup->phys_id &&
12651 new_cpup->core_id == cpup->core_id) {
12652 goto found_hdwq;
12653 }
12654 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12655 if (new_cpu == nr_cpumask_bits)
12656 new_cpu = first_cpu;
12657 }
12658
12659
12660
12661
12662 new_cpu = start_cpu;
12663 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12664 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12665 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12666 new_cpup->phys_id == cpup->phys_id)
12667 goto found_hdwq;
12668
12669 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12670 if (new_cpu == nr_cpumask_bits)
12671 new_cpu = first_cpu;
12672 }
12673
12674
12675 cpup->hdwq = idx % phba->cfg_hdw_queue;
12676 idx++;
12677 goto logit;
12678 found_hdwq:
12679
12680 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12681 if (start_cpu == nr_cpumask_bits)
12682 start_cpu = first_cpu;
12683 cpup->hdwq = new_cpup->hdwq;
12684 logit:
12685 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12686 "3335 Set Affinity: CPU %d (phys %d core %d): "
12687 "hdwq %d eq %d flg x%x\n",
12688 cpu, cpup->phys_id, cpup->core_id,
12689 cpup->hdwq, cpup->eq, cpup->flag);
12690 }
12691
12692
12693
12694
12695
12696 idx = 0;
12697 for_each_possible_cpu(cpu) {
12698 cpup = &phba->sli4_hba.cpu_map[cpu];
12699 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12700 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12701 c_stat->hdwq_no = cpup->hdwq;
12702 #endif
12703 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12704 continue;
12705
12706 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12707 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12708 c_stat->hdwq_no = cpup->hdwq;
12709 #endif
12710 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12711 "3340 Set Affinity: not present "
12712 "CPU %d hdwq %d\n",
12713 cpu, cpup->hdwq);
12714 }
12715
12716
12717
12718
12719 return;
12720 }
12721
12722
12723
12724
12725
12726
12727
12728
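/**
 * lpfc_cpuhp_get_eq - Collect EQs that lose their last online CPU
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU going offline.
 * @eqlist: list to receive the affected event queues.
 *
 * Walks the IRQ affinity masks and adds to @eqlist every EQ whose vector
 * is bound only to @cpu, so the caller can switch those EQs to polling.
 *
 * Return: 0 on success, -ENOMEM if the temporary cpumask allocation fails.
 **/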
12729 static int
12730 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12731 struct list_head *eqlist)
12732 {
12733 const struct cpumask *maskp;
12734 struct lpfc_queue *eq;
12735 struct cpumask *tmp;
12736 u16 idx;
12737
12738 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12739 if (!tmp)
12740 return -ENOMEM;
12741
12742 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12743 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12744 if (!maskp)
12745 continue;
12746
12747
12748
12749
12750
12751 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12752 continue;
12753
12754
12755
12756
12757
12758
12759 cpumask_and(tmp, maskp, cpu_online_mask);
12760 if (cpumask_weight(tmp) > 1)
12761 continue;
12762
12763
12764
12765
12766
12767
12768 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12769 list_add(&eq->_poll_list, eqlist);
12770 }
12771 kfree(tmp);
12772 return 0;
12773 }
12774
12775 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12776 {
12777 if (phba->sli_rev != LPFC_SLI_REV4)
12778 return;
12779
12780 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12781 &phba->cpuhp);
12782
12783
12784
12785
12786 synchronize_rcu();
12787 del_timer_sync(&phba->cpuhp_poll_timer);
12788 }
12789
12790 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12791 {
12792 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
12793 return;
12794
12795 __lpfc_cpuhp_remove(phba);
12796 }
12797
12798 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12799 {
12800 if (phba->sli_rev != LPFC_SLI_REV4)
12801 return;
12802
12803 rcu_read_lock();
12804
12805 if (!list_empty(&phba->poll_list))
12806 mod_timer(&phba->cpuhp_poll_timer,
12807 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12808
12809 rcu_read_unlock();
12810
12811 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12812 &phba->cpuhp);
12813 }
12814
12815 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12816 {
12817 if (phba->pport->load_flag & FC_UNLOADING) {
12818 *retval = -EAGAIN;
12819 return true;
12820 }
12821
12822 if (phba->sli_rev != LPFC_SLI_REV4) {
12823 *retval = 0;
12824 return true;
12825 }
12826
12827
12828 return false;
12829 }
12830
12831
12832
12833
12834
12835
12836
12837 static inline void
12838 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12839 {
12840 cpumask_clear(&eqhdl->aff_mask);
12841 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12842 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12843 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
12844 }
12845
12846
12847
12848
12849
12850
12851 static inline void
12852 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12853 {
12854 cpumask_clear(&eqhdl->aff_mask);
12855 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12856 }
12857
12858
12859
12860
12861
12862
12863
12864
12865
12866
12867
12868
12869
12870
12871
12872
12873
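/**
 * lpfc_irq_rebalance - Rebalance IRQ affinity on a CPU hotplug event
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU going offline or coming online.
 * @offline: true when @cpu is going offline.
 *
 * Only acts when the driver manages its own affinity mask (non-NORMAL
 * irq_chann_mode) and @cpu is a first-IRQ CPU.  On offline, the vectors
 * bound to @cpu are retargeted to the next online CPU in the mask (or
 * released back to the kernel if none remains); on online, the CPU's EQ
 * vector is bound back to it.
 **/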
12874 static void
12875 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12876 {
12877 struct lpfc_vector_map_info *cpup;
12878 struct cpumask *aff_mask;
12879 unsigned int cpu_select, cpu_next, idx;
12880 const struct cpumask *orig_mask;
12881
12882 if (phba->irq_chann_mode == NORMAL_MODE)
12883 return;
12884
12885 orig_mask = &phba->sli4_hba.irq_aff_mask;
12886
12887 if (!cpumask_test_cpu(cpu, orig_mask))
12888 return;
12889
12890 cpup = &phba->sli4_hba.cpu_map[cpu];
12891
12892 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12893 return;
12894
12895 if (offline) {
12896
12897 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12898 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12899
12900
12901 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12902
12903
12904
12905 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12906 aff_mask = lpfc_get_aff_mask(idx);
12907
12908
12909 if (cpumask_test_cpu(cpu, aff_mask))
12910 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12911 cpu_select);
12912 }
12913 } else {
12914
12915 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12916 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12917 }
12918 } else {
12919
12920 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12921 }
12922 }
12923
12924 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12925 {
12926 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12927 struct lpfc_queue *eq, *next;
12928 LIST_HEAD(eqlist);
12929 int retval;
12930
12931 if (!phba) {
12932 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12933 return 0;
12934 }
12935
12936 if (__lpfc_cpuhp_checks(phba, &retval))
12937 return retval;
12938
12939 lpfc_irq_rebalance(phba, cpu, true);
12940
12941 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12942 if (retval)
12943 return retval;
12944
12945
12946 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12947 list_del_init(&eq->_poll_list);
12948 lpfc_sli4_start_polling(eq);
12949 }
12950
12951 return 0;
12952 }
12953
12954 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12955 {
12956 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12957 struct lpfc_queue *eq, *next;
12958 unsigned int n;
12959 int retval;
12960
12961 if (!phba) {
12962 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12963 return 0;
12964 }
12965
12966 if (__lpfc_cpuhp_checks(phba, &retval))
12967 return retval;
12968
12969 lpfc_irq_rebalance(phba, cpu, false);
12970
12971 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12972 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12973 if (n == cpu)
12974 lpfc_sli4_stop_polling(eq);
12975 }
12976
12977 return 0;
12978 }
12979
12980
12981
12982
12983
12984
12985
12986
12987
12988
12989
12990
12991
12992
12993
12994
12995
12996
12997
12998
12999
13000
13001
13002
13003
13004
13005
13006
13007
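/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode for SLI4 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates up to cfg_irq_chann MSI-X vectors (restricted to the
 * driver-managed affinity mask when one is in effect), requests an
 * interrupt handler per vector, records the IRQ in each EQ handle, seeds
 * the CPU map with first-IRQ assignments, and trims cfg_irq_chann if
 * fewer vectors were granted.
 *
 * Return: 0 on success, a negative errno on failure.
 **/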
13008 static int
13009 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
13010 {
13011 int vectors, rc, index;
13012 char *name;
13013 const struct cpumask *aff_mask = NULL;
13014 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
13015 struct lpfc_vector_map_info *cpup;
13016 struct lpfc_hba_eq_hdl *eqhdl;
13017 const struct cpumask *maskp;
13018 unsigned int flags = PCI_IRQ_MSIX;
13019
13020
13021 vectors = phba->cfg_irq_chann;
13022
13023 if (phba->irq_chann_mode != NORMAL_MODE)
13024 aff_mask = &phba->sli4_hba.irq_aff_mask;
13025
13026 if (aff_mask) {
13027 cpu_cnt = cpumask_weight(aff_mask);
13028 vectors = min(phba->cfg_irq_chann, cpu_cnt);
13029
13030
13031
13032
13033 cpu = cpumask_first(aff_mask);
13034 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13035 } else {
13036 flags |= PCI_IRQ_AFFINITY;
13037 }
13038
13039 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13040 if (rc < 0) {
13041 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13042 "0484 PCI enable MSI-X failed (%d)\n", rc);
13043 goto vec_fail_out;
13044 }
13045 vectors = rc;
13046
13047
13048 for (index = 0; index < vectors; index++) {
13049 eqhdl = lpfc_get_eq_hdl(index);
13050 name = eqhdl->handler_name;
13051 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
13052 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
13053 LPFC_DRIVER_HANDLER_NAME"%d", index);
13054
13055 eqhdl->idx = index;
13056 rc = request_irq(pci_irq_vector(phba->pcidev, index),
13057 &lpfc_sli4_hba_intr_handler, 0,
13058 name, eqhdl);
13059 if (rc) {
13060 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13061 "0486 MSI-X fast-path (%d) "
13062 "request_irq failed (%d)\n", index, rc);
13063 goto cfg_fail_out;
13064 }
13065
13066 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
13067
13068 if (aff_mask) {
13069
13070 if (cpu_select < nr_cpu_ids)
13071 lpfc_irq_set_aff(eqhdl, cpu_select);
13072
13073
13074 lpfc_assign_eq_map_info(phba, index,
13075 LPFC_CPU_FIRST_IRQ,
13076 cpu);
13077
13078
13079 cpu = cpumask_next(cpu, aff_mask);
13080
13081
13082 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13083 } else if (vectors == 1) {
13084 cpu = cpumask_first(cpu_present_mask);
13085 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
13086 cpu);
13087 } else {
13088 maskp = pci_irq_get_affinity(phba->pcidev, index);
13089
13090
13091 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
13092 cpup = &phba->sli4_hba.cpu_map[cpu];
13093
13094
13095
13096
13097
13098
13099
13100
13101
13102
13103
13104
13105
13106 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
13107 continue;
13108 lpfc_assign_eq_map_info(phba, index,
13109 LPFC_CPU_FIRST_IRQ,
13110 cpu);
13111 break;
13112 }
13113 }
13114 }
13115
13116 if (vectors != phba->cfg_irq_chann) {
13117 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13118 "3238 Reducing IO channels to match number of "
13119 "MSI-X vectors, requested %d got %d\n",
13120 phba->cfg_irq_chann, vectors);
13121 if (phba->cfg_irq_chann > vectors)
13122 phba->cfg_irq_chann = vectors;
13123 }
13124
13125 return rc;
13126
13127 cfg_fail_out:
13128
13129 for (--index; index >= 0; index--) {
13130 eqhdl = lpfc_get_eq_hdl(index);
13131 lpfc_irq_clear_aff(eqhdl);
13132 free_irq(eqhdl->irq, eqhdl);
13133 }
13134
13135
13136 pci_free_irq_vectors(phba->pcidev);
13137
13138 vec_fail_out:
13139 return rc;
13140 }
13141
13142
13143
13144
13145
13146
13147
13148
13149
13150
13151
13152
13153
13154
13155
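/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode for SLI4 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates a single MSI vector, requests the SLI4 interrupt handler,
 * and points every EQ handle at that one vector.
 *
 * Return: 0 on success, a negative value otherwise.
 **/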
13156 static int
13157 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13158 {
13159 int rc, index;
13160 unsigned int cpu;
13161 struct lpfc_hba_eq_hdl *eqhdl;
13162
13163 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13164 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
13165 if (rc > 0)
13166 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13167 "0487 PCI enable MSI mode success.\n");
13168 else {
13169 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13170 "0488 PCI enable MSI mode failed (%d)\n", rc);
13171 return rc ? rc : -1;
13172 }
13173
13174 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13175 0, LPFC_DRIVER_NAME, phba);
13176 if (rc) {
13177 pci_free_irq_vectors(phba->pcidev);
13178 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13179 "0490 MSI request_irq failed (%d)\n", rc);
13180 return rc;
13181 }
13182
13183 eqhdl = lpfc_get_eq_hdl(0);
13184 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13185
13186 cpu = cpumask_first(cpu_present_mask);
13187 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
13188
13189 for (index = 0; index < phba->cfg_irq_chann; index++) {
13190 eqhdl = lpfc_get_eq_hdl(index);
13191 eqhdl->idx = index;
13192 }
13193
13194 return 0;
13195 }
13196
13197
13198
13199
13200
13201
13202
13203
13204
13205
13206
13207
13208
13209
13210
13211
13212
13213
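/**
 * lpfc_sli4_enable_intr - Enable device interrupts for SLI4 devices
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: configured interrupt mode (2 = MSI-X, 1 = MSI, 0 = INTx).
 *
 * Tries MSI-X, MSI and INTx in turn, stopping at the first mode that can
 * be enabled, and initializes the EQ handles for the chosen mode.
 *
 * Return: the interrupt mode actually enabled, or LPFC_INTR_ERROR.
 **/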
13214 static uint32_t
13215 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13216 {
13217 uint32_t intr_mode = LPFC_INTR_ERROR;
13218 int retval, idx;
13219
13220 if (cfg_mode == 2) {
13221
13222 retval = 0;
13223 if (!retval) {
13224
13225 retval = lpfc_sli4_enable_msix(phba);
13226 if (!retval) {
13227
13228 phba->intr_type = MSIX;
13229 intr_mode = 2;
13230 }
13231 }
13232 }
13233
13234
13235 if (cfg_mode >= 1 && phba->intr_type == NONE) {
13236 retval = lpfc_sli4_enable_msi(phba);
13237 if (!retval) {
13238
13239 phba->intr_type = MSI;
13240 intr_mode = 1;
13241 }
13242 }
13243
13244
13245 if (phba->intr_type == NONE) {
13246 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13247 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13248 if (!retval) {
13249 struct lpfc_hba_eq_hdl *eqhdl;
13250 unsigned int cpu;
13251
13252
13253 phba->intr_type = INTx;
13254 intr_mode = 0;
13255
13256 eqhdl = lpfc_get_eq_hdl(0);
13257 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13258
13259 cpu = cpumask_first(cpu_present_mask);
13260 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13261 cpu);
13262 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13263 eqhdl = lpfc_get_eq_hdl(idx);
13264 eqhdl->idx = idx;
13265 }
13266 }
13267 }
13268 return intr_mode;
13269 }
13270
13271
13272
13273
13274
13275
13276
13277
13278
13279
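/**
 * lpfc_sli4_disable_intr - Disable device interrupts for SLI4 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the per-vector IRQs (clearing any driver-set affinity) or the
 * single MSI/INTx IRQ, releases the vectors and resets the driver's
 * interrupt bookkeeping.
 **/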
13280 static void
13281 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13282 {
13283
13284 if (phba->intr_type == MSIX) {
13285 int index;
13286 struct lpfc_hba_eq_hdl *eqhdl;
13287
13288
13289 for (index = 0; index < phba->cfg_irq_chann; index++) {
13290 eqhdl = lpfc_get_eq_hdl(index);
13291 lpfc_irq_clear_aff(eqhdl);
13292 free_irq(eqhdl->irq, eqhdl);
13293 }
13294 } else {
13295 free_irq(phba->pcidev->irq, phba);
13296 }
13297
13298 pci_free_irq_vectors(phba->pcidev);
13299
13300
13301 phba->intr_type = NONE;
13302 phba->sli.slistat.sli_intr = 0;
13303 }
13304
13305
13306
13307
13308
13309
13310
13311
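/**
 * lpfc_unset_hba - Unset the SLI3 HBA device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * Marks the physical port as unloading, frees the VPI resources, stops
 * the HBA timers, brings the SLI layer down, restarts the board and
 * disables interrupts.
 **/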
13312 static void
13313 lpfc_unset_hba(struct lpfc_hba *phba)
13314 {
13315 struct lpfc_vport *vport = phba->pport;
13316 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
13317
13318 spin_lock_irq(shost->host_lock);
13319 vport->load_flag |= FC_UNLOADING;
13320 spin_unlock_irq(shost->host_lock);
13321
13322 kfree(phba->vpi_bmask);
13323 kfree(phba->vpi_ids);
13324
13325 lpfc_stop_hba_timers(phba);
13326
13327 phba->pport->work_port_events = 0;
13328
13329 lpfc_sli_hba_down(phba);
13330
13331 lpfc_sli_brdrestart(phba);
13332
13333 lpfc_sli_disable_intr(phba);
13334
13335 return;
13336 }
13337
13338
13339
13340
13341
13342
13343
13344
13345
13346
13347
13348
13349
13350
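/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for aborted XRI exchanges to drain
 * @phba: pointer to lpfc hba data structure.
 *
 * Polls the aborted ELS, IO and NVMET exchange lists until they are all
 * empty, logging a message periodically once the wait exceeds the
 * configured timeout.
 **/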
13351 static void
13352 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13353 {
13354 struct lpfc_sli4_hdw_queue *qp;
13355 int idx, ccnt;
13356 int wait_time = 0;
13357 int io_xri_cmpl = 1;
13358 int nvmet_xri_cmpl = 1;
13359 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13360
13361
13362
13363
13364
13365 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13366
13367
13368 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13369 lpfc_nvme_wait_for_io_drain(phba);
13370
13371 ccnt = 0;
13372 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13373 qp = &phba->sli4_hba.hdwq[idx];
13374 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13375 if (!io_xri_cmpl)
13376 ccnt++;
13377 }
13378 if (ccnt)
13379 io_xri_cmpl = 0;
13380
13381 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13382 nvmet_xri_cmpl =
13383 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13384 }
13385
13386 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13387 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13388 if (!nvmet_xri_cmpl)
13389 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13390 "6424 NVMET XRI exchange busy "
13391 "wait time: %d seconds.\n",
13392 wait_time/1000);
13393 if (!io_xri_cmpl)
13394 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13395 "6100 IO XRI exchange busy "
13396 "wait time: %d seconds.\n",
13397 wait_time/1000);
13398 if (!els_xri_cmpl)
13399 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13400 "2878 ELS XRI exchange busy "
13401 "wait time: %d seconds.\n",
13402 wait_time/1000);
13403 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13404 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13405 } else {
13406 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13407 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13408 }
13409
13410 ccnt = 0;
13411 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13412 qp = &phba->sli4_hba.hdwq[idx];
13413 io_xri_cmpl = list_empty(
13414 &qp->lpfc_abts_io_buf_list);
13415 if (!io_xri_cmpl)
13416 ccnt++;
13417 }
13418 if (ccnt)
13419 io_xri_cmpl = 0;
13420
13421 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13422 nvmet_xri_cmpl = list_empty(
13423 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13424 }
13425 els_xri_cmpl =
13426 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13427
13428 }
13429 }
13430
13431
13432
13433
13434
13435
13436
13437
13438
13439
13440
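/**
 * lpfc_sli4_hba_unset - Unset the SLI4 HBA device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops the HBA timers, blocks and flushes any active mailbox command,
 * aborts outstanding IOCBs, waits for busy XRI exchanges to drain,
 * removes the CPU hotplug instance, disables interrupts and SR-IOV,
 * stops the worker thread and RAS firmware logging, resets the PCI
 * function and destroys the SLI4 queues.
 **/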
13441 static void
13442 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13443 {
13444 int wait_cnt = 0;
13445 LPFC_MBOXQ_t *mboxq;
13446 struct pci_dev *pdev = phba->pcidev;
13447
13448 lpfc_stop_hba_timers(phba);
13449 hrtimer_cancel(&phba->cmf_timer);
13450
13451 if (phba->pport)
13452 phba->sli4_hba.intr_enable = 0;
13453
13454
13455
13456
13457
13458
13459
13460 spin_lock_irq(&phba->hbalock);
13461 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13462 spin_unlock_irq(&phba->hbalock);
13463
13464 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13465 msleep(10);
13466 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13467 break;
13468 }
13469
13470 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13471 spin_lock_irq(&phba->hbalock);
13472 mboxq = phba->sli.mbox_active;
13473 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13474 __lpfc_mbox_cmpl_put(phba, mboxq);
13475 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13476 phba->sli.mbox_active = NULL;
13477 spin_unlock_irq(&phba->hbalock);
13478 }
13479
13480
13481 lpfc_sli_hba_iocb_abort(phba);
13482
13483 if (!pci_channel_offline(phba->pcidev))
13484
13485 lpfc_sli4_xri_exchange_busy_wait(phba);
13486
13487
13488 if (phba->pport)
13489 lpfc_cpuhp_remove(phba);
13490
13491
13492 lpfc_sli4_disable_intr(phba);
13493
13494
13495 if (phba->cfg_sriov_nr_virtfn)
13496 pci_disable_sriov(pdev);
13497
13498
13499 kthread_stop(phba->worker_thread);
13500
13501
13502 lpfc_ras_stop_fwlog(phba);
13503
13504
13505 lpfc_pci_function_reset(phba);
13506
13507
13508 lpfc_sli4_queue_destroy(phba);
13509
13510
13511 if (phba->ras_fwlog.ras_enabled)
13512 lpfc_sli4_ras_dma_free(phba);
13513
13514
13515 if (phba->pport)
13516 phba->pport->work_port_events = 0;
13517 }
13518
13519 static uint32_t
13520 lpfc_cgn_crc32(uint32_t crc, u8 byte)
13521 {
13522 uint32_t msb = 0;
13523 uint32_t bit;
13524
13525 for (bit = 0; bit < 8; bit++) {
13526 msb = (crc >> 31) & 1;
13527 crc <<= 1;
13528
13529 if (msb ^ (byte & 1)) {
13530 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13531 crc |= 1;
13532 }
13533 byte >>= 1;
13534 }
13535 return crc;
13536 }
13537
13538 static uint32_t
13539 lpfc_cgn_reverse_bits(uint32_t wd)
13540 {
13541 uint32_t result = 0;
13542 uint32_t i;
13543
13544 for (i = 0; i < 32; i++) {
13545 result <<= 1;
13546 result |= (1 & (wd >> i));
13547 }
13548 return result;
13549 }
13550
13551
13552
13553
13554
13555 uint32_t
13556 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13557 {
13558 uint32_t i;
13559 uint32_t result;
13560 uint8_t *data = (uint8_t *)ptr;
13561
13562 for (i = 0; i < byteLen; ++i)
13563 crc = lpfc_cgn_crc32(crc, data[i]);
13564
13565 result = ~lpfc_cgn_reverse_bits(crc);
13566 return result;
13567 }
13568
13569 void
13570 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13571 {
13572 struct lpfc_cgn_info *cp;
13573 struct timespec64 cmpl_time;
13574 struct tm broken;
13575 uint16_t size;
13576 uint32_t crc;
13577
13578 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13579 "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13580
13581 if (!phba->cgn_i)
13582 return;
13583 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13584
13585 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13586 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13587 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13588 atomic_set(&phba->cgn_sync_warn_cnt, 0);
13589
13590 atomic_set(&phba->cgn_driver_evt_cnt, 0);
13591 atomic_set(&phba->cgn_latency_evt_cnt, 0);
13592 atomic64_set(&phba->cgn_latency_evt, 0);
13593 phba->cgn_evt_minute = 0;
13594 phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
13595
13596 memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
13597 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13598 cp->cgn_info_version = LPFC_CGN_INFO_V3;
13599
13600
13601 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13602 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13603 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13604 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13605
13606 ktime_get_real_ts64(&cmpl_time);
13607 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13608
13609 cp->cgn_info_month = broken.tm_mon + 1;
13610 cp->cgn_info_day = broken.tm_mday;
13611 cp->cgn_info_year = broken.tm_year - 100;
13612 cp->cgn_info_hour = broken.tm_hour;
13613 cp->cgn_info_minute = broken.tm_min;
13614 cp->cgn_info_second = broken.tm_sec;
13615
13616 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13617 "2643 CGNInfo Init: Start Time "
13618 "%d/%d/%d %d:%d:%d\n",
13619 cp->cgn_info_day, cp->cgn_info_month,
13620 cp->cgn_info_year, cp->cgn_info_hour,
13621 cp->cgn_info_minute, cp->cgn_info_second);
13622
13623
13624 if (phba->pport) {
13625 size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13626 cp->cgn_lunq = cpu_to_le16(size);
13627 }
13628
13629
13630
13631 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13632 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13633 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13634 cp->cgn_info_crc = cpu_to_le32(crc);
13635
13636 phba->cgn_evt_timestamp = jiffies +
13637 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13638 }
13639
13640 void
13641 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13642 {
13643 struct lpfc_cgn_info *cp;
13644 struct timespec64 cmpl_time;
13645 struct tm broken;
13646 uint32_t crc;
13647
13648 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13649 "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13650
13651 if (!phba->cgn_i)
13652 return;
13653
13654 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13655 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
13656
13657 ktime_get_real_ts64(&cmpl_time);
13658 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13659
13660 cp->cgn_stat_month = broken.tm_mon + 1;
13661 cp->cgn_stat_day = broken.tm_mday;
13662 cp->cgn_stat_year = broken.tm_year - 100;
13663 cp->cgn_stat_hour = broken.tm_hour;
13664 cp->cgn_stat_minute = broken.tm_min;
13665
13666 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13667 "2647 CGNstat Init: Start Time "
13668 "%d/%d/%d %d:%d\n",
13669 cp->cgn_stat_day, cp->cgn_stat_month,
13670 cp->cgn_stat_year, cp->cgn_stat_hour,
13671 cp->cgn_stat_minute);
13672
13673 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13674 cp->cgn_info_crc = cpu_to_le32(crc);
13675 }
13676
13677
13678
13679
13680
13681
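/**
 * __lpfc_reg_congestion_buf - Register or unregister the congestion buffer
 * @phba: pointer to lpfc hba data structure.
 * @reg: non-zero to register the buffer with the port, zero to unregister.
 *
 * Issues the REG_CONGESTION_BUF mailbox command with the DMA address of
 * the congestion information buffer.
 *
 * Return: 0 on success, -ENXIO or -ENOMEM on failure.
 **/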
13682 static int
13683 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13684 {
13685 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13686 union lpfc_sli4_cfg_shdr *shdr;
13687 uint32_t shdr_status, shdr_add_status;
13688 LPFC_MBOXQ_t *mboxq;
13689 int length, rc;
13690
13691 if (!phba->cgn_i)
13692 return -ENXIO;
13693
13694 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13695 if (!mboxq) {
13696 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13697 "2641 REG_CONGESTION_BUF mbox allocation fail: "
13698 "HBA state x%x reg %d\n",
13699 phba->pport->port_state, reg);
13700 return -ENOMEM;
13701 }
13702
13703 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13704 sizeof(struct lpfc_sli4_cfg_mhdr));
13705 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13706 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13707 LPFC_SLI4_MBX_EMBED);
13708 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13709 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13710 if (reg > 0)
13711 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13712 else
13713 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13714 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13715 reg_congestion_buf->addr_lo =
13716 putPaddrLow(phba->cgn_i->phys);
13717 reg_congestion_buf->addr_hi =
13718 putPaddrHigh(phba->cgn_i->phys);
13719
13720 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13721 shdr = (union lpfc_sli4_cfg_shdr *)
13722 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13723 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13724 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13725 &shdr->response);
13726 mempool_free(mboxq, phba->mbox_mem_pool);
13727 if (shdr_status || shdr_add_status || rc) {
13728 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13729 "2642 REG_CONGESTION_BUF mailbox "
13730 "failed with status x%x add_status x%x,"
13731 " mbx status x%x reg %d\n",
13732 shdr_status, shdr_add_status, rc, reg);
13733 return -ENXIO;
13734 }
13735 return 0;
13736 }
13737
13738 int
13739 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13740 {
13741 lpfc_cmf_stop(phba);
13742 return __lpfc_reg_congestion_buf(phba, 0);
13743 }
13744
13745 int
13746 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13747 {
13748 return __lpfc_reg_congestion_buf(phba, 1);
13749 }
13750
13751
13752
13753
13754
13755
13756
13757
13758
13759
13760
13761
13762
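/**
 * lpfc_get_sli4_parameters - Retrieve and cache the SLI4 port parameters
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the driver internal mailbox command.
 *
 * Issues the GET_SLI4_PARAMETERS mailbox command and caches the reported
 * capabilities (queue versions, SGE limits, NVMe/PBDE/embedded-IO
 * support, expanded WQ/CQ page sizes, MDS diagnostics and SLER), falling
 * back to FCP-only operation when the firmware cannot support NVMe.
 *
 * Return: 0 on success, the mailbox status or -ENODEV on failure.
 **/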
13763 int
13764 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13765 {
13766 int rc;
13767 struct lpfc_mqe *mqe = &mboxq->u.mqe;
13768 struct lpfc_pc_sli4_params *sli4_params;
13769 uint32_t mbox_tmo;
13770 int length;
13771 bool exp_wqcq_pages = true;
13772 struct lpfc_sli4_parameters *mbx_sli4_parameters;
13773
13774
13775
13776
13777
13778
13779 phba->sli4_hba.rpi_hdrs_in_use = 1;
13780
13781
13782 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13783 sizeof(struct lpfc_sli4_cfg_mhdr));
13784 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13785 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13786 length, LPFC_SLI4_MBX_EMBED);
13787 if (!phba->sli4_hba.intr_enable)
13788 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13789 else {
13790 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13791 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13792 }
13793 if (unlikely(rc))
13794 return rc;
13795 sli4_params = &phba->sli4_hba.pc_sli4_params;
13796 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13797 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13798 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13799 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13800 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13801 mbx_sli4_parameters);
13802 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13803 mbx_sli4_parameters);
13804 if (bf_get(cfg_phwq, mbx_sli4_parameters))
13805 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13806 else
13807 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13808 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13809 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13810 mbx_sli4_parameters);
13811 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13812 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13813 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13814 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13815 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13816 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13817 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13818 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13819 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13820 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13821 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13822 mbx_sli4_parameters);
13823 sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13824 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13825 mbx_sli4_parameters);
13826 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13827 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13828
13829
13830 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13831
13832
13833 rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13834 bf_get(cfg_xib, mbx_sli4_parameters));
13835
13836 if (rc) {
13837
13838 sli4_params->nvme = 1;
13839
13840
13841 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13842 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13843 "6133 Disabling NVME support: "
13844 "FC4 type not supported: x%x\n",
13845 phba->cfg_enable_fc4_type);
13846 goto fcponly;
13847 }
13848 } else {
13849
13850 sli4_params->nvme = 0;
13851 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13852 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13853 "6101 Disabling NVME support: Not "
13854 "supported by firmware (%d %d) x%x\n",
13855 bf_get(cfg_nvme, mbx_sli4_parameters),
13856 bf_get(cfg_xib, mbx_sli4_parameters),
13857 phba->cfg_enable_fc4_type);
13858 fcponly:
13859 phba->nvmet_support = 0;
13860 phba->cfg_nvmet_mrq = 0;
13861 phba->cfg_nvme_seg_cnt = 0;
13862
13863
13864 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13865 return -ENODEV;
13866 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13867 }
13868 }
13869
13870
13871
13872
13873 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13874 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13875
13876
13877 if (bf_get(cfg_pbde, mbx_sli4_parameters))
13878 phba->cfg_enable_pbde = 1;
13879 else
13880 phba->cfg_enable_pbde = 0;

	/*
	 * Suppress Response is enabled only when all of the following hold:
	 *   - the lpfc_suppress_rsp module parameter is set,
	 *   - the firmware reports XIB support,
	 *   - the firmware does not set the "no suppress response" bit
	 *     (cfg_nosr).
	 * Otherwise the configuration option is cleared.
	 */
13890 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13891 !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13892 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13893 else
13894 phba->cfg_suppress_rsp = 0;
13895
13896 if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13897 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Limit the advertised SGE length to what the driver can handle */
13900 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13901 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Check whether the adapter supports an embedded copy of the
	 * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
	 * to use this option, 128-byte WQEs must be used.
	 */
13908 if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13909 phba->fcp_embed_io = 1;
13910 else
13911 phba->fcp_embed_io = 0;
13912
13913 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13914 "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13915 bf_get(cfg_xib, mbx_sli4_parameters),
13916 phba->cfg_enable_pbde,
13917 phba->fcp_embed_io, sli4_params->nvme,
13918 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13919
13920 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13921 LPFC_SLI_INTF_IF_TYPE_2) &&
13922 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13923 LPFC_SLI_INTF_FAMILY_LNCR_A0))
13924 exp_wqcq_pages = false;
13925
13926 if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13927 (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13928 exp_wqcq_pages &&
13929 (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13930 phba->enab_exp_wqcq_pages = 1;
13931 else
13932 phba->enab_exp_wqcq_pages = 0;

	/* Check whether MDS Diagnostics is supported by the firmware */
13936 if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13937 phba->mds_diags_support = 1;
13938 else
13939 phba->mds_diags_support = 0;

	/*
	 * Check whether the firmware supports NVMe Sequence Level Error
	 * Recovery (SLER).
	 */
13944 if (bf_get(cfg_nsler, mbx_sli4_parameters))
13945 phba->nsler = 1;
13946 else
13947 phba->nsler = 0;
13948
13949 return 0;
13950 }
13951
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to register an SLI-3 device
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine attaches an Emulex HBA with an SLI-3 interface spec to the
 * PCI subsystem. It allocates the HBA data structure, maps PCI memory
 * space, sets up driver resources, creates the SCSI host, enables the
 * device interrupt and brings the port up.
 *
 * Return codes
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
13969 static int
13970 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13971 {
13972 struct lpfc_hba *phba;
13973 struct lpfc_vport *vport = NULL;
13974 struct Scsi_Host *shost = NULL;
13975 int error;
13976 uint32_t cfg_mode, intr_mode;
13977
13978
13979 phba = lpfc_hba_alloc(pdev);
13980 if (!phba)
13981 return -ENOMEM;
13982
13983
13984 error = lpfc_enable_pci_dev(phba);
13985 if (error)
13986 goto out_free_phba;
13987
13988
13989 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13990 if (error)
13991 goto out_disable_pci_dev;
13992
13993
13994 error = lpfc_sli_pci_mem_setup(phba);
13995 if (error) {
13996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13997 "1402 Failed to set up pci memory space.\n");
13998 goto out_disable_pci_dev;
13999 }
14000
14001
14002 error = lpfc_sli_driver_resource_setup(phba);
14003 if (error) {
14004 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14005 "1404 Failed to set up driver resource.\n");
14006 goto out_unset_pci_mem_s3;
14007 }
14008
14009
14010
14011 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
14012 if (error) {
14013 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14014 "1405 Failed to initialize iocb list.\n");
14015 goto out_unset_driver_resource_s3;
14016 }
14017
14018
14019 error = lpfc_setup_driver_resource_phase2(phba);
14020 if (error) {
14021 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14022 "1406 Failed to set up driver resource.\n");
14023 goto out_free_iocb_list;
14024 }
14025
14026
14027 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14028
14029
14030 error = lpfc_create_shost(phba);
14031 if (error) {
14032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14033 "1407 Failed to create scsi host.\n");
14034 goto out_unset_driver_resource;
14035 }
14036
14037
14038 vport = phba->pport;
14039 error = lpfc_alloc_sysfs_attr(vport);
14040 if (error) {
14041 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14042 "1476 Failed to allocate sysfs attr\n");
14043 goto out_destroy_shost;
14044 }
14045
14046 shost = lpfc_shost_from_vport(vport);
14047
14048 cfg_mode = phba->cfg_use_msi;
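	/*
	 * Configure and enable the interrupt, retrying with progressively
	 * simpler modes (MSI-X -> MSI -> INTx) whenever the board fails the
	 * active-interrupt test after HBA setup.
	 */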
14049 while (true) {
14050
14051 lpfc_stop_port(phba);
14052
14053 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
14054 if (intr_mode == LPFC_INTR_ERROR) {
14055 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14056 "0431 Failed to enable interrupt.\n");
14057 error = -ENODEV;
14058 goto out_free_sysfs_attr;
14059 }
14060
14061 if (lpfc_sli_hba_setup(phba)) {
14062 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14063 "1477 Failed to set up hba\n");
14064 error = -ENODEV;
14065 goto out_remove_device;
14066 }
14067
14068
14069 msleep(50);
14070
14071 if (intr_mode == 0 ||
14072 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
14073
14074 phba->intr_mode = intr_mode;
14075 lpfc_log_intr_mode(phba, intr_mode);
14076 break;
14077 } else {
14078 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14079 "0447 Configure interrupt mode (%d) "
14080 "failed active interrupt test.\n",
14081 intr_mode);
14082
14083 lpfc_sli_disable_intr(phba);
14084
14085 cfg_mode = --intr_mode;
14086 }
14087 }
14088
14089
14090 lpfc_post_init_setup(phba);
14091
14092
14093 lpfc_create_static_vport(phba);
14094
14095 return 0;
14096
14097 out_remove_device:
14098 lpfc_unset_hba(phba);
14099 out_free_sysfs_attr:
14100 lpfc_free_sysfs_attr(vport);
14101 out_destroy_shost:
14102 lpfc_destroy_shost(phba);
14103 out_unset_driver_resource:
14104 lpfc_unset_driver_resource_phase2(phba);
14105 out_free_iocb_list:
14106 lpfc_free_iocb_list(phba);
14107 out_unset_driver_resource_s3:
14108 lpfc_sli_driver_resource_unset(phba);
14109 out_unset_pci_mem_s3:
14110 lpfc_sli_pci_mem_unset(phba);
14111 out_disable_pci_dev:
14112 lpfc_disable_pci_dev(phba);
14113 if (shost)
14114 scsi_host_put(shost);
14115 out_free_phba:
14116 lpfc_hba_free(phba);
14117 return error;
14118 }

/**
 * lpfc_pci_remove_one_s3 - PCI remove func for an SLI-3 device
 * @pdev: pointer to PCI device
 *
 * This routine detaches an SLI-3 HBA from the PCI subsystem. It terminates
 * the vports, removes the SCSI host, brings down the SLI layer, frees all
 * driver resources and releases the PCI device.
 **/
14129 static void
14130 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
14131 {
14132 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14133 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14134 struct lpfc_vport **vports;
14135 struct lpfc_hba *phba = vport->phba;
14136 int i;
14137
14138 spin_lock_irq(&phba->hbalock);
14139 vport->load_flag |= FC_UNLOADING;
14140 spin_unlock_irq(&phba->hbalock);
14141
14142 lpfc_free_sysfs_attr(vport);
14143
14144
14145 vports = lpfc_create_vport_work_array(phba);
14146 if (vports != NULL)
14147 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14148 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14149 continue;
14150 fc_vport_terminate(vports[i]->fc_vport);
14151 }
14152 lpfc_destroy_vport_work_array(phba, vports);
14153
14154
14155 fc_remove_host(shost);
14156 scsi_remove_host(shost);
14157
14158
14159 lpfc_cleanup(vport);
14160
	/*
	 * Bring down the SLI layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
14168 lpfc_sli_hba_down(phba);
14169
14170 kthread_stop(phba->worker_thread);
14171
14172 lpfc_sli_brdrestart(phba);
14173
14174 kfree(phba->vpi_bmask);
14175 kfree(phba->vpi_ids);
14176
14177 lpfc_stop_hba_timers(phba);
14178 spin_lock_irq(&phba->port_list_lock);
14179 list_del_init(&vport->listentry);
14180 spin_unlock_irq(&phba->port_list_lock);
14181
14182 lpfc_debugfs_terminate(vport);
14183
14184
14185 if (phba->cfg_sriov_nr_virtfn)
14186 pci_disable_sriov(pdev);
14187
14188
14189 lpfc_sli_disable_intr(phba);
14190
14191 scsi_host_put(shost);
14192
	/*
	 * Free the SCSI buffers before freeing the memory pools, since the
	 * buffers are returned to their corresponding pools here.
	 */
14197 lpfc_scsi_free(phba);
14198 lpfc_free_iocb_list(phba);
14199
14200 lpfc_mem_free_all(phba);
14201
14202 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
14203 phba->hbqslimp.virt, phba->hbqslimp.phys);
14204
14205
14206 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
14207 phba->slim2p.virt, phba->slim2p.phys);
14208
14209
14210 iounmap(phba->ctrl_regs_memmap_p);
14211 iounmap(phba->slim_memmap_p);
14212
14213 lpfc_hba_free(phba);
14214
14215 pci_release_mem_regions(pdev);
14216 pci_disable_device(pdev);
14217 }
14218
/**
 * lpfc_pci_suspend_one_s3 - PCI suspend func for an SLI-3 device
 * @dev_d: pointer to device
 *
 * This routine is called by the kernel PM subsystem to quiesce an SLI-3
 * HBA: the port is taken offline, the worker thread is stopped and the
 * device interrupt is disabled.
 *
 * Return code
 *	0 - driver suspended the device
 **/
14239 static int __maybe_unused
14240 lpfc_pci_suspend_one_s3(struct device *dev_d)
14241 {
14242 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14243 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14244
14245 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14246 "0473 PCI device Power Management suspend.\n");
14247
14248
14249 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14250 lpfc_offline(phba);
14251 kthread_stop(phba->worker_thread);
14252
14253
14254 lpfc_sli_disable_intr(phba);
14255
14256 return 0;
14257 }
14258
/**
 * lpfc_pci_resume_one_s3 - PCI resume func for an SLI-3 device
 * @dev_d: pointer to device
 *
 * This routine is called by the kernel PM subsystem to resume an SLI-3
 * HBA: the worker thread is restarted, the device interrupt is re-enabled
 * in the previously used mode, and the HBA is restarted and brought online.
 *
 * Return codes
 *	0 - driver resumed the device
 *	Error value - error occurred while resuming the device
 **/
14278 static int __maybe_unused
14279 lpfc_pci_resume_one_s3(struct device *dev_d)
14280 {
14281 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14282 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14283 uint32_t intr_mode;
14284 int error;
14285
14286 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14287 "0452 PCI device Power Management resume.\n");
14288
14289
14290 phba->worker_thread = kthread_run(lpfc_do_work, phba,
14291 "lpfc_worker_%d", phba->brd_no);
14292 if (IS_ERR(phba->worker_thread)) {
14293 error = PTR_ERR(phba->worker_thread);
14294 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14295 "0434 PM resume failed to start worker "
14296 "thread: error=x%x.\n", error);
14297 return error;
14298 }
14299
14300
14301 lpfc_cpu_map_array_init(phba);
14302
14303 lpfc_hba_eq_hdl_array_init(phba);
14304
14305 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14306 if (intr_mode == LPFC_INTR_ERROR) {
14307 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14308 "0430 PM resume Failed to enable interrupt\n");
14309 return -EIO;
14310 } else
14311 phba->intr_mode = intr_mode;
14312
14313
14314 lpfc_sli_brdrestart(phba);
14315 lpfc_online(phba);
14316
14317
14318 lpfc_log_intr_mode(phba, phba->intr_mode);
14319
14320 return 0;
14321 }
14322
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI-3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the device for PCI slot recovery by aborting all
 * outstanding FCP I/Os on the rings.
 **/
14330 static void
14331 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14332 {
14333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14334 "2723 PCI channel I/O abort preparing for recovery\n");
14335
14336
14337
14338
14339
14340 lpfc_sli_abort_fcp_rings(phba);
14341 }
14342
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI-3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the device for a PCI slot reset: management I/O
 * and SCSI devices are blocked, outstanding I/Os are flushed, HBA timers
 * are stopped, and the device interrupt and PCI device are disabled.
 **/
14351 static void
14352 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14353 {
14354 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14355 "2710 PCI channel disable preparing for reset\n");
14356
14357
14358 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14359
14360
14361 lpfc_scsi_dev_block(phba);
14362
14363
14364 lpfc_sli_flush_io_rings(phba);
14365
14366
14367 lpfc_stop_hba_timers(phba);
14368
14369
14370 lpfc_sli_disable_intr(phba);
14371 pci_disable_device(phba->pcidev);
14372 }
14373
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI-3 dev for pci perm failure
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the device for a PCI channel permanent failure:
 * SCSI devices are blocked, HBA timers are stopped and all outstanding
 * I/Os are flushed with an error completion.
 **/
14382 static void
14383 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14384 {
14385 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14386 "2711 PCI channel permanent disable for failure\n");
14387
14388 lpfc_scsi_dev_block(phba);
14389 lpfc_sli4_prep_dev_for_reset(phba);
14390
14391
14392 lpfc_stop_hba_timers(phba);
14393
14394
14395 lpfc_sli_flush_io_rings(phba);
14396 }
14397
/**
 * lpfc_io_error_detected_s3 - PCI error detected handler for an SLI-3 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called by the PCI error recovery framework when a PCI bus
 * error affecting this device is detected. Depending on the channel state
 * it prepares the device for recovery, for a slot reset, or for a permanent
 * disable.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - recovery possible without a slot reset
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
14416 static pci_ers_result_t
14417 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
14418 {
14419 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14420 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14421
14422 switch (state) {
14423 case pci_channel_io_normal:
14424
14425 lpfc_sli_prep_dev_for_recover(phba);
14426 return PCI_ERS_RESULT_CAN_RECOVER;
14427 case pci_channel_io_frozen:
14428
14429 lpfc_sli_prep_dev_for_reset(phba);
14430 return PCI_ERS_RESULT_NEED_RESET;
14431 case pci_channel_io_perm_failure:
14432
14433 lpfc_sli_prep_dev_for_perm_failure(phba);
14434 return PCI_ERS_RESULT_DISCONNECT;
14435 default:
14436
14437 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14438 "0472 Unknown PCI error state: x%x\n", state);
14439 lpfc_sli_prep_dev_for_reset(phba);
14440 return PCI_ERS_RESULT_NEED_RESET;
14441 }
14442 }
14443
/**
 * lpfc_io_slot_reset_s3 - PCI slot reset handler for an SLI-3 device
 * @pdev: pointer to PCI device.
 *
 * This routine is called by the PCI error recovery framework after the PCI
 * slot has been reset. It re-enables the PCI device, restores its state,
 * re-enables the device interrupt, takes the port offline and restarts the
 * board; the port is brought back online by the io_resume callback.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
14462 static pci_ers_result_t
14463 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
14464 {
14465 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14466 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14467 struct lpfc_sli *psli = &phba->sli;
14468 uint32_t intr_mode;
14469
14470 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14471 if (pci_enable_device_mem(pdev)) {
14472 printk(KERN_ERR "lpfc: Cannot re-enable "
14473 "PCI device after reset.\n");
14474 return PCI_ERS_RESULT_DISCONNECT;
14475 }
14476
14477 pci_restore_state(pdev);
14478
	/*
	 * pci_restore_state() clears the device's saved_state flag, so save
	 * the restored state again for any later restore.
	 */
14483 pci_save_state(pdev);
14484
14485 if (pdev->is_busmaster)
14486 pci_set_master(pdev);
14487
14488 spin_lock_irq(&phba->hbalock);
14489 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14490 spin_unlock_irq(&phba->hbalock);
14491
14492
14493 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14494 if (intr_mode == LPFC_INTR_ERROR) {
14495 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14496 "0427 Cannot re-enable interrupt after "
14497 "slot reset.\n");
14498 return PCI_ERS_RESULT_DISCONNECT;
14499 } else
14500 phba->intr_mode = intr_mode;
14501
14502
14503 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14504 lpfc_offline(phba);
14505 lpfc_sli_brdrestart(phba);
14506
14507
14508 lpfc_log_intr_mode(phba, phba->intr_mode);
14509
14510 return PCI_ERS_RESULT_RECOVERED;
14511 }
14512
/**
 * lpfc_io_resume_s3 - PCI resume handler for an SLI-3 device
 * @pdev: pointer to PCI device.
 *
 * This routine is called by the PCI error recovery framework once error
 * handling has completed; it brings the device back online.
 **/
14523 static void
14524 lpfc_io_resume_s3(struct pci_dev *pdev)
14525 {
14526 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14527 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14528
14529
14530 lpfc_online(phba);
14531 }
14532
/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS IOCBs to reserve, scaled by the maximum number
 * of configured XRIs. Only meaningful on SLI-4 ports; returns 0 otherwise.
 **/
14539 int
14540 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14541 {
14542 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14543
14544 if (phba->sli_rev == LPFC_SLI_REV4) {
14545 if (max_xri <= 100)
14546 return 10;
14547 else if (max_xri <= 256)
14548 return 25;
14549 else if (max_xri <= 512)
14550 return 50;
14551 else if (max_xri <= 1024)
14552 return 100;
14553 else if (max_xri <= 1536)
14554 return 150;
14555 else if (max_xri <= 2048)
14556 return 200;
14557 else
14558 return 250;
14559 } else
14560 return 0;
14561 }
14562
/**
 * lpfc_sli4_get_iocb_cnt - Calculate the total # of IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the ELS IOCB count, plus the buffers posted for NVMET when
 * target mode is enabled.
 **/
14569 int
14570 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14571 {
14572 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14573
14574 if (phba->nvmet_support)
14575 max_xri += LPFC_NVMET_BUF_POST;
14576 return max_xri;
14577 }
14578
14579
14580 static int
14581 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14582 uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
14583 const struct firmware *fw)
14584 {
14585 int rc;
14586 u8 sli_family;
14587
14588 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14589
	/*
	 * A firmware image is rejected when the download is not supported,
	 * when the image's magic number does not match the ASIC family
	 * (G6/G7/G7P), or when downloads are administratively disabled.
	 */
14595 if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
14596 (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
14597 magic_number != MAGIC_NUMBER_G6) ||
14598 (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
14599 magic_number != MAGIC_NUMBER_G7) ||
14600 (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
14601 magic_number != MAGIC_NUMBER_G7P)) {
14602 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14603 "3030 This firmware version is not supported on"
14604 " this HBA model. Device:%x Magic:%x Type:%x "
14605 "ID:%x Size %d %zd\n",
14606 phba->pcidev->device, magic_number, ftype, fid,
14607 fsize, fw->size);
14608 rc = -EINVAL;
14609 } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
14610 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14611 "3021 Firmware downloads have been prohibited "
14612 "by a system configuration setting on "
14613 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14614 "%zd\n",
14615 phba->pcidev->device, magic_number, ftype, fid,
14616 fsize, fw->size);
14617 rc = -EACCES;
14618 } else {
14619 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14620 "3022 FW Download failed. Add Status x%x "
14621 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14622 "%zd\n",
14623 offset, phba->pcidev->device, magic_number,
14624 ftype, fid, fsize, fw->size);
14625 rc = -EIO;
14626 }
14627 return rc;
14628 }
14629
/**
 * lpfc_write_firmware - write a firmware image to the port
 * @fw: pointer to the firmware image returned by the firmware loader.
 * @context: pointer to lpfc hba data structure (passed as void *).
 *
 * Copies the firmware image to the port in SLI4_PAGE_SIZE chunks and logs
 * the result. The update is skipped if the running firmware is already at
 * the image's revision.
 **/
14636 static void
14637 lpfc_write_firmware(const struct firmware *fw, void *context)
14638 {
14639 struct lpfc_hba *phba = (struct lpfc_hba *)context;
14640 char fwrev[FW_REV_STR_SIZE];
14641 struct lpfc_grp_hdr *image;
14642 struct list_head dma_buffer_list;
14643 int i, rc = 0;
14644 struct lpfc_dmabuf *dmabuf, *next;
14645 uint32_t offset = 0, temp_offset = 0;
14646 uint32_t magic_number, ftype, fid, fsize;
14647
14648
14649 if (!fw) {
14650 rc = -ENXIO;
14651 goto out;
14652 }
14653 image = (struct lpfc_grp_hdr *)fw->data;
14654
14655 magic_number = be32_to_cpu(image->magic_number);
14656 ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
14657 fid = bf_get_be32(lpfc_grp_hdr_id, image);
14658 fsize = be32_to_cpu(image->size);
14659
14660 INIT_LIST_HEAD(&dma_buffer_list);
14661 lpfc_decode_firmware_rev(phba, fwrev, 1);
14662 if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14663 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14664 "3023 Updating Firmware, Current Version:%s "
14665 "New Version:%s\n",
14666 fwrev, image->revision);
14667 for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
14668 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
14669 GFP_KERNEL);
14670 if (!dmabuf) {
14671 rc = -ENOMEM;
14672 goto release_out;
14673 }
14674 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14675 SLI4_PAGE_SIZE,
14676 &dmabuf->phys,
14677 GFP_KERNEL);
14678 if (!dmabuf->virt) {
14679 kfree(dmabuf);
14680 rc = -ENOMEM;
14681 goto release_out;
14682 }
14683 list_add_tail(&dmabuf->list, &dma_buffer_list);
14684 }
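		/*
		 * Stream the image to the port: fill the DMA buffer list with
		 * the next chunk of the file and write it with lpfc_wr_object(),
		 * advancing the offset until the whole image is transferred.
		 */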
14685 while (offset < fw->size) {
14686 temp_offset = offset;
14687 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
14688 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14689 memcpy(dmabuf->virt,
14690 fw->data + temp_offset,
14691 fw->size - temp_offset);
14692 temp_offset = fw->size;
14693 break;
14694 }
14695 memcpy(dmabuf->virt, fw->data + temp_offset,
14696 SLI4_PAGE_SIZE);
14697 temp_offset += SLI4_PAGE_SIZE;
14698 }
14699 rc = lpfc_wr_object(phba, &dma_buffer_list,
14700 (fw->size - offset), &offset);
14701 if (rc) {
14702 rc = lpfc_log_write_firmware_error(phba, offset,
14703 magic_number,
14704 ftype,
14705 fid,
14706 fsize,
14707 fw);
14708 goto release_out;
14709 }
14710 }
14711 rc = offset;
14712 } else
14713 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14714 "3029 Skipped Firmware update, Current "
14715 "Version:%s New Version:%s\n",
14716 fwrev, image->revision);
14717
14718 release_out:
14719 list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
14720 list_del(&dmabuf->list);
14721 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14722 dmabuf->virt, dmabuf->phys);
14723 kfree(dmabuf);
14724 }
14725 release_firmware(fw);
14726 out:
14727 if (rc < 0)
14728 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14729 "3062 Firmware update error, status %d.\n", rc);
14730 else
14731 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14732 "3024 Firmware update success: size %d.\n", rc);
14733 }
14734
/**
 * lpfc_sli4_request_firmware_update - Request a firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: INT_FW_UPGRADE or RUN_FW_UPGRADE.
 *
 * Requests the model-specific firmware image from the kernel firmware
 * loader, either asynchronously (INT_FW_UPGRADE) or synchronously
 * (RUN_FW_UPGRADE), and writes it to the port.
 *
 * Return: 0 on success, negative errno on failure.
 **/
14743 int
14744 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14745 {
14746 uint8_t file_name[ELX_MODEL_NAME_SIZE];
14747 int ret;
14748 const struct firmware *fw;
14749
14750
14751 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14752 LPFC_SLI_INTF_IF_TYPE_2)
14753 return -EPERM;
14754
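	/* Firmware files are named after the HBA model, i.e. "<ModelName>.grp" */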
14755 snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
14756
14757 if (fw_upgrade == INT_FW_UPGRADE) {
14758 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14759 file_name, &phba->pcidev->dev,
14760 GFP_KERNEL, (void *)phba,
14761 lpfc_write_firmware);
14762 } else if (fw_upgrade == RUN_FW_UPGRADE) {
14763 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14764 if (!ret)
14765 lpfc_write_firmware(fw, (void *)phba);
14766 } else {
14767 ret = -EINVAL;
14768 }
14769
14770 return ret;
14771 }
14772
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to register an SLI-4 device
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine attaches an Emulex HBA with an SLI-4 interface spec to the
 * PCI subsystem. It allocates the HBA data structure, maps PCI memory
 * space, sets up driver resources and interrupts, creates the SCSI host,
 * brings the port up, and registers the NVMe localport when NVMe initiator
 * mode is enabled.
 *
 * Return codes
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
14791 static int
14792 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
14793 {
14794 struct lpfc_hba *phba;
14795 struct lpfc_vport *vport = NULL;
14796 struct Scsi_Host *shost = NULL;
14797 int error;
14798 uint32_t cfg_mode, intr_mode;
14799
14800
14801 phba = lpfc_hba_alloc(pdev);
14802 if (!phba)
14803 return -ENOMEM;
14804
14805 INIT_LIST_HEAD(&phba->poll_list);
14806
14807
14808 error = lpfc_enable_pci_dev(phba);
14809 if (error)
14810 goto out_free_phba;
14811
14812
14813 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14814 if (error)
14815 goto out_disable_pci_dev;
14816
14817
14818 error = lpfc_sli4_pci_mem_setup(phba);
14819 if (error) {
14820 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14821 "1410 Failed to set up pci memory space.\n");
14822 goto out_disable_pci_dev;
14823 }
14824
14825
14826 error = lpfc_sli4_driver_resource_setup(phba);
14827 if (error) {
14828 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14829 "1412 Failed to set up driver resource.\n");
14830 goto out_unset_pci_mem_s4;
14831 }
14832
14833 INIT_LIST_HEAD(&phba->active_rrq_list);
14834 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14835
14836
14837 error = lpfc_setup_driver_resource_phase2(phba);
14838 if (error) {
14839 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14840 "1414 Failed to set up driver resource.\n");
14841 goto out_unset_driver_resource_s4;
14842 }
14843
14844
14845 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14846
14847
14848 cfg_mode = phba->cfg_use_msi;
14849
14850
14851 phba->pport = NULL;
14852 lpfc_stop_port(phba);
14853
14854
14855 lpfc_cpu_map_array_init(phba);
14856
14857
14858 lpfc_hba_eq_hdl_array_init(phba);
14859
14860
14861 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14862 if (intr_mode == LPFC_INTR_ERROR) {
14863 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14864 "0426 Failed to enable interrupt.\n");
14865 error = -ENODEV;
14866 goto out_unset_driver_resource;
14867 }
14868
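	/*
	 * Without MSI-X only one interrupt vector is available, so fall back
	 * to a single IRQ channel (and a single NVMET MRQ in target mode).
	 */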
14869 if (phba->intr_type != MSIX) {
14870 phba->cfg_irq_chann = 1;
14871 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14872 if (phba->nvmet_support)
14873 phba->cfg_nvmet_mrq = 1;
14874 }
14875 }
14876 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14877
14878
14879 error = lpfc_create_shost(phba);
14880 if (error) {
14881 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14882 "1415 Failed to create scsi host.\n");
14883 goto out_disable_intr;
14884 }
14885 vport = phba->pport;
14886 shost = lpfc_shost_from_vport(vport);
14887
14888
14889 error = lpfc_alloc_sysfs_attr(vport);
14890 if (error) {
14891 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14892 "1416 Failed to allocate sysfs attr\n");
14893 goto out_destroy_shost;
14894 }
14895
14896
14897 if (lpfc_sli4_hba_setup(phba)) {
14898 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14899 "1421 Failed to set up hba\n");
14900 error = -ENODEV;
14901 goto out_free_sysfs_attr;
14902 }
14903
14904
14905 phba->intr_mode = intr_mode;
14906 lpfc_log_intr_mode(phba, intr_mode);
14907
14908
14909 lpfc_post_init_setup(phba);
14910

	/*
	 * When not configured as an NVMET target, register this port as an
	 * NVMe initiator localport with the FC-NVMe transport.
	 */
14914 if (phba->nvmet_support == 0) {
14915 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/*
			 * Create the NVMe binding with the nvme_fc transport.
			 * A failure here is logged but does not fail the probe.
			 */
14921 error = lpfc_nvme_create_localport(vport);
14922 if (error) {
14923 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14924 "6004 NVME registration "
14925 "failed, error x%x\n",
14926 error);
14927 }
14928 }
14929 }
14930
14931
14932 if (phba->cfg_request_firmware_upgrade)
14933 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14934
14935
14936 lpfc_create_static_vport(phba);
14937
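	/*
	 * Set up the poll timer and register this HBA with the CPU hotplug
	 * state so SLI-4 EQ polling follows CPU online/offline transitions.
	 */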
14938 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14939 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
14940
14941 return 0;
14942
14943 out_free_sysfs_attr:
14944 lpfc_free_sysfs_attr(vport);
14945 out_destroy_shost:
14946 lpfc_destroy_shost(phba);
14947 out_disable_intr:
14948 lpfc_sli4_disable_intr(phba);
14949 out_unset_driver_resource:
14950 lpfc_unset_driver_resource_phase2(phba);
14951 out_unset_driver_resource_s4:
14952 lpfc_sli4_driver_resource_unset(phba);
14953 out_unset_pci_mem_s4:
14954 lpfc_sli4_pci_mem_unset(phba);
14955 out_disable_pci_dev:
14956 lpfc_disable_pci_dev(phba);
14957 if (shost)
14958 scsi_host_put(shost);
14959 out_free_phba:
14960 lpfc_hba_free(phba);
14961 return error;
14962 }
14963
/**
 * lpfc_pci_remove_one_s4 - PCI remove func for an SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine detaches an SLI-4 HBA from the PCI subsystem. It terminates
 * the vports, removes the SCSI host, unregisters the NVMe/NVMET ports,
 * brings the SLI-4 HBA down and releases all driver and PCI resources.
 **/
14973 static void
14974 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
14975 {
14976 struct Scsi_Host *shost = pci_get_drvdata(pdev);
14977 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14978 struct lpfc_vport **vports;
14979 struct lpfc_hba *phba = vport->phba;
14980 int i;
14981
14982
14983 spin_lock_irq(&phba->hbalock);
14984 vport->load_flag |= FC_UNLOADING;
14985 spin_unlock_irq(&phba->hbalock);
14986 if (phba->cgn_i)
14987 lpfc_unreg_congestion_buf(phba);
14988
14989 lpfc_free_sysfs_attr(vport);
14990
14991
14992 vports = lpfc_create_vport_work_array(phba);
14993 if (vports != NULL)
14994 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14995 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14996 continue;
14997 fc_vport_terminate(vports[i]->fc_vport);
14998 }
14999 lpfc_destroy_vport_work_array(phba, vports);
15000
15001
15002 fc_remove_host(shost);
15003 scsi_remove_host(shost);
15004
15005
15006
15007
15008 lpfc_cleanup(vport);
15009 lpfc_nvmet_destroy_targetport(phba);
15010 lpfc_nvme_destroy_localport(vport);
15011
15012
15013 if (phba->cfg_xri_rebalancing)
15014 lpfc_destroy_multixri_pools(phba);
15015
15016
15017
15018
15019
15020
15021 lpfc_debugfs_terminate(vport);
15022
15023 lpfc_stop_hba_timers(phba);
15024 spin_lock_irq(&phba->port_list_lock);
15025 list_del_init(&vport->listentry);
15026 spin_unlock_irq(&phba->port_list_lock);
15027
15028
15029
15030
15031 lpfc_io_free(phba);
15032 lpfc_free_iocb_list(phba);
15033 lpfc_sli4_hba_unset(phba);
15034
15035 lpfc_unset_driver_resource_phase2(phba);
15036 lpfc_sli4_driver_resource_unset(phba);
15037
15038
15039 lpfc_sli4_pci_mem_unset(phba);
15040
15041
15042 scsi_host_put(shost);
15043 lpfc_disable_pci_dev(phba);
15044
15045
15046 lpfc_hba_free(phba);
15047
15048 return;
15049 }
15050
/**
 * lpfc_pci_suspend_one_s4 - PCI suspend func for an SLI-4 device
 * @dev_d: pointer to device
 *
 * This routine is called by the kernel PM subsystem to quiesce an SLI-4
 * HBA: the port is taken offline, the worker thread is stopped, and the
 * device interrupt and SLI-4 queues are torn down.
 *
 * Return code
 *	0 - driver suspended the device
 **/
15071 static int __maybe_unused
15072 lpfc_pci_suspend_one_s4(struct device *dev_d)
15073 {
15074 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15075 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15076
15077 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15078 "2843 PCI device Power Management suspend.\n");
15079
15080
15081 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15082 lpfc_offline(phba);
15083 kthread_stop(phba->worker_thread);
15084
15085
15086 lpfc_sli4_disable_intr(phba);
15087 lpfc_sli4_queue_destroy(phba);
15088
15089 return 0;
15090 }
15091
/**
 * lpfc_pci_resume_one_s4 - PCI resume func for an SLI-4 device
 * @dev_d: pointer to device
 *
 * This routine is called by the kernel PM subsystem to resume an SLI-4
 * HBA: the worker thread is restarted, the device interrupt is re-enabled
 * in the previously used mode, and the HBA is restarted and brought online.
 *
 * Return codes
 *	0 - driver resumed the device
 *	Error value - error occurred while resuming the device
 **/
15111 static int __maybe_unused
15112 lpfc_pci_resume_one_s4(struct device *dev_d)
15113 {
15114 struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15115 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15116 uint32_t intr_mode;
15117 int error;
15118
15119 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15120 "0292 PCI device Power Management resume.\n");
15121
15122
15123 phba->worker_thread = kthread_run(lpfc_do_work, phba,
15124 "lpfc_worker_%d", phba->brd_no);
15125 if (IS_ERR(phba->worker_thread)) {
15126 error = PTR_ERR(phba->worker_thread);
15127 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15128 "0293 PM resume failed to start worker "
15129 "thread: error=x%x.\n", error);
15130 return error;
15131 }
15132
15133
15134 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15135 if (intr_mode == LPFC_INTR_ERROR) {
15136 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15137 "0294 PM resume Failed to enable interrupt\n");
15138 return -EIO;
15139 } else
15140 phba->intr_mode = intr_mode;
15141
15142
15143 lpfc_sli_brdrestart(phba);
15144 lpfc_online(phba);
15145
15146
15147 lpfc_log_intr_mode(phba, phba->intr_mode);
15148
15149 return 0;
15150 }
15151
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI-4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the device for PCI slot recovery by aborting all
 * outstanding FCP I/Os on the rings.
 **/
15159 static void
15160 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
15161 {
15162 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15163 "2828 PCI channel I/O abort preparing for recovery\n");
15164
15165
15166
15167
15168 lpfc_sli_abort_fcp_rings(phba);
15169 }
15170
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI-4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the device for a PCI slot reset: management I/O is
 * blocked, the port is taken offline with its I/Os flushed, HBA timers are
 * stopped, the SLI-4 queues are destroyed, and the device interrupt and
 * PCI device are disabled.
 **/
15179 static void
15180 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
15181 {
15182 int offline = pci_channel_offline(phba->pcidev);
15183
15184 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15185 "2826 PCI channel disable preparing for reset offline"
15186 " %d\n", offline);
15187
15188
15189 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
15190
15191
15192
15193 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
15194
15195 lpfc_sli_flush_io_rings(phba);
15196 lpfc_offline(phba);
15197
15198
15199 lpfc_stop_hba_timers(phba);
15200
15201 lpfc_sli4_queue_destroy(phba);
15202
15203 lpfc_sli4_disable_intr(phba);
15204 pci_disable_device(phba->pcidev);
15205 }
15206
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI-4 dev for pci perm failure
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine prepares the device for a PCI channel permanent failure:
 * SCSI devices are blocked, HBA timers are stopped and all outstanding
 * I/Os are flushed with an error completion.
 **/
15215 static void
15216 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
15217 {
15218 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15219 "2827 PCI channel permanent disable for failure\n");
15220
15221
15222 lpfc_scsi_dev_block(phba);
15223
15224
15225 lpfc_stop_hba_timers(phba);
15226
15227
15228 lpfc_sli_flush_io_rings(phba);
15229 }
15230
/**
 * lpfc_io_error_detected_s4 - PCI error detected handler for an SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called by the PCI error recovery framework when a PCI bus
 * error affecting this device is detected. Depending on the channel state
 * it prepares the device for recovery, for a slot reset, or for a permanent
 * disable, skipping the reset preparation if the HBA_PCI_ERR flag is
 * already set.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - recovery possible without a slot reset
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
15247 static pci_ers_result_t
15248 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15249 {
15250 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15251 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15252 bool hba_pci_err;
15253
15254 switch (state) {
15255 case pci_channel_io_normal:
15256
15257 lpfc_sli4_prep_dev_for_recover(phba);
15258 return PCI_ERS_RESULT_CAN_RECOVER;
15259 case pci_channel_io_frozen:
15260 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15261
15262 if (!hba_pci_err)
15263 lpfc_sli4_prep_dev_for_reset(phba);
15264 else
15265 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15266 "2832 Already handling PCI error "
15267 "state: x%x\n", state);
15268 return PCI_ERS_RESULT_NEED_RESET;
15269 case pci_channel_io_perm_failure:
15270 set_bit(HBA_PCI_ERR, &phba->bit_flags);
15271
15272 lpfc_sli4_prep_dev_for_perm_failure(phba);
15273 return PCI_ERS_RESULT_DISCONNECT;
15274 default:
15275 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15276 if (!hba_pci_err)
15277 lpfc_sli4_prep_dev_for_reset(phba);
15278
15279 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15280 "2825 Unknown PCI error state: x%x\n", state);
15281 lpfc_sli4_prep_dev_for_reset(phba);
15282 return PCI_ERS_RESULT_NEED_RESET;
15283 }
15284 }
15285
/**
 * lpfc_io_slot_reset_s4 - PCI slot reset handler for an SLI-4 device
 * @pdev: pointer to PCI device.
 *
 * This routine is called by the PCI error recovery framework after the PCI
 * slot has been reset. It re-enables the PCI device, restores its state,
 * clears the HBA_PCI_ERR flag, re-initializes the CPU map and re-enables
 * the device interrupt so the port can be brought back up by the io_resume
 * callback.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
15304 static pci_ers_result_t
15305 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
15306 {
15307 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15308 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15309 struct lpfc_sli *psli = &phba->sli;
15310 uint32_t intr_mode;
15311 bool hba_pci_err;
15312
15313 dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15314 if (pci_enable_device_mem(pdev)) {
15315 printk(KERN_ERR "lpfc: Cannot re-enable "
15316 "PCI device after reset.\n");
15317 return PCI_ERS_RESULT_DISCONNECT;
15318 }
15319
15320 pci_restore_state(pdev);
15321
15322 hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
15323 if (!hba_pci_err)
15324 dev_info(&pdev->dev,
15325 "hba_pci_err was not set, recovering slot reset.\n");
15326
	/*
	 * pci_restore_state() clears the device's saved_state flag, so save
	 * the restored state again for any later restore.
	 */
15330 pci_save_state(pdev);
15331
15332 if (pdev->is_busmaster)
15333 pci_set_master(pdev);
15334
15335 spin_lock_irq(&phba->hbalock);
15336 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15337 spin_unlock_irq(&phba->hbalock);
15338
15339
15340 lpfc_cpu_map_array_init(phba);
15341
15342 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15343 if (intr_mode == LPFC_INTR_ERROR) {
15344 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15345 "2824 Cannot re-enable interrupt after "
15346 "slot reset.\n");
15347 return PCI_ERS_RESULT_DISCONNECT;
15348 } else
15349 phba->intr_mode = intr_mode;
15350 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
15351
15352
15353 lpfc_log_intr_mode(phba, phba->intr_mode);
15354
15355 return PCI_ERS_RESULT_RECOVERED;
15356 }
15357
/**
 * lpfc_io_resume_s4 - PCI resume handler for an SLI-4 device
 * @pdev: pointer to PCI device.
 *
 * This routine is called by the PCI error recovery framework once error
 * handling has completed. If the SLI layer is not already active, it
 * restarts the HBA and brings the port back online.
 **/
15368 static void
15369 lpfc_io_resume_s4(struct pci_dev *pdev)
15370 {
15371 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15372 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15373
15374
15375
15376
15377
15378
15379
15380 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15381
15382 lpfc_sli_brdrestart(phba);
15383
15384 lpfc_online(phba);
15385 }
15386 }
15387
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to register a device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is the generic PCI probe entry point. It reads the SLI
 * interface register to determine whether the device uses the SLI-3 or
 * SLI-4 interface spec and dispatches to the corresponding device-specific
 * probe routine.
 *
 * Return codes
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
15406 static int
15407 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
15408 {
15409 int rc;
15410 struct lpfc_sli_intf intf;
15411
15412 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
15413 return -ENODEV;
15414
15415 if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
15416 (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
15417 rc = lpfc_pci_probe_one_s4(pdev, pid);
15418 else
15419 rc = lpfc_pci_probe_one_s3(pdev, pid);
15420
15421 return rc;
15422 }
15423
/**
 * lpfc_pci_remove_one - lpfc PCI func to unregister a device from PCI subsys
 * @pdev: pointer to PCI device
 *
 * This routine is the generic PCI remove entry point. It dispatches to the
 * SLI-3 or SLI-4 device-specific remove routine based on the device group
 * recorded at probe time.
 **/
15434 static void
15435 lpfc_pci_remove_one(struct pci_dev *pdev)
15436 {
15437 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15438 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15439
15440 switch (phba->pci_dev_grp) {
15441 case LPFC_PCI_DEV_LP:
15442 lpfc_pci_remove_one_s3(pdev);
15443 break;
15444 case LPFC_PCI_DEV_OC:
15445 lpfc_pci_remove_one_s4(pdev);
15446 break;
15447 default:
15448 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15449 "1424 Invalid PCI device group: 0x%x\n",
15450 phba->pci_dev_grp);
15451 break;
15452 }
15453 return;
15454 }
15455
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend a device for power mgmt
 * @dev: pointer to device
 *
 * This routine is the generic PM suspend entry point. It dispatches to the
 * SLI-3 or SLI-4 device-specific suspend routine based on the device group.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error value - error occurred while suspending the device
 **/
15469 static int __maybe_unused
15470 lpfc_pci_suspend_one(struct device *dev)
15471 {
15472 struct Scsi_Host *shost = dev_get_drvdata(dev);
15473 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15474 int rc = -ENODEV;
15475
15476 switch (phba->pci_dev_grp) {
15477 case LPFC_PCI_DEV_LP:
15478 rc = lpfc_pci_suspend_one_s3(dev);
15479 break;
15480 case LPFC_PCI_DEV_OC:
15481 rc = lpfc_pci_suspend_one_s4(dev);
15482 break;
15483 default:
15484 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15485 "1425 Invalid PCI device group: 0x%x\n",
15486 phba->pci_dev_grp);
15487 break;
15488 }
15489 return rc;
15490 }
15491
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume a device for power mgmt
 * @dev: pointer to device
 *
 * This routine is the generic PM resume entry point. It dispatches to the
 * SLI-3 or SLI-4 device-specific resume routine based on the device group.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error value - error occurred while resuming the device
 **/
15505 static int __maybe_unused
15506 lpfc_pci_resume_one(struct device *dev)
15507 {
15508 struct Scsi_Host *shost = dev_get_drvdata(dev);
15509 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15510 int rc = -ENODEV;
15511
15512 switch (phba->pci_dev_grp) {
15513 case LPFC_PCI_DEV_LP:
15514 rc = lpfc_pci_resume_one_s3(dev);
15515 break;
15516 case LPFC_PCI_DEV_OC:
15517 rc = lpfc_pci_resume_one_s4(dev);
15518 break;
15519 default:
15520 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15521 "1426 Invalid PCI device group: 0x%x\n",
15522 phba->pci_dev_grp);
15523 break;
15524 }
15525 return rc;
15526 }
15527
/**
 * lpfc_io_error_detected - lpfc method for handling a detected PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered in the PCI error handler table and is called
 * when a PCI bus error affecting this device is detected. It dispatches to
 * the SLI-3 or SLI-4 device-specific error handler based on the device
 * group.
 *
 * Return code
 *	the pci_ers_result_t returned by the device-specific handler
 **/
15543 static pci_ers_result_t
15544 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
15545 {
15546 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15547 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15548 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15549
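	/*
	 * If the HBA is already in error with its I/O queues flushed, error
	 * handling is in progress; just request the slot reset.
	 */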
15550 if (phba->link_state == LPFC_HBA_ERROR &&
15551 phba->hba_flag & HBA_IOQ_FLUSH)
15552 return PCI_ERS_RESULT_NEED_RESET;
15553
15554 switch (phba->pci_dev_grp) {
15555 case LPFC_PCI_DEV_LP:
15556 rc = lpfc_io_error_detected_s3(pdev, state);
15557 break;
15558 case LPFC_PCI_DEV_OC:
15559 rc = lpfc_io_error_detected_s4(pdev, state);
15560 break;
15561 default:
15562 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15563 "1427 Invalid PCI device group: 0x%x\n",
15564 phba->pci_dev_grp);
15565 break;
15566 }
15567 return rc;
15568 }
15569
/**
 * lpfc_io_slot_reset - lpfc method for restarting a PCI device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered in the PCI error handler table and is called
 * after the PCI slot has been reset. It dispatches to the SLI-3 or SLI-4
 * device-specific slot reset routine based on the device group.
 *
 * Return code
 *	the pci_ers_result_t returned by the device-specific handler
 **/
15584 static pci_ers_result_t
15585 lpfc_io_slot_reset(struct pci_dev *pdev)
15586 {
15587 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15588 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15589 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15590
15591 switch (phba->pci_dev_grp) {
15592 case LPFC_PCI_DEV_LP:
15593 rc = lpfc_io_slot_reset_s3(pdev);
15594 break;
15595 case LPFC_PCI_DEV_OC:
15596 rc = lpfc_io_slot_reset_s4(pdev);
15597 break;
15598 default:
15599 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15600 "1428 Invalid PCI device group: 0x%x\n",
15601 phba->pci_dev_grp);
15602 break;
15603 }
15604 return rc;
15605 }
15606
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device.
 *
 * This routine is registered in the PCI error handler table and is called
 * when error recovery has completed. It dispatches to the SLI-3 or SLI-4
 * device-specific I/O resume routine based on the device group.
 **/
15617 static void
15618 lpfc_io_resume(struct pci_dev *pdev)
15619 {
15620 struct Scsi_Host *shost = pci_get_drvdata(pdev);
15621 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15622
15623 switch (phba->pci_dev_grp) {
15624 case LPFC_PCI_DEV_LP:
15625 lpfc_io_resume_s3(pdev);
15626 break;
15627 case LPFC_PCI_DEV_OC:
15628 lpfc_io_resume_s4(pdev);
15629 break;
15630 default:
15631 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15632 "1429 Invalid PCI device group: 0x%x\n",
15633 phba->pci_dev_grp);
15634 break;
15635 }
15636 return;
15637 }
15638
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * When the Optimized Access Storage (OAS / XLane) feature is configured,
 * this routine enables it only if the SLI-4 parameters report OAS support;
 * otherwise it disables the feature and frees the device data mempool.
 **/
15649 static void
15650 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
15651 {
15652
15653 if (!phba->cfg_EnableXLane)
15654 return;
15655
15656 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15657 phba->cfg_fof = 1;
15658 } else {
15659 phba->cfg_fof = 0;
15660 mempool_destroy(phba->device_data_mem_pool);
15661 phba->device_data_mem_pool = NULL;
15662 }
15663
15664 return;
15665 }
15666
/**
 * lpfc_sli4_ras_init - Verify RAS-FW logging is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * Firmware logging is supported on if_type 6 adapters and on the G6 ASIC
 * family. When supported, it is enabled only on the configured PCI function
 * and only when a log buffer size has been configured.
 **/
15674 void
15675 lpfc_sli4_ras_init(struct lpfc_hba *phba)
15676 {
15677
15678 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15679 LPFC_SLI_INTF_IF_TYPE_6) ||
15680 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15681 LPFC_SLI_INTF_FAMILY_G6)) {
15682 phba->ras_fwlog.ras_hwsupport = true;
15683 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15684 phba->cfg_ras_fwlog_buffsize)
15685 phba->ras_fwlog.ras_enabled = true;
15686 else
15687 phba->ras_fwlog.ras_enabled = false;
15688 } else {
15689 phba->ras_fwlog.ras_hwsupport = false;
15690 }
15691 }
15692
15693
15694 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
15695
15696 static const struct pci_error_handlers lpfc_err_handler = {
15697 .error_detected = lpfc_io_error_detected,
15698 .slot_reset = lpfc_io_slot_reset,
15699 .resume = lpfc_io_resume,
15700 };
15701
15702 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
15703 lpfc_pci_suspend_one,
15704 lpfc_pci_resume_one);
15705
15706 static struct pci_driver lpfc_driver = {
15707 .name = LPFC_DRIVER_NAME,
15708 .id_table = lpfc_id_table,
15709 .probe = lpfc_pci_probe_one,
15710 .remove = lpfc_pci_remove_one,
15711 .shutdown = lpfc_pci_remove_one,
15712 .driver.pm = &lpfc_pci_pm_ops_one,
15713 .err_handler = &lpfc_err_handler,
15714 };
15715
15716 static const struct file_operations lpfc_mgmt_fop = {
15717 .owner = THIS_MODULE,
15718 };
15719
15720 static struct miscdevice lpfc_mgmt_dev = {
15721 .minor = MISC_DYNAMIC_MINOR,
15722 .name = "lpfcmgmt",
15723 .fops = &lpfc_mgmt_fop,
15724 };
15725
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is invoked when the lpfc module is loaded. It registers the
 * lpfcmgmt misc device, attaches the FC transport templates, initializes
 * the WQE and NVMET command templates, sets up the CPU hotplug state for
 * SLI-4 polling, and registers the PCI driver.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 **/
15738 static int __init
15739 lpfc_init(void)
15740 {
15741 int error = 0;
15742
15743 pr_info(LPFC_MODULE_DESC "\n");
15744 pr_info(LPFC_COPYRIGHT "\n");
15745
15746 error = misc_register(&lpfc_mgmt_dev);
15747 if (error)
15748 printk(KERN_ERR "Could not register lpfcmgmt device, "
15749 "misc_register returned with status %d", error);
15750
15751 error = -ENOMEM;
15752 lpfc_transport_functions.vport_create = lpfc_vport_create;
15753 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
15754 lpfc_transport_template =
15755 fc_attach_transport(&lpfc_transport_functions);
15756 if (lpfc_transport_template == NULL)
15757 goto unregister;
15758 lpfc_vport_transport_template =
15759 fc_attach_transport(&lpfc_vport_transport_functions);
15760 if (lpfc_vport_transport_template == NULL) {
15761 fc_release_transport(lpfc_transport_template);
15762 goto unregister;
15763 }
15764 lpfc_wqe_cmd_template();
15765 lpfc_nvmet_cmd_template();
15766
15767
15768 lpfc_present_cpu = num_present_cpus();
15769
15770 lpfc_pldv_detect = false;
15771
15772 error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
15773 "lpfc/sli4:online",
15774 lpfc_cpu_online, lpfc_cpu_offline);
15775 if (error < 0)
15776 goto cpuhp_failure;
15777 lpfc_cpuhp_state = error;
15778
15779 error = pci_register_driver(&lpfc_driver);
15780 if (error)
15781 goto unwind;
15782
15783 return error;
15784
15785 unwind:
15786 cpuhp_remove_multi_state(lpfc_cpuhp_state);
15787 cpuhp_failure:
15788 fc_release_transport(lpfc_transport_template);
15789 fc_release_transport(lpfc_vport_transport_template);
15790 unregister:
15791 misc_deregister(&lpfc_mgmt_dev);
15792
15793 return error;
15794 }
15795
15796 void lpfc_dmp_dbg(struct lpfc_hba *phba)
15797 {
15798 unsigned int start_idx;
15799 unsigned int dbg_cnt;
15800 unsigned int temp_idx;
15801 int i;
15802 int j = 0;
15803 unsigned long rem_nsec;
15804
15805 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
15806 return;
15807
15808 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15809 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
15810 if (!dbg_cnt)
15811 goto out;
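	/*
	 * dbg_log is a circular buffer; work out the index of the oldest
	 * entry so the messages can be replayed in the order they were
	 * logged.
	 */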
15812 temp_idx = start_idx;
15813 if (dbg_cnt >= DBG_LOG_SZ) {
15814 dbg_cnt = DBG_LOG_SZ;
15815 temp_idx -= 1;
15816 } else {
15817 if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
15818 temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
15819 } else {
15820 if (start_idx < dbg_cnt)
15821 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
15822 else
15823 start_idx -= dbg_cnt;
15824 }
15825 }
15826 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15827 start_idx, temp_idx, dbg_cnt);
15828
15829 for (i = 0; i < dbg_cnt; i++) {
15830 if ((start_idx + i) < DBG_LOG_SZ)
15831 temp_idx = (start_idx + i) % DBG_LOG_SZ;
15832 else
15833 temp_idx = j++;
15834 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
15835 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
15836 temp_idx,
15837 (unsigned long)phba->dbg_log[temp_idx].t_ns,
15838 rem_nsec / 1000,
15839 phba->dbg_log[temp_idx].log);
15840 }
15841 out:
15842 atomic_set(&phba->dbg_log_cnt, 0);
15843 atomic_set(&phba->dbg_log_dmping, 0);
15844 }
15845
15846 __printf(2, 3)
15847 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
15848 {
15849 unsigned int idx;
15850 va_list args;
15851 int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15852 struct va_format vaf;
15853
15854
15855 va_start(args, fmt);
15856 if (unlikely(dbg_dmping)) {
15857 vaf.fmt = fmt;
15858 vaf.va = &args;
15859 dev_info(&phba->pcidev->dev, "%pV", &vaf);
15860 va_end(args);
15861 return;
15862 }
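	/* Claim the next slot in the circular log and record the message */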
15863 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
15864 DBG_LOG_SZ;
15865
15866 atomic_inc(&phba->dbg_log_cnt);
15867
15868 vscnprintf(phba->dbg_log[idx].log,
15869 sizeof(phba->dbg_log[idx].log), fmt, args);
15870 va_end(args);
15871
15872 phba->dbg_log[idx].t_ns = local_clock();
15873 }
15874
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * It unregisters the misc device and the PCI driver, removes the CPU
 * hotplug state, releases the FC transport templates and destroys the HBA
 * index IDR.
 **/
15882 static void __exit
15883 lpfc_exit(void)
15884 {
15885 misc_deregister(&lpfc_mgmt_dev);
15886 pci_unregister_driver(&lpfc_driver);
15887 cpuhp_remove_multi_state(lpfc_cpuhp_state);
15888 fc_release_transport(lpfc_transport_template);
15889 fc_release_transport(lpfc_vport_transport_template);
15890 idr_destroy(&lpfc_hba_index);
15891 }
15892
15893 module_init(lpfc_init);
15894 module_exit(lpfc_exit);
15895 MODULE_LICENSE("GPL");
15896 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
15897 MODULE_AUTHOR("Broadcom");
15898 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);