0001 /*******************************************************************
0002  * This file is part of the Emulex Linux Device Driver for         *
0003  * Fibre Channel Host Bus Adapters.                                *
0004  * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
0005  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
0006  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
0007  * EMULEX and SLI are trademarks of Emulex.                        *
0008  * www.broadcom.com                                                *
0009  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
0010  *                                                                 *
0011  * This program is free software; you can redistribute it and/or   *
0012  * modify it under the terms of version 2 of the GNU General       *
0013  * Public License as published by the Free Software Foundation.    *
0014  * This program is distributed in the hope that it will be useful. *
0015  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
0016  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
0017  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
0018  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
0019  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
0020  * more details, a copy of which can be found in the file COPYING  *
0021  * included with this package.                                     *
0022  *******************************************************************/
0023 
0024 #include <linux/blkdev.h>
0025 #include <linux/delay.h>
0026 #include <linux/dma-mapping.h>
0027 #include <linux/idr.h>
0028 #include <linux/interrupt.h>
0029 #include <linux/module.h>
0030 #include <linux/kthread.h>
0031 #include <linux/pci.h>
0032 #include <linux/spinlock.h>
0033 #include <linux/ctype.h>
0034 #include <linux/aer.h>
0035 #include <linux/slab.h>
0036 #include <linux/firmware.h>
0037 #include <linux/miscdevice.h>
0038 #include <linux/percpu.h>
0039 #include <linux/msi.h>
0040 #include <linux/irq.h>
0041 #include <linux/bitops.h>
0042 #include <linux/crash_dump.h>
0043 #include <linux/cpu.h>
0044 #include <linux/cpuhotplug.h>
0045 
0046 #include <scsi/scsi.h>
0047 #include <scsi/scsi_device.h>
0048 #include <scsi/scsi_host.h>
0049 #include <scsi/scsi_transport_fc.h>
0050 #include <scsi/scsi_tcq.h>
0051 #include <scsi/fc/fc_fs.h>
0052 
0053 #include "lpfc_hw4.h"
0054 #include "lpfc_hw.h"
0055 #include "lpfc_sli.h"
0056 #include "lpfc_sli4.h"
0057 #include "lpfc_nl.h"
0058 #include "lpfc_disc.h"
0059 #include "lpfc.h"
0060 #include "lpfc_scsi.h"
0061 #include "lpfc_nvme.h"
0062 #include "lpfc_logmsg.h"
0063 #include "lpfc_crtn.h"
0064 #include "lpfc_vport.h"
0065 #include "lpfc_version.h"
0066 #include "lpfc_ids.h"
0067 
0068 static enum cpuhp_state lpfc_cpuhp_state;
0069 /* Used when mapping IRQ vectors in a driver centric manner */
0070 static uint32_t lpfc_present_cpu;
0071 static bool lpfc_pldv_detect;
0072 
0073 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
0074 static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
0075 static void lpfc_cpuhp_add(struct lpfc_hba *phba);
0076 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
0077 static int lpfc_post_rcv_buf(struct lpfc_hba *);
0078 static int lpfc_sli4_queue_verify(struct lpfc_hba *);
0079 static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
0080 static int lpfc_setup_endian_order(struct lpfc_hba *);
0081 static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
0082 static void lpfc_free_els_sgl_list(struct lpfc_hba *);
0083 static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
0084 static void lpfc_init_sgl_list(struct lpfc_hba *);
0085 static int lpfc_init_active_sgl_array(struct lpfc_hba *);
0086 static void lpfc_free_active_sgl(struct lpfc_hba *);
0087 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
0088 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
0089 static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
0090 static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
0091 static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
0092 static void lpfc_sli4_disable_intr(struct lpfc_hba *);
0093 static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
0094 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
0095 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
0096 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
0097 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
0098 static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
0099 
0100 static struct scsi_transport_template *lpfc_transport_template = NULL;
0101 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
0102 static DEFINE_IDR(lpfc_hba_index);
0103 #define LPFC_NVMET_BUF_POST 254
0104 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
0105 
0106 /**
0107  * lpfc_config_port_prep - Perform lpfc initialization prior to config port
0108  * @phba: pointer to lpfc hba data structure.
0109  *
0110  * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
0111  * mailbox command. It retrieves the revision information from the HBA and
0112  * collects the Vital Product Data (VPD) about the HBA for preparing the
0113  * configuration of the HBA.
0114  *
0115  * Return codes:
0116  *   0 - success.
0117  *   -ERESTART - requests the SLI layer to reset the HBA and try again.
0118  *   Any other value - indicates an error.
0119  **/
0120 int
0121 lpfc_config_port_prep(struct lpfc_hba *phba)
0122 {
0123     lpfc_vpd_t *vp = &phba->vpd;
0124     int i = 0, rc;
0125     LPFC_MBOXQ_t *pmb;
0126     MAILBOX_t *mb;
0127     char *lpfc_vpd_data = NULL;
0128     uint16_t offset = 0;
0129     static char licensed[56] =
0130             "key unlock for use with gnu public licensed code only\0";
0131     static int init_key = 1;
0132 
0133     pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0134     if (!pmb) {
0135         phba->link_state = LPFC_HBA_ERROR;
0136         return -ENOMEM;
0137     }
0138 
0139     mb = &pmb->u.mb;
0140     phba->link_state = LPFC_INIT_MBX_CMDS;
0141 
0142     if (lpfc_is_LC_HBA(phba->pcidev->device)) {
0143         if (init_key) {
0144             uint32_t *ptext = (uint32_t *) licensed;
0145 
0146             for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
0147                 *ptext = cpu_to_be32(*ptext);
0148             init_key = 0;
0149         }
0150 
0151         lpfc_read_nv(phba, pmb);
0152         memset((char*)mb->un.varRDnvp.rsvd3, 0,
0153             sizeof (mb->un.varRDnvp.rsvd3));
0154         memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
0155              sizeof (licensed));
0156 
0157         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
0158 
0159         if (rc != MBX_SUCCESS) {
0160             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0161                     "0324 Config Port initialization "
0162                     "error, mbxCmd x%x READ_NVPARM, "
0163                     "mbxStatus x%x\n",
0164                     mb->mbxCommand, mb->mbxStatus);
0165             mempool_free(pmb, phba->mbox_mem_pool);
0166             return -ERESTART;
0167         }
0168         memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
0169                sizeof(phba->wwnn));
0170         memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
0171                sizeof(phba->wwpn));
0172     }
0173 
0174     /*
0175      * Clear all option bits except LPFC_SLI3_BG_ENABLED,
0176      * which was already set in lpfc_get_cfgparam()
0177      */
0178     phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
0179 
0180     /* Setup and issue mailbox READ REV command */
0181     lpfc_read_rev(phba, pmb);
0182     rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
0183     if (rc != MBX_SUCCESS) {
0184         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0185                 "0439 Adapter failed to init, mbxCmd x%x "
0186                 "READ_REV, mbxStatus x%x\n",
0187                 mb->mbxCommand, mb->mbxStatus);
0188         mempool_free( pmb, phba->mbox_mem_pool);
0189         return -ERESTART;
0190     }
0191 
0192 
0193     /*
0194      * The value of rr must be 1 since the driver set the cv field to 1.
0195      * This setting requires the FW to set all revision fields.
0196      */
0197     if (mb->un.varRdRev.rr == 0) {
0198         vp->rev.rBit = 0;
0199         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0200                 "0440 Adapter failed to init, READ_REV has "
0201                 "missing revision information.\n");
0202         mempool_free(pmb, phba->mbox_mem_pool);
0203         return -ERESTART;
0204     }
0205 
0206     if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
0207         mempool_free(pmb, phba->mbox_mem_pool);
0208         return -EINVAL;
0209     }
0210 
0211     /* Save information as VPD data */
0212     vp->rev.rBit = 1;
0213     memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
0214     vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
0215     memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
0216     vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
0217     memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
0218     vp->rev.biuRev = mb->un.varRdRev.biuRev;
0219     vp->rev.smRev = mb->un.varRdRev.smRev;
0220     vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
0221     vp->rev.endecRev = mb->un.varRdRev.endecRev;
0222     vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
0223     vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
0224     vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
0225     vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
0226     vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
0227     vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
0228 
0229     /* If the sli feature level is less than 9, we must
0230      * tear down all RPIs and VPIs on link down if NPIV
0231      * is enabled.
0232      */
0233     if (vp->rev.feaLevelHigh < 9)
0234         phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
0235 
0236     if (lpfc_is_LC_HBA(phba->pcidev->device))
0237         memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
0238                         sizeof (phba->RandomData));
0239 
0240     /* Get adapter VPD information */
0241     lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
0242     if (!lpfc_vpd_data)
0243         goto out_free_mbox;
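         /* The VPD region can exceed a single mailbox response, so it is
          * dumped in passes: each DUMP mailbox command appends the data it
          * returns to lpfc_vpd_data at the current offset until the region
          * is exhausted or DMP_VPD_SIZE is reached.
          */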
0244     do {
0245         lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
0246         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
0247 
0248         if (rc != MBX_SUCCESS) {
0249             lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
0250                     "0441 VPD not present on adapter, "
0251                     "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
0252                     mb->mbxCommand, mb->mbxStatus);
0253             mb->un.varDmp.word_cnt = 0;
0254         }
0255         /* dump mem may return a zero when finished or when we get a
0256          * mailbox error; either way we are done.
0257          */
0258         if (mb->un.varDmp.word_cnt == 0)
0259             break;
0260 
0261         if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
0262             mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
0263         lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
0264                       lpfc_vpd_data + offset,
0265                       mb->un.varDmp.word_cnt);
0266         offset += mb->un.varDmp.word_cnt;
0267     } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
0268 
0269     lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
0270 
0271     kfree(lpfc_vpd_data);
0272 out_free_mbox:
0273     mempool_free(pmb, phba->mbox_mem_pool);
0274     return 0;
0275 }
0276 
0277 /**
0278  * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
0279  * @phba: pointer to lpfc hba data structure.
0280  * @pmboxq: pointer to the driver internal queue element for mailbox command.
0281  *
0282  * This is the completion handler for the driver's asynchronous event
0283  * configuration mailbox command to the device. If the mailbox command
0284  * returns successfully, it will set the internal async event support flag
0285  * to 1; otherwise, it will set the flag to 0.
0286  **/
0287 static void
0288 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
0289 {
0290     if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
0291         phba->temp_sensor_support = 1;
0292     else
0293         phba->temp_sensor_support = 0;
0294     mempool_free(pmboxq, phba->mbox_mem_pool);
0295     return;
0296 }
0297 
0298 /**
0299  * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
0300  * @phba: pointer to lpfc hba data structure.
0301  * @pmboxq: pointer to the driver internal queue element for mailbox command.
0302  *
0303  * This is the completion handler for the dump mailbox command for getting
0304  * wake up parameters. When this command completes, the response contains the
0305  * Option ROM version of the HBA. This function translates the version number
0306  * into a human readable string and stores it in OptionROMVersion.
0307  **/
0308 static void
0309 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
0310 {
0311     struct prog_id *prg;
0312     uint32_t prog_id_word;
0313     char dist = ' ';
0314     /* character array used for decoding dist type. */
0315     char dist_char[] = "nabx";
0316 
0317     if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
0318         mempool_free(pmboxq, phba->mbox_mem_pool);
0319         return;
0320     }
0321 
0322     prg = (struct prog_id *) &prog_id_word;
0323 
0324     /* word 7 contains the option rom version */
0325     prog_id_word = pmboxq->u.mb.un.varWords[7];
0326 
0327     /* Decode the Option rom version word to a readable string */
0328     if (prg->dist < 4)
0329         dist = dist_char[prg->dist];
0330 
0331     if ((prg->dist == 3) && (prg->num == 0))
0332         snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
0333             prg->ver, prg->rev, prg->lev);
0334     else
0335         snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
0336             prg->ver, prg->rev, prg->lev,
0337             dist, prg->num);
0338     mempool_free(pmboxq, phba->mbox_mem_pool);
0339     return;
0340 }
0341 
0342 /**
0343  * lpfc_update_vport_wwn - Updates the fc_nodename and fc_portname
0344  * @vport: pointer to lpfc vport data structure.
0345  *
0346  *
0347  * Return codes
0348  *   None.
0349  **/
0350 void
0351 lpfc_update_vport_wwn(struct lpfc_vport *vport)
0352 {
0353     struct lpfc_hba *phba = vport->phba;
0354 
0355     /*
0356      * If the node name is empty then copy the service params name,
0357      * otherwise copy the fc name into the service params
0358      */
0359     if (vport->fc_nodename.u.wwn[0] == 0)
0360         memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
0361             sizeof(struct lpfc_name));
0362     else
0363         memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
0364             sizeof(struct lpfc_name));
0365 
0366     /*
0367      * If the port name has changed, then set the Param changes flag
0368      * to unreg the login
0369      */
0370     if (vport->fc_portname.u.wwn[0] != 0 &&
0371         memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
0372                sizeof(struct lpfc_name))) {
0373         vport->vport_flag |= FAWWPN_PARAM_CHG;
0374 
0375         if (phba->sli_rev == LPFC_SLI_REV4 &&
0376             vport->port_type == LPFC_PHYSICAL_PORT &&
0377             phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
0378             if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
0379                 phba->sli4_hba.fawwpn_flag &=
0380                         ~LPFC_FAWWPN_FABRIC;
0381             lpfc_printf_log(phba, KERN_INFO,
0382                     LOG_SLI | LOG_DISCOVERY | LOG_ELS,
0383                     "2701 FA-PWWN change WWPN from %llx to "
0384                     "%llx: vflag x%x fawwpn_flag x%x\n",
0385                     wwn_to_u64(vport->fc_portname.u.wwn),
0386                     wwn_to_u64
0387                        (vport->fc_sparam.portName.u.wwn),
0388                     vport->vport_flag,
0389                     phba->sli4_hba.fawwpn_flag);
0390             memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
0391                    sizeof(struct lpfc_name));
0392         }
0393     }
0394 
0395     if (vport->fc_portname.u.wwn[0] == 0)
0396         memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
0397                sizeof(struct lpfc_name));
0398     else
0399         memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
0400                sizeof(struct lpfc_name));
0401 }
0402 
0403 /**
0404  * lpfc_config_port_post - Perform lpfc initialization after config port
0405  * @phba: pointer to lpfc hba data structure.
0406  *
0407  * This routine will do LPFC initialization after the CONFIG_PORT mailbox
0408  * command call. It performs all internal resource and state setups on the
0409  * port: post IOCB buffers, enable appropriate host interrupt attentions,
0410  * ELS ring timers, etc.
0411  *
0412  * Return codes
0413  *   0 - success.
0414  *   Any other value - error.
0415  **/
0416 int
0417 lpfc_config_port_post(struct lpfc_hba *phba)
0418 {
0419     struct lpfc_vport *vport = phba->pport;
0420     struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
0421     LPFC_MBOXQ_t *pmb;
0422     MAILBOX_t *mb;
0423     struct lpfc_dmabuf *mp;
0424     struct lpfc_sli *psli = &phba->sli;
0425     uint32_t status, timeout;
0426     int i, j;
0427     int rc;
0428 
0429     spin_lock_irq(&phba->hbalock);
0430     /*
0431      * If the Config port completed correctly the HBA is no longer
0432      * overheated.
0433      */
0434     if (phba->over_temp_state == HBA_OVER_TEMP)
0435         phba->over_temp_state = HBA_NORMAL_TEMP;
0436     spin_unlock_irq(&phba->hbalock);
0437 
0438     pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0439     if (!pmb) {
0440         phba->link_state = LPFC_HBA_ERROR;
0441         return -ENOMEM;
0442     }
0443     mb = &pmb->u.mb;
0444 
0445     /* Get login parameters for NID.  */
0446     rc = lpfc_read_sparam(phba, pmb, 0);
0447     if (rc) {
0448         mempool_free(pmb, phba->mbox_mem_pool);
0449         return -ENOMEM;
0450     }
0451 
0452     pmb->vport = vport;
0453     if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
0454         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0455                 "0448 Adapter failed init, mbxCmd x%x "
0456                 "READ_SPARM mbxStatus x%x\n",
0457                 mb->mbxCommand, mb->mbxStatus);
0458         phba->link_state = LPFC_HBA_ERROR;
0459         lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
0460         return -EIO;
0461     }
0462 
0463     mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
0464 
0465     /* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
0466      * longer needed.  Prevent unintended ctx_buf access as the mbox is
0467      * reused.
0468      */
0469     memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
0470     lpfc_mbuf_free(phba, mp->virt, mp->phys);
0471     kfree(mp);
0472     pmb->ctx_buf = NULL;
0473     lpfc_update_vport_wwn(vport);
0474 
0475     /* Update the fc_host data structures with new wwn. */
0476     fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
0477     fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
0478     fc_host_max_npiv_vports(shost) = phba->max_vpi;
0479 
0480     /* If no serial number in VPD data, use low 6 bytes of WWNN */
0481     /* This should be consolidated into parse_vpd ? - mr */
0482     if (phba->SerialNumber[0] == 0) {
0483         uint8_t *outptr;
0484 
0485         outptr = &vport->fc_nodename.u.s.IEEE[0];
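             /* Each of the 6 IEEE bytes of the node name produces two
              * serial number characters: nibble values 0-9 map to ASCII
              * '0'-'9' (0x30 + n) and 10-15 map to 'a'-'f' (0x61 + n - 10).
              */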
0486         for (i = 0; i < 12; i++) {
0487             status = *outptr++;
0488             j = ((status & 0xf0) >> 4);
0489             if (j <= 9)
0490                 phba->SerialNumber[i] =
0491                     (char)((uint8_t) 0x30 + (uint8_t) j);
0492             else
0493                 phba->SerialNumber[i] =
0494                     (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
0495             i++;
0496             j = (status & 0xf);
0497             if (j <= 9)
0498                 phba->SerialNumber[i] =
0499                     (char)((uint8_t) 0x30 + (uint8_t) j);
0500             else
0501                 phba->SerialNumber[i] =
0502                     (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
0503         }
0504     }
0505 
0506     lpfc_read_config(phba, pmb);
0507     pmb->vport = vport;
0508     if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
0509         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0510                 "0453 Adapter failed to init, mbxCmd x%x "
0511                 "READ_CONFIG, mbxStatus x%x\n",
0512                 mb->mbxCommand, mb->mbxStatus);
0513         phba->link_state = LPFC_HBA_ERROR;
0514         mempool_free( pmb, phba->mbox_mem_pool);
0515         return -EIO;
0516     }
0517 
0518     /* Check if the port is disabled */
0519     lpfc_sli_read_link_ste(phba);
0520 
0521     /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
0522     if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
0523         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
0524                 "3359 HBA queue depth changed from %d to %d\n",
0525                 phba->cfg_hba_queue_depth,
0526                 mb->un.varRdConfig.max_xri);
0527         phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
0528     }
0529 
0530     phba->lmt = mb->un.varRdConfig.lmt;
0531 
0532     /* Get the default values for Model Name and Description */
0533     lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
0534 
0535     phba->link_state = LPFC_LINK_DOWN;
0536 
0537     /* Only process IOCBs on ELS ring till hba_state is READY */
0538     if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
0539         psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
0540     if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
0541         psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
0542 
0543     /* Post receive buffers for desired rings */
0544     if (phba->sli_rev != 3)
0545         lpfc_post_rcv_buf(phba);
0546 
0547     /*
0548      * Configure HBA MSI-X attention conditions to messages if MSI-X mode
0549      */
0550     if (phba->intr_type == MSIX) {
0551         rc = lpfc_config_msi(phba, pmb);
0552         if (rc) {
0553             mempool_free(pmb, phba->mbox_mem_pool);
0554             return -EIO;
0555         }
0556         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
0557         if (rc != MBX_SUCCESS) {
0558             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0559                     "0352 Config MSI mailbox command "
0560                     "failed, mbxCmd x%x, mbxStatus x%x\n",
0561                     pmb->u.mb.mbxCommand,
0562                     pmb->u.mb.mbxStatus);
0563             mempool_free(pmb, phba->mbox_mem_pool);
0564             return -EIO;
0565         }
0566     }
0567 
0568     spin_lock_irq(&phba->hbalock);
0569     /* Initialize ERATT handling flag */
0570     phba->hba_flag &= ~HBA_ERATT_HANDLED;
0571 
0572     /* Enable appropriate host interrupts */
0573     if (lpfc_readl(phba->HCregaddr, &status)) {
0574         spin_unlock_irq(&phba->hbalock);
0575         return -EIO;
0576     }
0577     status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
0578     if (psli->num_rings > 0)
0579         status |= HC_R0INT_ENA;
0580     if (psli->num_rings > 1)
0581         status |= HC_R1INT_ENA;
0582     if (psli->num_rings > 2)
0583         status |= HC_R2INT_ENA;
0584     if (psli->num_rings > 3)
0585         status |= HC_R3INT_ENA;
0586 
0587     if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
0588         (phba->cfg_poll & DISABLE_FCP_RING_INT))
0589         status &= ~(HC_R0INT_ENA);
0590 
0591     writel(status, phba->HCregaddr);
0592     readl(phba->HCregaddr); /* flush */
0593     spin_unlock_irq(&phba->hbalock);
0594 
0595     /* Set up ring-0 (ELS) timer */
0596     timeout = phba->fc_ratov * 2;
0597     mod_timer(&vport->els_tmofunc,
0598           jiffies + msecs_to_jiffies(1000 * timeout));
0599     /* Set up heart beat (HB) timer */
0600     mod_timer(&phba->hb_tmofunc,
0601           jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
0602     phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
0603     phba->last_completion_time = jiffies;
0604     /* Set up error attention (ERATT) polling timer */
0605     mod_timer(&phba->eratt_poll,
0606           jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
0607 
0608     if (phba->hba_flag & LINK_DISABLED) {
0609         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0610                 "2598 Adapter Link is disabled.\n");
0611         lpfc_down_link(phba, pmb);
0612         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0613         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
0614         if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
0615             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0616                     "2599 Adapter failed to issue DOWN_LINK"
0617                     " mbox command rc 0x%x\n", rc);
0618 
0619             mempool_free(pmb, phba->mbox_mem_pool);
0620             return -EIO;
0621         }
0622     } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
0623         mempool_free(pmb, phba->mbox_mem_pool);
0624         rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
0625         if (rc)
0626             return rc;
0627     }
0628     /* MBOX buffer will be freed in mbox compl */
0629     pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0630     if (!pmb) {
0631         phba->link_state = LPFC_HBA_ERROR;
0632         return -ENOMEM;
0633     }
0634 
0635     lpfc_config_async(phba, pmb, LPFC_ELS_RING);
0636     pmb->mbox_cmpl = lpfc_config_async_cmpl;
0637     pmb->vport = phba->pport;
0638     rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
0639 
0640     if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
0641         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0642                 "0456 Adapter failed to issue "
0643                 "ASYNCEVT_ENABLE mbox status x%x\n",
0644                 rc);
0645         mempool_free(pmb, phba->mbox_mem_pool);
0646     }
0647 
0648     /* Get Option rom version */
0649     pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0650     if (!pmb) {
0651         phba->link_state = LPFC_HBA_ERROR;
0652         return -ENOMEM;
0653     }
0654 
0655     lpfc_dump_wakeup_param(phba, pmb);
0656     pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
0657     pmb->vport = phba->pport;
0658     rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
0659 
0660     if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
0661         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0662                 "0435 Adapter failed "
0663                 "to get Option ROM version status x%x\n", rc);
0664         mempool_free(pmb, phba->mbox_mem_pool);
0665     }
0666 
0667     return 0;
0668 }
0669 
0670 /**
0671  * lpfc_sli4_refresh_params - update driver copy of params.
0672  * @phba: Pointer to HBA context object.
0673  *
0674  * This is called to refresh the driver's copy of dynamic fields from the
0675  * common_get_sli4_parameters descriptor.
0676  **/
0677 int
0678 lpfc_sli4_refresh_params(struct lpfc_hba *phba)
0679 {
0680     LPFC_MBOXQ_t *mboxq;
0681     struct lpfc_mqe *mqe;
0682     struct lpfc_sli4_parameters *mbx_sli4_parameters;
0683     int length, rc;
0684 
0685     mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0686     if (!mboxq)
0687         return -ENOMEM;
0688 
0689     mqe = &mboxq->u.mqe;
0690     /* Read the port's SLI4 Config Parameters */
0691     length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
0692           sizeof(struct lpfc_sli4_cfg_mhdr));
0693     lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
0694              LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
0695              length, LPFC_SLI4_MBX_EMBED);
0696 
0697     rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
0698     if (unlikely(rc)) {
0699         mempool_free(mboxq, phba->mbox_mem_pool);
0700         return rc;
0701     }
0702     mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
0703 
0704     /* Are we forcing MI off via module parameter? */
0705     if (phba->cfg_enable_mi)
0706         phba->sli4_hba.pc_sli4_params.mi_ver =
0707             bf_get(cfg_mi_ver, mbx_sli4_parameters);
0708     else
0709         phba->sli4_hba.pc_sli4_params.mi_ver = 0;
0710 
0711     phba->sli4_hba.pc_sli4_params.cmf =
0712             bf_get(cfg_cmf, mbx_sli4_parameters);
0713     phba->sli4_hba.pc_sli4_params.pls =
0714             bf_get(cfg_pvl, mbx_sli4_parameters);
0715 
0716     mempool_free(mboxq, phba->mbox_mem_pool);
0717     return rc;
0718 }
0719 
0720 /**
0721  * lpfc_hba_init_link - Initialize the FC link
0722  * @phba: pointer to lpfc hba data structure.
0723  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
0724  *
0725  * This routine will issue the INIT_LINK mailbox command call.
0726  * It is available to other drivers through the lpfc_hba data
0727  * structure for use as a delayed link up mechanism with the
0728  * module parameter lpfc_suppress_link_up.
0729  *
0730  * Return code
0731  *      0 - success
0732  *      Any other value - error
0733  **/
0734 static int
0735 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
0736 {
0737     return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
0738 }
0739 
0740 /**
0741  * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
0742  * @phba: pointer to lpfc hba data structure.
0743  * @fc_topology: desired fc topology.
0744  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
0745  *
0746  * This routine will issue the INIT_LINK mailbox command call.
0747  * It is available to other drivers through the lpfc_hba data
0748  * structure for use as a delayed link up mechanism with the
0749  * module parameter lpfc_suppress_link_up.
0750  *
0751  * Return code
0752  *              0 - success
0753  *              Any other value - error
0754  **/
0755 int
0756 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
0757                    uint32_t flag)
0758 {
0759     struct lpfc_vport *vport = phba->pport;
0760     LPFC_MBOXQ_t *pmb;
0761     MAILBOX_t *mb;
0762     int rc;
0763 
0764     pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0765     if (!pmb) {
0766         phba->link_state = LPFC_HBA_ERROR;
0767         return -ENOMEM;
0768     }
0769     mb = &pmb->u.mb;
0770     pmb->vport = vport;
0771 
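         /* Reject a user-configured link speed that the link module type
          * (phba->lmt, reported by READ_CONFIG) does not support and fall
          * back to auto-negotiated speed.
          */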
0772     if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
0773         ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
0774          !(phba->lmt & LMT_1Gb)) ||
0775         ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
0776          !(phba->lmt & LMT_2Gb)) ||
0777         ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
0778          !(phba->lmt & LMT_4Gb)) ||
0779         ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
0780          !(phba->lmt & LMT_8Gb)) ||
0781         ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
0782          !(phba->lmt & LMT_10Gb)) ||
0783         ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
0784          !(phba->lmt & LMT_16Gb)) ||
0785         ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
0786          !(phba->lmt & LMT_32Gb)) ||
0787         ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
0788          !(phba->lmt & LMT_64Gb))) {
0789         /* Reset link speed to auto */
0790         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0791                 "1302 Invalid speed for this board:%d "
0792                 "Reset link speed to auto.\n",
0793                 phba->cfg_link_speed);
0794         phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
0795     }
0796     lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
0797     pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0798     if (phba->sli_rev < LPFC_SLI_REV4)
0799         lpfc_set_loopback_flag(phba);
0800     rc = lpfc_sli_issue_mbox(phba, pmb, flag);
0801     if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
0802         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0803                 "0498 Adapter failed to init, mbxCmd x%x "
0804                 "INIT_LINK, mbxStatus x%x\n",
0805                 mb->mbxCommand, mb->mbxStatus);
0806         if (phba->sli_rev <= LPFC_SLI_REV3) {
0807             /* Clear all interrupt enable conditions */
0808             writel(0, phba->HCregaddr);
0809             readl(phba->HCregaddr); /* flush */
0810             /* Clear all pending interrupts */
0811             writel(0xffffffff, phba->HAregaddr);
0812             readl(phba->HAregaddr); /* flush */
0813         }
0814         phba->link_state = LPFC_HBA_ERROR;
0815         if (rc != MBX_BUSY || flag == MBX_POLL)
0816             mempool_free(pmb, phba->mbox_mem_pool);
0817         return -EIO;
0818     }
0819     phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
0820     if (flag == MBX_POLL)
0821         mempool_free(pmb, phba->mbox_mem_pool);
0822 
0823     return 0;
0824 }
0825 
0826 /**
0827  * lpfc_hba_down_link - this routine downs the FC link
0828  * @phba: pointer to lpfc hba data structure.
0829  * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
0830  *
0831  * This routine will issue the DOWN_LINK mailbox command call.
0832  * It is available to other drivers through the lpfc_hba data
0833  * structure for use to stop the link.
0834  *
0835  * Return code
0836  *      0 - success
0837  *      Any other value - error
0838  **/
0839 static int
0840 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
0841 {
0842     LPFC_MBOXQ_t *pmb;
0843     int rc;
0844 
0845     pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0846     if (!pmb) {
0847         phba->link_state = LPFC_HBA_ERROR;
0848         return -ENOMEM;
0849     }
0850 
0851     lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0852             "0491 Adapter Link is disabled.\n");
0853     lpfc_down_link(phba, pmb);
0854     pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0855     rc = lpfc_sli_issue_mbox(phba, pmb, flag);
0856     if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
0857         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
0858                 "2522 Adapter failed to issue DOWN_LINK"
0859                 " mbox command rc 0x%x\n", rc);
0860 
0861         mempool_free(pmb, phba->mbox_mem_pool);
0862         return -EIO;
0863     }
0864     if (flag == MBX_POLL)
0865         mempool_free(pmb, phba->mbox_mem_pool);
0866 
0867     return 0;
0868 }
0869 
0870 /**
0871  * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
0872  * @phba: pointer to lpfc HBA data structure.
0873  *
0874  * This routine will do LPFC uninitialization before the HBA is reset when
0875  * bringing down the SLI Layer.
0876  *
0877  * Return codes
0878  *   0 - success.
0879  *   Any other value - error.
0880  **/
0881 int
0882 lpfc_hba_down_prep(struct lpfc_hba *phba)
0883 {
0884     struct lpfc_vport **vports;
0885     int i;
0886 
0887     if (phba->sli_rev <= LPFC_SLI_REV3) {
0888         /* Disable interrupts */
0889         writel(0, phba->HCregaddr);
0890         readl(phba->HCregaddr); /* flush */
0891     }
0892 
0893     if (phba->pport->load_flag & FC_UNLOADING)
0894         lpfc_cleanup_discovery_resources(phba->pport);
0895     else {
0896         vports = lpfc_create_vport_work_array(phba);
0897         if (vports != NULL)
0898             for (i = 0; i <= phba->max_vports &&
0899                 vports[i] != NULL; i++)
0900                 lpfc_cleanup_discovery_resources(vports[i]);
0901         lpfc_destroy_vport_work_array(phba, vports);
0902     }
0903     return 0;
0904 }
0905 
0906 /**
0907  * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
0908  * rspiocb which got deferred
0909  *
0910  * @phba: pointer to lpfc HBA data structure.
0911  *
0912  * This routine will clean up completed slow path events after the HBA is
0913  * reset when bringing down the SLI Layer.
0914  *
0915  *
0916  * Return codes
0917  *   void.
0918  **/
0919 static void
0920 lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
0921 {
0922     struct lpfc_iocbq *rspiocbq;
0923     struct hbq_dmabuf *dmabuf;
0924     struct lpfc_cq_event *cq_event;
0925 
0926     spin_lock_irq(&phba->hbalock);
0927     phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
0928     spin_unlock_irq(&phba->hbalock);
0929 
0930     while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
0931         /* Get the response iocb from the head of work queue */
0932         spin_lock_irq(&phba->hbalock);
0933         list_remove_head(&phba->sli4_hba.sp_queue_event,
0934                  cq_event, struct lpfc_cq_event, list);
0935         spin_unlock_irq(&phba->hbalock);
0936 
0937         switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
0938         case CQE_CODE_COMPL_WQE:
0939             rspiocbq = container_of(cq_event, struct lpfc_iocbq,
0940                          cq_event);
0941             lpfc_sli_release_iocbq(phba, rspiocbq);
0942             break;
0943         case CQE_CODE_RECEIVE:
0944         case CQE_CODE_RECEIVE_V1:
0945             dmabuf = container_of(cq_event, struct hbq_dmabuf,
0946                           cq_event);
0947             lpfc_in_buf_free(phba, &dmabuf->dbuf);
0948         }
0949     }
0950 }
0951 
0952 /**
0953  * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
0954  * @phba: pointer to lpfc HBA data structure.
0955  *
0956  * This routine will clean up posted ELS buffers after the HBA is reset
0957  * when bringing down the SLI Layer.
0958  *
0959  *
0960  * Return codes
0961  *   void.
0962  **/
0963 static void
0964 lpfc_hba_free_post_buf(struct lpfc_hba *phba)
0965 {
0966     struct lpfc_sli *psli = &phba->sli;
0967     struct lpfc_sli_ring *pring;
0968     struct lpfc_dmabuf *mp, *next_mp;
0969     LIST_HEAD(buflist);
0970     int count;
0971 
0972     if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
0973         lpfc_sli_hbqbuf_free_all(phba);
0974     else {
0975         /* Cleanup preposted buffers on the ELS ring */
0976         pring = &psli->sli3_ring[LPFC_ELS_RING];
0977         spin_lock_irq(&phba->hbalock);
0978         list_splice_init(&pring->postbufq, &buflist);
0979         spin_unlock_irq(&phba->hbalock);
0980 
0981         count = 0;
0982         list_for_each_entry_safe(mp, next_mp, &buflist, list) {
0983             list_del(&mp->list);
0984             count++;
0985             lpfc_mbuf_free(phba, mp->virt, mp->phys);
0986             kfree(mp);
0987         }
0988 
0989         spin_lock_irq(&phba->hbalock);
0990         pring->postbufq_cnt -= count;
0991         spin_unlock_irq(&phba->hbalock);
0992     }
0993 }
0994 
0995 /**
0996  * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
0997  * @phba: pointer to lpfc HBA data structure.
0998  *
0999  * This routine will clean up the txcmplq after the HBA is reset when bringing
1000  * down the SLI Layer.
1001  *
1002  * Return codes
1003  *   void
1004  **/
1005 static void
1006 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
1007 {
1008     struct lpfc_sli *psli = &phba->sli;
1009     struct lpfc_queue *qp = NULL;
1010     struct lpfc_sli_ring *pring;
1011     LIST_HEAD(completions);
1012     int i;
1013     struct lpfc_iocbq *piocb, *next_iocb;
1014 
1015     if (phba->sli_rev != LPFC_SLI_REV4) {
1016         for (i = 0; i < psli->num_rings; i++) {
1017             pring = &psli->sli3_ring[i];
1018             spin_lock_irq(&phba->hbalock);
1019             /* At this point in time the HBA is either reset or DOA.
1020              * Nothing should be on txcmplq as it will
1021              * NEVER complete.
1022              */
1023             list_splice_init(&pring->txcmplq, &completions);
1024             pring->txcmplq_cnt = 0;
1025             spin_unlock_irq(&phba->hbalock);
1026 
1027             lpfc_sli_abort_iocb_ring(phba, pring);
1028         }
1029         /* Cancel all the IOCBs from the completions list */
1030         lpfc_sli_cancel_iocbs(phba, &completions,
1031                       IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
1032         return;
1033     }
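         /* SLI4: walk every work queue ring, clear the on-txcmplq flag on
          * each pending iocb, then abort the ring and cancel the iocbs.
          */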
1034     list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1035         pring = qp->pring;
1036         if (!pring)
1037             continue;
1038         spin_lock_irq(&pring->ring_lock);
1039         list_for_each_entry_safe(piocb, next_iocb,
1040                      &pring->txcmplq, list)
1041             piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
1042         list_splice_init(&pring->txcmplq, &completions);
1043         pring->txcmplq_cnt = 0;
1044         spin_unlock_irq(&pring->ring_lock);
1045         lpfc_sli_abort_iocb_ring(phba, pring);
1046     }
1047     /* Cancel all the IOCBs from the completions list */
1048     lpfc_sli_cancel_iocbs(phba, &completions,
1049                   IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
1050 }
1051 
1052 /**
1053  * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
1054  * @phba: pointer to lpfc HBA data structure.
1055  *
1056  * This routine will do uninitialization after the HBA is reset when
1057  * bringing down the SLI Layer.
1058  *
1059  * Return codes
1060  *   0 - success.
1061  *   Any other value - error.
1062  **/
1063 static int
1064 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
1065 {
1066     lpfc_hba_free_post_buf(phba);
1067     lpfc_hba_clean_txcmplq(phba);
1068     return 0;
1069 }
1070 
1071 /**
1072  * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
1073  * @phba: pointer to lpfc HBA data structure.
1074  *
1075  * This routine will do uninitialization after the HBA is reset when
1076  * bringing down the SLI Layer.
1077  *
1078  * Return codes
1079  *   0 - success.
1080  *   Any other value - error.
1081  **/
1082 static int
1083 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1084 {
1085     struct lpfc_io_buf *psb, *psb_next;
1086     struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
1087     struct lpfc_sli4_hdw_queue *qp;
1088     LIST_HEAD(aborts);
1089     LIST_HEAD(nvme_aborts);
1090     LIST_HEAD(nvmet_aborts);
1091     struct lpfc_sglq *sglq_entry = NULL;
1092     int cnt, idx;
1093 
1094 
1095     lpfc_sli_hbqbuf_free_all(phba);
1096     lpfc_hba_clean_txcmplq(phba);
1097 
1098     /* At this point in time the HBA is either reset or DOA. Either
1099      * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
1100      * on the lpfc_els_sgl_list so that it can either be freed if the
1101      * driver is unloading or reposted if the driver is restarting
1102      * the port.
1103      */
1104 
1105     /* sgl_list_lock required because worker thread uses this
1106      * list.
1107      */
1108     spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
1109     list_for_each_entry(sglq_entry,
1110         &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1111         sglq_entry->state = SGL_FREED;
1112 
1113     list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
1114             &phba->sli4_hba.lpfc_els_sgl_list);
1115 
1116 
1117     spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
1118 
1119     /* abts_xxxx_buf_list_lock required because worker thread uses this
1120      * list.
1121      */
1122     spin_lock_irq(&phba->hbalock);
1123     cnt = 0;
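         /* For each hardware queue, move buffers stranded on the aborted
          * I/O list back onto the put list so they can be reused, and clear
          * the per-queue aborted buffer counts.
          */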
1124     for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
1125         qp = &phba->sli4_hba.hdwq[idx];
1126 
1127         spin_lock(&qp->abts_io_buf_list_lock);
1128         list_splice_init(&qp->lpfc_abts_io_buf_list,
1129                  &aborts);
1130 
1131         list_for_each_entry_safe(psb, psb_next, &aborts, list) {
1132             psb->pCmd = NULL;
1133             psb->status = IOSTAT_SUCCESS;
1134             cnt++;
1135         }
1136         spin_lock(&qp->io_buf_list_put_lock);
1137         list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
1138         qp->put_io_bufs += qp->abts_scsi_io_bufs;
1139         qp->put_io_bufs += qp->abts_nvme_io_bufs;
1140         qp->abts_scsi_io_bufs = 0;
1141         qp->abts_nvme_io_bufs = 0;
1142         spin_unlock(&qp->io_buf_list_put_lock);
1143         spin_unlock(&qp->abts_io_buf_list_lock);
1144     }
1145     spin_unlock_irq(&phba->hbalock);
1146 
1147     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1148         spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1149         list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1150                  &nvmet_aborts);
1151         spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1152         list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
1153             ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
1154             lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1155         }
1156     }
1157 
1158     lpfc_sli4_free_sp_events(phba);
1159     return cnt;
1160 }
1161 
1162 /**
1163  * lpfc_hba_down_post - Wrapper func for hba down post routine
1164  * @phba: pointer to lpfc HBA data structure.
1165  *
1166  * This routine wraps the actual SLI3 or SLI4 routine for performing
1167  * uninitialization after the HBA is reset when bringing down the SLI Layer.
1168  *
1169  * Return codes
1170  *   0 - success.
1171  *   Any other value - error.
1172  **/
1173 int
1174 lpfc_hba_down_post(struct lpfc_hba *phba)
1175 {
1176     return (*phba->lpfc_hba_down_post)(phba);
1177 }
1178 
1179 /**
1180  * lpfc_hb_timeout - The HBA-timer timeout handler
1181  * @t: timer context used to obtain the pointer to lpfc hba data structure.
1182  *
1183  * This is the HBA-timer timeout handler registered to the lpfc driver. When
1184  * this timer fires, an HBA timeout event shall be posted to the lpfc driver
1185  * work-port-events bitmap and the worker thread is notified. This timeout
1186  * event will be used by the worker thread to invoke the actual timeout
1187  * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
1188  * be performed in the timeout handler and the HBA timeout event bit shall
1189  * be cleared by the worker thread after it has taken the event bitmap out.
1190  **/
1191 static void
1192 lpfc_hb_timeout(struct timer_list *t)
1193 {
1194     struct lpfc_hba *phba;
1195     uint32_t tmo_posted;
1196     unsigned long iflag;
1197 
1198     phba = from_timer(phba, t, hb_tmofunc);
1199 
1200     /* Check for heart beat timeout conditions */
1201     spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1202     tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1203     if (!tmo_posted)
1204         phba->pport->work_port_events |= WORKER_HB_TMO;
1205     spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1206 
1207     /* Tell the worker thread there is work to do */
1208     if (!tmo_posted)
1209         lpfc_worker_wake_up(phba);
1210     return;
1211 }
1212 
1213 /**
1214  * lpfc_rrq_timeout - The RRQ-timer timeout handler
1215  * @t: timer context used to obtain the pointer to lpfc hba data structure.
1216  *
1217  * This is the RRQ-timer timeout handler registered to the lpfc driver. When
1218  * this timer fires, an RRQ timeout event shall be posted to the lpfc driver
1219  * work-port-events bitmap and the worker thread is notified. This timeout
1220  * event will be used by the worker thread to invoke the actual timeout
1221  * handler routine, lpfc_rrq_handler. Any periodic operations will
1222  * be performed in the timeout handler and the RRQ timeout event bit shall
1223  * be cleared by the worker thread after it has taken the event bitmap out.
1224  **/
1225 static void
1226 lpfc_rrq_timeout(struct timer_list *t)
1227 {
1228     struct lpfc_hba *phba;
1229     unsigned long iflag;
1230 
1231     phba = from_timer(phba, t, rrq_tmr);
1232     spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1233     if (!(phba->pport->load_flag & FC_UNLOADING))
1234         phba->hba_flag |= HBA_RRQ_ACTIVE;
1235     else
1236         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1237     spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1238 
1239     if (!(phba->pport->load_flag & FC_UNLOADING))
1240         lpfc_worker_wake_up(phba);
1241 }
1242 
1243 /**
1244  * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
1245  * @phba: pointer to lpfc hba data structure.
1246  * @pmboxq: pointer to the driver internal queue element for mailbox command.
1247  *
1248  * This is the callback function to the lpfc heart-beat mailbox command.
1249  * If configured, the lpfc driver issues the heart-beat mailbox command to
1250  * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. At the time the
1251  * heart-beat mailbox command is issued, the driver shall set up the heart-beat
1252  * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks the
1253  * heart-beat outstanding state. Once the mailbox command comes back and
1254  * no error conditions detected, the heart-beat mailbox command timer is
1255  * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
1256  * state is cleared for the next heart-beat. If the timer expired with the
1257  * heart-beat outstanding state set, the driver will put the HBA offline.
1258  **/
1259 static void
1260 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
1261 {
1262     unsigned long drvr_flag;
1263 
1264     spin_lock_irqsave(&phba->hbalock, drvr_flag);
1265     phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
1266     spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1267 
1268     /* Check and reset heart-beat timer if necessary */
1269     mempool_free(pmboxq, phba->mbox_mem_pool);
1270     if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
1271         !(phba->link_state == LPFC_HBA_ERROR) &&
1272         !(phba->pport->load_flag & FC_UNLOADING))
1273         mod_timer(&phba->hb_tmofunc,
1274               jiffies +
1275               msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
1276     return;
1277 }
1278 
1279 /*
1280  * lpfc_idle_stat_delay_work - idle_stat tracking
1281  *
1282  * This routine tracks per-cq idle_stat and determines polling decisions.
1283  *
1284  * Return codes:
1285  *   None
1286  **/
1287 static void
1288 lpfc_idle_stat_delay_work(struct work_struct *work)
1289 {
1290     struct lpfc_hba *phba = container_of(to_delayed_work(work),
1291                          struct lpfc_hba,
1292                          idle_stat_delay_work);
1293     struct lpfc_queue *cq;
1294     struct lpfc_sli4_hdw_queue *hdwq;
1295     struct lpfc_idle_stat *idle_stat;
1296     u32 i, idle_percent;
1297     u64 wall, wall_idle, diff_wall, diff_idle, busy_time;
1298 
1299     if (phba->pport->load_flag & FC_UNLOADING)
1300         return;
1301 
1302     if (phba->link_state == LPFC_HBA_ERROR ||
1303         phba->pport->fc_flag & FC_OFFLINE_MODE ||
1304         phba->cmf_active_mode != LPFC_CFG_OFF)
1305         goto requeue;
1306 
1307     for_each_present_cpu(i) {
1308         hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
1309         cq = hdwq->io_cq;
1310 
1311         /* Skip if we've already handled this cq's primary CPU */
1312         if (cq->chann != i)
1313             continue;
1314 
1315         idle_stat = &phba->sli4_hba.idle_stat[i];
1316 
1317         /* get_cpu_idle_time returns values as running counters. Thus,
1318          * to know the amount for this period, the prior counter values
1319          * need to be subtracted from the current counter values.
1320          * From there, the idle time stat can be calculated as a
1321          * percentage of 100 - the sum of the other consumption times.
1322          */
1323         wall_idle = get_cpu_idle_time(i, &wall, 1);
1324         diff_idle = wall_idle - idle_stat->prev_idle;
1325         diff_wall = wall - idle_stat->prev_wall;
1326 
1327         if (diff_wall <= diff_idle)
1328             busy_time = 0;
1329         else
1330             busy_time = diff_wall - diff_idle;
1331 
1332         idle_percent = div64_u64(100 * busy_time, diff_wall);
1333         idle_percent = 100 - idle_percent;
1334 
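             /* If this CPU was less than 15% idle over the sample
              * period, defer CQ processing to the workqueue; otherwise
              * use the irq_poll path.
              */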
1335         if (idle_percent < 15)
1336             cq->poll_mode = LPFC_QUEUE_WORK;
1337         else
1338             cq->poll_mode = LPFC_IRQ_POLL;
1339 
1340         idle_stat->prev_idle = wall_idle;
1341         idle_stat->prev_wall = wall;
1342     }
1343 
1344 requeue:
1345     schedule_delayed_work(&phba->idle_stat_delay_work,
1346                   msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
1347 }
1348 
1349 static void
1350 lpfc_hb_eq_delay_work(struct work_struct *work)
1351 {
1352     struct lpfc_hba *phba = container_of(to_delayed_work(work),
1353                          struct lpfc_hba, eq_delay_work);
1354     struct lpfc_eq_intr_info *eqi, *eqi_new;
1355     struct lpfc_queue *eq, *eq_next;
1356     unsigned char *ena_delay = NULL;
1357     uint32_t usdelay;
1358     int i;
1359 
1360     if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
1361         return;
1362 
1363     if (phba->link_state == LPFC_HBA_ERROR ||
1364         phba->pport->fc_flag & FC_OFFLINE_MODE)
1365         goto requeue;
1366 
1367     ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1368                 GFP_KERNEL);
1369     if (!ena_delay)
1370         goto requeue;
1371 
1372     for (i = 0; i < phba->cfg_irq_chann; i++) {
1373         /* Get the EQ corresponding to the IRQ vector */
1374         eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1375         if (!eq)
1376             continue;
1377         if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
1378             eq->q_flag &= ~HBA_EQ_DELAY_CHK;
1379             ena_delay[eq->last_cpu] = 1;
1380         }
1381     }
1382 
1383     for_each_present_cpu(i) {
1384         eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
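             /* When a delay is warranted for this CPU, scale its EQ
              * interrupt count into a coalescing delay: LPFC_EQ_DELAY_STEP
              * per 1024 interrupts, capped at LPFC_MAX_AUTO_EQ_DELAY.
              */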
1385         if (ena_delay[i]) {
1386             usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
1387             if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
1388                 usdelay = LPFC_MAX_AUTO_EQ_DELAY;
1389         } else {
1390             usdelay = 0;
1391         }
1392 
1393         eqi->icnt = 0;
1394 
1395         list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
1396             if (unlikely(eq->last_cpu != i)) {
1397                 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1398                               eq->last_cpu);
1399                 list_move_tail(&eq->cpu_list, &eqi_new->list);
1400                 continue;
1401             }
1402             if (usdelay != eq->q_mode)
1403                 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1404                              usdelay);
1405         }
1406     }
1407 
1408     kfree(ena_delay);
1409 
1410 requeue:
1411     queue_delayed_work(phba->wq, &phba->eq_delay_work,
1412                msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
1413 }
1414 
1415 /**
1416  * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
1417  * @phba: pointer to lpfc hba data structure.
1418  *
1419  * For each heartbeat, this routine does some heuristic methods to adjust
1420  * XRI distribution. The goal is to fully utilize free XRIs.
1421  **/
1422 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1423 {
1424     u32 i;
1425     u32 hwq_count;
1426 
1427     hwq_count = phba->cfg_hdw_queue;
1428     for (i = 0; i < hwq_count; i++) {
1429         /* Adjust XRIs in private pool */
1430         lpfc_adjust_pvt_pool_count(phba, i);
1431 
1432         /* Adjust high watermark */
1433         lpfc_adjust_high_watermark(phba, i);
1434 
1435 #ifdef LPFC_MXP_STAT
1436         /* Snapshot pbl, pvt and busy count */
1437         lpfc_snapshot_mxp(phba, i);
1438 #endif
1439     }
1440 }
1441 
1442 /**
1443  * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
1444  * @phba: pointer to lpfc hba data structure.
1445  *
1446  * If an HB mbox is not already in progress, this routine will allocate
1447  * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
1448  * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
1449  **/
1450 int
1451 lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1452 {
1453     LPFC_MBOXQ_t *pmboxq;
1454     int retval;
1455 
1456     /* Is a Heartbeat mbox already in progress */
1457     if (phba->hba_flag & HBA_HBEAT_INP)
1458         return 0;
1459 
1460     pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1461     if (!pmboxq)
1462         return -ENOMEM;
1463 
1464     lpfc_heart_beat(phba, pmboxq);
1465     pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
1466     pmboxq->vport = phba->pport;
1467     retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1468 
1469     if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
1470         mempool_free(pmboxq, phba->mbox_mem_pool);
1471         return -ENXIO;
1472     }
1473     phba->hba_flag |= HBA_HBEAT_INP;
1474 
1475     return 0;
1476 }
1477 
1478 /**
1479  * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
1480  * @phba: pointer to lpfc hba data structure.
1481  *
1482  * When the heartbeat timer (every 5 sec) fires, the HBA_HBEAT_TMO flag
1483  * forces a MBX_HEARTBEAT mbox command to be issued, regardless of the
1484  * value of lpfc_enable_hba_heartbeat.
1485  * If lpfc_enable_hba_heartbeat is set, the timeout routine already tries
1486  * to issue a MBX_HEARTBEAT mbox command, so the flag does not need to be set.
1487  **/
1488 void
1489 lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1490 {
1491     if (phba->cfg_enable_hba_heartbeat)
1492         return;
1493     phba->hba_flag |= HBA_HBEAT_TMO;
1494 }
1495 
1496 /**
1497  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
1498  * @phba: pointer to lpfc hba data structure.
1499  *
1500  * This is the actual HBA-timer timeout handler, invoked by the worker
1501  * thread whenever the HBA timer has fired and an HBA-timeout event has been
1502  * posted. This handler performs any periodic operations needed for the
1503  * device. If such a periodic event has already been attended to, either in
1504  * the interrupt handler or by processing slow-ring or fast-ring events
1505  * within the HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler
1506  * simply resets the timer for the next timeout period. If the lpfc
1507  * heart-beat mailbox command is configured and there is no heart-beat
1508  * mailbox command outstanding, a heart-beat mailbox is issued and the timer
1509  * is set properly. Otherwise, if a heart-beat mailbox command has remained
1510  * outstanding, the HBA is taken offline.
1511  **/
1512 void
1513 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1514 {
1515     struct lpfc_vport **vports;
1516     struct lpfc_dmabuf *buf_ptr;
1517     int retval = 0;
1518     int i, tmo;
1519     struct lpfc_sli *psli = &phba->sli;
1520     LIST_HEAD(completions);
1521 
1522     if (phba->cfg_xri_rebalancing) {
1523         /* Multi-XRI pools handler */
1524         lpfc_hb_mxp_handler(phba);
1525     }
1526 
1527     vports = lpfc_create_vport_work_array(phba);
1528     if (vports != NULL)
1529         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1530             lpfc_rcv_seq_check_edtov(vports[i]);
1531             lpfc_fdmi_change_check(vports[i]);
1532         }
1533     lpfc_destroy_vport_work_array(phba, vports);
1534 
1535     if ((phba->link_state == LPFC_HBA_ERROR) ||
1536         (phba->pport->load_flag & FC_UNLOADING) ||
1537         (phba->pport->fc_flag & FC_OFFLINE_MODE))
1538         return;
1539 
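         /*
          * If no ELS buffers have been consumed since the previous heartbeat
          * interval, assume the queued buffers are stale and free them.
          */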
1540     if (phba->elsbuf_cnt &&
1541         (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1542         spin_lock_irq(&phba->hbalock);
1543         list_splice_init(&phba->elsbuf, &completions);
1544         phba->elsbuf_cnt = 0;
1545         phba->elsbuf_prev_cnt = 0;
1546         spin_unlock_irq(&phba->hbalock);
1547 
1548         while (!list_empty(&completions)) {
1549             list_remove_head(&completions, buf_ptr,
1550                 struct lpfc_dmabuf, list);
1551             lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1552             kfree(buf_ptr);
1553         }
1554     }
1555     phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1556 
1557     /* If there is no heart beat outstanding, issue a heartbeat command */
1558     if (phba->cfg_enable_hba_heartbeat) {
1559         /* If IOs are completing, no need to issue a MBX_HEARTBEAT */
1560         spin_lock_irq(&phba->pport->work_port_lock);
1561         if (time_after(phba->last_completion_time +
1562                 msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
1563                 jiffies)) {
1564             spin_unlock_irq(&phba->pport->work_port_lock);
1565             if (phba->hba_flag & HBA_HBEAT_INP)
1566                 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1567             else
1568                 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1569             goto out;
1570         }
1571         spin_unlock_irq(&phba->pport->work_port_lock);
1572 
1573         /* Check if a MBX_HEARTBEAT is already in progress */
1574         if (phba->hba_flag & HBA_HBEAT_INP) {
1575             /*
1576              * If heart beat timeout called with HBA_HBEAT_INP set
1577              * we need to give the hb mailbox cmd a chance to
1578              * complete or TMO.
1579              */
1580             lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1581                 "0459 Adapter heartbeat still outstanding: "
1582                 "last compl time was %d ms.\n",
1583                 jiffies_to_msecs(jiffies
1584                      - phba->last_completion_time));
1585             tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1586         } else {
1587             if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
1588                 (list_empty(&psli->mboxq))) {
1589 
1590                 retval = lpfc_issue_hb_mbox(phba);
1591                 if (retval) {
1592                     tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1593                     goto out;
1594                 }
1595                 phba->skipped_hb = 0;
1596             } else if (time_before_eq(phba->last_completion_time,
1597                     phba->skipped_hb)) {
1598                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1599                     "2857 Last completion time not "
1600                     "updated in %d ms\n",
1601                     jiffies_to_msecs(jiffies
1602                          - phba->last_completion_time));
1603             } else
1604                 phba->skipped_hb = jiffies;
1605 
1606             tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1607             goto out;
1608         }
1609     } else {
1610         /* Check to see if we want to force a MBX_HEARTBEAT */
1611         if (phba->hba_flag & HBA_HBEAT_TMO) {
1612             retval = lpfc_issue_hb_mbox(phba);
1613             if (retval)
1614                 tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1615             else
1616                 tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
1617             goto out;
1618         }
1619         tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
1620     }
1621 out:
1622     mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
1623 }
1624 
1625 /**
1626  * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
1627  * @phba: pointer to lpfc hba data structure.
1628  *
1629  * This routine is called to bring the HBA offline when HBA hardware error
1630  * other than Port Error 6 has been detected.
1631  **/
1632 static void
1633 lpfc_offline_eratt(struct lpfc_hba *phba)
1634 {
1635     struct lpfc_sli   *psli = &phba->sli;
1636 
1637     spin_lock_irq(&phba->hbalock);
1638     psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1639     spin_unlock_irq(&phba->hbalock);
1640     lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1641 
1642     lpfc_offline(phba);
1643     lpfc_reset_barrier(phba);
1644     spin_lock_irq(&phba->hbalock);
1645     lpfc_sli_brdreset(phba);
1646     spin_unlock_irq(&phba->hbalock);
1647     lpfc_hba_down_post(phba);
1648     lpfc_sli_brdready(phba, HS_MBRDY);
1649     lpfc_unblock_mgmt_io(phba);
1650     phba->link_state = LPFC_HBA_ERROR;
1651     return;
1652 }
1653 
1654 /**
1655  * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
1656  * @phba: pointer to lpfc hba data structure.
1657  *
1658  * This routine is called to bring a SLI4 HBA offline when HBA hardware error
1659  * other than Port Error 6 has been detected.
1660  **/
1661 void
1662 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1663 {
1664     spin_lock_irq(&phba->hbalock);
1665     if (phba->link_state == LPFC_HBA_ERROR &&
1666         test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
1667         spin_unlock_irq(&phba->hbalock);
1668         return;
1669     }
1670     phba->link_state = LPFC_HBA_ERROR;
1671     spin_unlock_irq(&phba->hbalock);
1672 
1673     lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1674     lpfc_sli_flush_io_rings(phba);
1675     lpfc_offline(phba);
1676     lpfc_hba_down_post(phba);
1677     lpfc_unblock_mgmt_io(phba);
1678 }
1679 
1680 /**
1681  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
1682  * @phba: pointer to lpfc hba data structure.
1683  *
1684  * This routine is invoked to handle the deferred HBA hardware error
1685  * conditions. This type of error is indicated by the HBA setting ER1
1686  * and another ER bit in the host status register. The driver will
1687  * wait until the ER1 bit clears before handling the error condition.
1688  **/
1689 static void
1690 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1691 {
1692     uint32_t old_host_status = phba->work_hs;
1693     struct lpfc_sli *psli = &phba->sli;
1694 
1695     /* If the pci channel is offline, ignore possible errors,
1696      * since we cannot communicate with the pci card anyway.
1697      */
1698     if (pci_channel_offline(phba->pcidev)) {
1699         spin_lock_irq(&phba->hbalock);
1700         phba->hba_flag &= ~DEFER_ERATT;
1701         spin_unlock_irq(&phba->hbalock);
1702         return;
1703     }
1704 
1705     lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1706             "0479 Deferred Adapter Hardware Error "
1707             "Data: x%x x%x x%x\n",
1708             phba->work_hs, phba->work_status[0],
1709             phba->work_status[1]);
1710 
1711     spin_lock_irq(&phba->hbalock);
1712     psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1713     spin_unlock_irq(&phba->hbalock);
1714 
1715 
1716     /*
1717      * Firmware stops when it triggers an erratt. That could cause I/Os to
1718      * be dropped by the firmware. Error out the iocbs (I/Os) on the txcmplq
1719      * and let the SCSI layer retry them after the link is re-established.
1720      */
1721     lpfc_sli_abort_fcp_rings(phba);
1722 
1723     /*
1724      * There was a firmware error. Take the hba offline and then
1725      * attempt to restart it.
1726      */
1727     lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1728     lpfc_offline(phba);
1729 
1730     /* Wait for the ER1 bit to clear.*/
1731     while (phba->work_hs & HS_FFER1) {
1732         msleep(100);
1733         if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1734             phba->work_hs = UNPLUG_ERR;
1735             break;
1736         }
1737         /* If driver is unloading let the worker thread continue */
1738         if (phba->pport->load_flag & FC_UNLOADING) {
1739             phba->work_hs = 0;
1740             break;
1741         }
1742     }
1743 
1744     /*
1745      * This is to protect against a race condition in which the
1746      * first write to the host attention register clears the
1747      * host status register.
1748      */
1749     if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
1750         phba->work_hs = old_host_status & ~HS_FFER1;
1751 
1752     spin_lock_irq(&phba->hbalock);
1753     phba->hba_flag &= ~DEFER_ERATT;
1754     spin_unlock_irq(&phba->hbalock);
1755     phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1756     phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1757 }
1758 
1759 static void
1760 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1761 {
1762     struct lpfc_board_event_header board_event;
1763     struct Scsi_Host *shost;
1764 
1765     board_event.event_type = FC_REG_BOARD_EVENT;
1766     board_event.subcategory = LPFC_EVENT_PORTINTERR;
1767     shost = lpfc_shost_from_vport(phba->pport);
1768     fc_host_post_vendor_event(shost, fc_get_event_number(),
1769                   sizeof(board_event),
1770                   (char *) &board_event,
1771                   LPFC_NL_VENDOR_ID);
1772 }
1773 
1774 /**
1775  * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
1776  * @phba: pointer to lpfc hba data structure.
1777  *
1778  * This routine is invoked to handle the following HBA hardware error
1779  * conditions:
1780  * 1 - HBA error attention interrupt
1781  * 2 - DMA ring index out of range
1782  * 3 - Mailbox command came back as unknown
1783  **/
1784 static void
1785 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1786 {
1787     struct lpfc_vport *vport = phba->pport;
1788     struct lpfc_sli   *psli = &phba->sli;
1789     uint32_t event_data;
1790     unsigned long temperature;
1791     struct temp_event temp_event_data;
1792     struct Scsi_Host  *shost;
1793 
1794     /* If the pci channel is offline, ignore possible errors,
1795      * since we cannot communicate with the pci card anyway.
1796      */
1797     if (pci_channel_offline(phba->pcidev)) {
1798         spin_lock_irq(&phba->hbalock);
1799         phba->hba_flag &= ~DEFER_ERATT;
1800         spin_unlock_irq(&phba->hbalock);
1801         return;
1802     }
1803 
1804     /* If resets are disabled then leave the HBA alone and return */
1805     if (!phba->cfg_enable_hba_reset)
1806         return;
1807 
1808     /* Send an internal error event to mgmt application */
1809     lpfc_board_errevt_to_mgmt(phba);
1810 
1811     if (phba->hba_flag & DEFER_ERATT)
1812         lpfc_handle_deferred_eratt(phba);
1813 
1814     if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1815         if (phba->work_hs & HS_FFER6)
1816             /* Re-establishing Link */
1817             lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1818                     "1301 Re-establishing Link "
1819                     "Data: x%x x%x x%x\n",
1820                     phba->work_hs, phba->work_status[0],
1821                     phba->work_status[1]);
1822         if (phba->work_hs & HS_FFER8)
1823             /* Device Zeroization */
1824             lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1825                     "2861 Host Authentication device "
1826                     "zeroization Data:x%x x%x x%x\n",
1827                     phba->work_hs, phba->work_status[0],
1828                     phba->work_status[1]);
1829 
1830         spin_lock_irq(&phba->hbalock);
1831         psli->sli_flag &= ~LPFC_SLI_ACTIVE;
1832         spin_unlock_irq(&phba->hbalock);
1833 
1834         /*
1835          * Firmware stops when it triggers an erratt with HS_FFER6.
1836          * That could cause I/Os to be dropped by the firmware.
1837          * Error out the iocbs (I/Os) on the txcmplq and let the SCSI
1838          * layer retry them after the link is re-established.
1839          */
1840         lpfc_sli_abort_fcp_rings(phba);
1841 
1842         /*
1843          * There was a firmware error.  Take the hba offline and then
1844          * attempt to restart it.
1845          */
1846         lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1847         lpfc_offline(phba);
1848         lpfc_sli_brdrestart(phba);
1849         if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
1850             lpfc_unblock_mgmt_io(phba);
1851             return;
1852         }
1853         lpfc_unblock_mgmt_io(phba);
1854     } else if (phba->work_hs & HS_CRIT_TEMP) {
1855         temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1856         temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
1857         temp_event_data.event_code = LPFC_CRIT_TEMP;
1858         temp_event_data.data = (uint32_t)temperature;
1859 
1860         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1861                 "0406 Adapter maximum temperature exceeded "
1862                 "(%ld), taking this port offline "
1863                 "Data: x%x x%x x%x\n",
1864                 temperature, phba->work_hs,
1865                 phba->work_status[0], phba->work_status[1]);
1866 
1867         shost = lpfc_shost_from_vport(phba->pport);
1868         fc_host_post_vendor_event(shost, fc_get_event_number(),
1869                       sizeof(temp_event_data),
1870                       (char *) &temp_event_data,
1871                       SCSI_NL_VID_TYPE_PCI
1872                       | PCI_VENDOR_ID_EMULEX);
1873 
1874         spin_lock_irq(&phba->hbalock);
1875         phba->over_temp_state = HBA_OVER_TEMP;
1876         spin_unlock_irq(&phba->hbalock);
1877         lpfc_offline_eratt(phba);
1878 
1879     } else {
1880         /* The if clause above forces this code path when the status
1881          * failure is a value other than FFER6, so the offline handling is
1882          * not invoked twice. This is the adapter hardware error path.
1883          */
1884         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1885                 "0457 Adapter Hardware Error "
1886                 "Data: x%x x%x x%x\n",
1887                 phba->work_hs,
1888                 phba->work_status[0], phba->work_status[1]);
1889 
1890         event_data = FC_REG_DUMP_EVENT;
1891         shost = lpfc_shost_from_vport(vport);
1892         fc_host_post_vendor_event(shost, fc_get_event_number(),
1893                 sizeof(event_data), (char *) &event_data,
1894                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1895 
1896         lpfc_offline_eratt(phba);
1897     }
1898     return;
1899 }
1900 
1901 /**
1902  * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
1903  * @phba: pointer to lpfc hba data structure.
1904  * @mbx_action: flag for mailbox shutdown action.
1905  * @en_rn_msg: send reset/port recovery message.
1906  *
1907  * This routine performs an SLI4 port PCI function reset in response to a
1908  * port status register polling attention. It waits for the port status
1909  * register (ERR, RDY, RN) bits to settle before the reset, freeing and then
1910  * re-requesting interrupt vectors to handle a possible port resource change.
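      *
      * Return codes
      *   0 - success
      *   Any other value - error from the port-ready wait, board restart,
      *   interrupt setup, or online steps of the recovery sequence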
1911  **/
1912 static int
1913 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1914                 bool en_rn_msg)
1915 {
1916     int rc;
1917     uint32_t intr_mode;
1918     LPFC_MBOXQ_t *mboxq;
1919 
1920     if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1921         LPFC_SLI_INTF_IF_TYPE_2) {
1922         /*
1923          * On an error status condition, the driver needs to wait for the
1924          * port to become ready before performing the reset.
1925          */
1926         rc = lpfc_sli4_pdev_status_reg_wait(phba);
1927         if (rc)
1928             return rc;
1929     }
1930 
1931     /* need reset: attempt for port recovery */
1932     if (en_rn_msg)
1933         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1934                 "2887 Reset Needed: Attempting Port "
1935                 "Recovery...\n");
1936 
1937     /* In the no-wait case, the HBA has been reset and is not
1938      * functional, thus we should clear the
1939      * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags.
1940      */
1941     if (mbx_action == LPFC_MBX_NO_WAIT) {
1942         spin_lock_irq(&phba->hbalock);
1943         phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
1944         if (phba->sli.mbox_active) {
1945             mboxq = phba->sli.mbox_active;
1946             mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
1947             __lpfc_mbox_cmpl_put(phba, mboxq);
1948             phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1949             phba->sli.mbox_active = NULL;
1950         }
1951         spin_unlock_irq(&phba->hbalock);
1952     }
1953 
1954     lpfc_offline_prep(phba, mbx_action);
1955     lpfc_sli_flush_io_rings(phba);
1956     lpfc_offline(phba);
1957     /* release interrupt for possible resource change */
1958     lpfc_sli4_disable_intr(phba);
1959     rc = lpfc_sli_brdrestart(phba);
1960     if (rc) {
1961         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1962                 "6309 Failed to restart board\n");
1963         return rc;
1964     }
1965     /* request and enable interrupt */
1966     intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1967     if (intr_mode == LPFC_INTR_ERROR) {
1968         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1969                 "3175 Failed to enable interrupt\n");
1970         return -EIO;
1971     }
1972     phba->intr_mode = intr_mode;
1973     rc = lpfc_online(phba);
1974     if (rc == 0)
1975         lpfc_unblock_mgmt_io(phba);
1976 
1977     return rc;
1978 }
1979 
1980 /**
1981  * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
1982  * @phba: pointer to lpfc hba data structure.
1983  *
1984  * This routine is invoked to handle the SLI4 HBA hardware error attention
1985  * conditions.
1986  **/
1987 static void
1988 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1989 {
1990     struct lpfc_vport *vport = phba->pport;
1991     uint32_t event_data;
1992     struct Scsi_Host *shost;
1993     uint32_t if_type;
1994     struct lpfc_register portstat_reg = {0};
1995     uint32_t reg_err1, reg_err2;
1996     uint32_t uerrlo_reg, uemasklo_reg;
1997     uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
1998     bool en_rn_msg = true;
1999     struct temp_event temp_event_data;
2000     struct lpfc_register portsmphr_reg;
2001     int rc, i;
2002 
2003     /* If the pci channel is offline, ignore possible errors, since
2004      * we cannot communicate with the pci card anyway.
2005      */
2006     if (pci_channel_offline(phba->pcidev)) {
2007         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2008                 "3166 pci channel is offline\n");
2009         lpfc_sli_flush_io_rings(phba);
2010         return;
2011     }
2012 
2013     memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
2014     if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2015     switch (if_type) {
2016     case LPFC_SLI_INTF_IF_TYPE_0:
2017         pci_rd_rc1 = lpfc_readl(
2018                 phba->sli4_hba.u.if_type0.UERRLOregaddr,
2019                 &uerrlo_reg);
2020         pci_rd_rc2 = lpfc_readl(
2021                 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
2022                 &uemasklo_reg);
2023         /* consider PCI bus read error as pci_channel_offline */
2024         if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
2025             return;
2026         if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
2027             lpfc_sli4_offline_eratt(phba);
2028             return;
2029         }
2030         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2031                 "7623 Checking UE recoverable");
2032 
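             /*
              * Poll the port semaphore register roughly once per second, for
              * up to ue_to_sr/1000 iterations, waiting for the port to report
              * that the unrecoverable error (UE) is recoverable.
              */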
2033         for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
2034             if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2035                        &portsmphr_reg.word0))
2036                 continue;
2037 
2038             smphr_port_status = bf_get(lpfc_port_smphr_port_status,
2039                            &portsmphr_reg);
2040             if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2041                 LPFC_PORT_SEM_UE_RECOVERABLE)
2042                 break;
2043             /* Sleep for 1 second before checking the semaphore again */
2044             msleep(1000);
2045         }
2046 
2047         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2048                 "4827 smphr_port_status x%x : Waited %dSec",
2049                 smphr_port_status, i);
2050 
2051         /* Recoverable UE, reset the HBA device */
2052         if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
2053             LPFC_PORT_SEM_UE_RECOVERABLE) {
2054             for (i = 0; i < 20; i++) {
2055                 msleep(1000);
2056                 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2057                     &portsmphr_reg.word0) &&
2058                     (LPFC_POST_STAGE_PORT_READY ==
2059                      bf_get(lpfc_port_smphr_port_status,
2060                      &portsmphr_reg))) {
2061                     rc = lpfc_sli4_port_sta_fn_reset(phba,
2062                         LPFC_MBX_NO_WAIT, en_rn_msg);
2063                     if (rc == 0)
2064                         return;
2065                     lpfc_printf_log(phba, KERN_ERR,
2066                         LOG_TRACE_EVENT,
2067                         "4215 Failed to recover UE");
2068                     break;
2069                 }
2070             }
2071         }
2072         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2073                 "7624 Firmware not ready: Failing UE recovery,"
2074                 " waited %dSec", i);
2075         phba->link_state = LPFC_HBA_ERROR;
2076         break;
2077 
2078     case LPFC_SLI_INTF_IF_TYPE_2:
2079     case LPFC_SLI_INTF_IF_TYPE_6:
2080         pci_rd_rc1 = lpfc_readl(
2081                 phba->sli4_hba.u.if_type2.STATUSregaddr,
2082                 &portstat_reg.word0);
2083         /* consider PCI bus read error as pci_channel_offline */
2084         if (pci_rd_rc1 == -EIO) {
2085             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2086                 "3151 PCI bus read access failure: x%x\n",
2087                 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2088             lpfc_sli4_offline_eratt(phba);
2089             return;
2090         }
2091         reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2092         reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2093         if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
2094             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2095                     "2889 Port Overtemperature event, "
2096                     "taking port offline Data: x%x x%x\n",
2097                     reg_err1, reg_err2);
2098 
2099             phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2100             temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
2101             temp_event_data.event_code = LPFC_CRIT_TEMP;
2102             temp_event_data.data = 0xFFFFFFFF;
2103 
2104             shost = lpfc_shost_from_vport(phba->pport);
2105             fc_host_post_vendor_event(shost, fc_get_event_number(),
2106                           sizeof(temp_event_data),
2107                           (char *)&temp_event_data,
2108                           SCSI_NL_VID_TYPE_PCI
2109                           | PCI_VENDOR_ID_EMULEX);
2110 
2111             spin_lock_irq(&phba->hbalock);
2112             phba->over_temp_state = HBA_OVER_TEMP;
2113             spin_unlock_irq(&phba->hbalock);
2114             lpfc_sli4_offline_eratt(phba);
2115             return;
2116         }
2117         if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2118             reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
2119             lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2120                     "3143 Port Down: Firmware Update "
2121                     "Detected\n");
2122             en_rn_msg = false;
2123         } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2124              reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2125             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2126                     "3144 Port Down: Debug Dump\n");
2127         else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2128              reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
2129             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2130                     "3145 Port Down: Provisioning\n");
2131 
2132         /* If resets are disabled then leave the HBA alone and return */
2133         if (!phba->cfg_enable_hba_reset)
2134             return;
2135 
2136         /* Check port status register for function reset */
2137         rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2138                 en_rn_msg);
2139         if (rc == 0) {
2140             /* don't report event on forced debug dump */
2141             if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
2142                 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
2143                 return;
2144             else
2145                 break;
2146         }
2147         /* Fall through when the port could not be recovered */
2148         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2149                 "3152 Unrecoverable error\n");
2150         phba->link_state = LPFC_HBA_ERROR;
2151         break;
2152     case LPFC_SLI_INTF_IF_TYPE_1:
2153     default:
2154         break;
2155     }
2156     lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2157             "3123 Report dump event to upper layer\n");
2158     /* Send an internal error event to mgmt application */
2159     lpfc_board_errevt_to_mgmt(phba);
2160 
2161     event_data = FC_REG_DUMP_EVENT;
2162     shost = lpfc_shost_from_vport(vport);
2163     fc_host_post_vendor_event(shost, fc_get_event_number(),
2164                   sizeof(event_data), (char *) &event_data,
2165                   SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
2166 }
2167 
2168 /**
2169  * lpfc_handle_eratt - Wrapper func for handling hba error attention
2170  * @phba: pointer to lpfc HBA data structure.
2171  *
2172  * This routine wraps the actual SLI3 or SLI4 HBA error attention handling
2173  * routine, invoking it through the API jump table function pointer in the
2174  * lpfc_hba structure.
2175  *
2176  * This function has no return value; the selected handler performs the
2177  * error handling directly.
2178  **/
2179 void
2180 lpfc_handle_eratt(struct lpfc_hba *phba)
2181 {
2182     (*phba->lpfc_handle_eratt)(phba);
2183 }
2184 
2185 /**
2186  * lpfc_handle_latt - The HBA link event handler
2187  * @phba: pointer to lpfc hba data structure.
2188  *
2189  * This routine is invoked from the worker thread to handle a HBA host
2190  * attention link event. SLI3 only.
2191  **/
2192 void
2193 lpfc_handle_latt(struct lpfc_hba *phba)
2194 {
2195     struct lpfc_vport *vport = phba->pport;
2196     struct lpfc_sli   *psli = &phba->sli;
2197     LPFC_MBOXQ_t *pmb;
2198     volatile uint32_t control;
2199     int rc = 0;
2200 
2201     pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2202     if (!pmb) {
2203         rc = 1;
2204         goto lpfc_handle_latt_err_exit;
2205     }
2206 
2207     rc = lpfc_mbox_rsrc_prep(phba, pmb);
2208     if (rc) {
2209         rc = 2;
2210         mempool_free(pmb, phba->mbox_mem_pool);
2211         goto lpfc_handle_latt_err_exit;
2212     }
2213 
2214     /* Cleanup any outstanding ELS commands */
2215     lpfc_els_flush_all_cmd(phba);
2216     psli->slistat.link_event++;
2217     lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
2218     pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
2219     pmb->vport = vport;
2220     /* Block ELS IOCBs until we have processed this mbox command */
2221     phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2222     rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
2223     if (rc == MBX_NOT_FINISHED) {
2224         rc = 4;
2225         goto lpfc_handle_latt_free_mbuf;
2226     }
2227 
2228     /* Clear Link Attention in HA REG */
2229     spin_lock_irq(&phba->hbalock);
2230     writel(HA_LATT, phba->HAregaddr);
2231     readl(phba->HAregaddr); /* flush */
2232     spin_unlock_irq(&phba->hbalock);
2233 
2234     return;
2235 
2236 lpfc_handle_latt_free_mbuf:
2237     phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2238     lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2239 lpfc_handle_latt_err_exit:
2240     /* Enable Link attention interrupts */
2241     spin_lock_irq(&phba->hbalock);
2242     psli->sli_flag |= LPFC_PROCESS_LA;
2243     control = readl(phba->HCregaddr);
2244     control |= HC_LAINT_ENA;
2245     writel(control, phba->HCregaddr);
2246     readl(phba->HCregaddr); /* flush */
2247 
2248     /* Clear Link Attention in HA REG */
2249     writel(HA_LATT, phba->HAregaddr);
2250     readl(phba->HAregaddr); /* flush */
2251     spin_unlock_irq(&phba->hbalock);
2252     lpfc_linkdown(phba);
2253     phba->link_state = LPFC_HBA_ERROR;
2254 
2255     lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2256             "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
2257 
2258     return;
2259 }
2260 
2261 /**
2262  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
2263  * @phba: pointer to lpfc hba data structure.
2264  * @vpd: pointer to the vital product data.
2265  * @len: length of the vital product data in bytes.
2266  *
2267  * This routine parses the Vital Product Data (VPD). The VPD is treated as
2268  * an array of characters. In this routine, the ModelName, ProgramType,
2269  * ModelDesc, and related fields of the phba data structure are populated.
2270  *
2271  * Return codes
2272  *   0 - pointer to the VPD passed in is NULL
2273  *   1 - success
2274  **/
2275 int
2276 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2277 {
2278     uint8_t lenlo, lenhi;
2279     int Length;
2280     int i, j;
2281     int finished = 0;
2282     int index = 0;
2283 
2284     if (!vpd)
2285         return 0;
2286 
2287     /* Vital Product */
2288     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2289             "0455 Vital Product Data: x%x x%x x%x x%x\n",
2290             (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
2291             (uint32_t) vpd[3]);
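         /*
          * Walk the VPD as a sequence of resource records: 0x82 and 0x91
          * records are skipped, 0x90 begins the read-only VPD area whose
          * "SN" and "V1".."V4" keywords carry the serial number, model
          * description, model name, program type and port name, and 0x78
          * marks the end of the VPD.
          */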
2292     while (!finished && (index < (len - 4))) {
2293         switch (vpd[index]) {
2294         case 0x82:
2295         case 0x91:
2296             index += 1;
2297             lenlo = vpd[index];
2298             index += 1;
2299             lenhi = vpd[index];
2300             index += 1;
2301             i = ((((unsigned short)lenhi) << 8) + lenlo);
2302             index += i;
2303             break;
2304         case 0x90:
2305             index += 1;
2306             lenlo = vpd[index];
2307             index += 1;
2308             lenhi = vpd[index];
2309             index += 1;
2310             Length = ((((unsigned short)lenhi) << 8) + lenlo);
2311             if (Length > len - index)
2312                 Length = len - index;
2313             while (Length > 0) {
2314             /* Look for Serial Number */
2315             if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
2316                 index += 2;
2317                 i = vpd[index];
2318                 index += 1;
2319                 j = 0;
2320                 Length -= (3+i);
2321                 while(i--) {
2322                     phba->SerialNumber[j++] = vpd[index++];
2323                     if (j == 31)
2324                         break;
2325                 }
2326                 phba->SerialNumber[j] = 0;
2327                 continue;
2328             }
2329             else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
2330                 phba->vpd_flag |= VPD_MODEL_DESC;
2331                 index += 2;
2332                 i = vpd[index];
2333                 index += 1;
2334                 j = 0;
2335                 Length -= (3+i);
2336                 while(i--) {
2337                     phba->ModelDesc[j++] = vpd[index++];
2338                     if (j == 255)
2339                         break;
2340                 }
2341                 phba->ModelDesc[j] = 0;
2342                 continue;
2343             }
2344             else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
2345                 phba->vpd_flag |= VPD_MODEL_NAME;
2346                 index += 2;
2347                 i = vpd[index];
2348                 index += 1;
2349                 j = 0;
2350                 Length -= (3+i);
2351                 while(i--) {
2352                     phba->ModelName[j++] = vpd[index++];
2353                     if (j == 79)
2354                         break;
2355                 }
2356                 phba->ModelName[j] = 0;
2357                 continue;
2358             }
2359             else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
2360                 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2361                 index += 2;
2362                 i = vpd[index];
2363                 index += 1;
2364                 j = 0;
2365                 Length -= (3+i);
2366                 while(i--) {
2367                     phba->ProgramType[j++] = vpd[index++];
2368                     if (j == 255)
2369                         break;
2370                 }
2371                 phba->ProgramType[j] = 0;
2372                 continue;
2373             }
2374             else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
2375                 phba->vpd_flag |= VPD_PORT;
2376                 index += 2;
2377                 i = vpd[index];
2378                 index += 1;
2379                 j = 0;
2380                 Length -= (3+i);
2381                 while(i--) {
2382                     if ((phba->sli_rev == LPFC_SLI_REV4) &&
2383                         (phba->sli4_hba.pport_name_sta ==
2384                          LPFC_SLI4_PPNAME_GET)) {
2385                         j++;
2386                         index++;
2387                     } else
2388                         phba->Port[j++] = vpd[index++];
2389                     if (j == 19)
2390                         break;
2391                 }
2392                 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2393                     (phba->sli4_hba.pport_name_sta ==
2394                      LPFC_SLI4_PPNAME_NON))
2395                     phba->Port[j] = 0;
2396                 continue;
2397             }
2398             else {
2399                 index += 2;
2400                 i = vpd[index];
2401                 index += 1;
2402                 index += i;
2403                 Length -= (3 + i);
2404             }
2405         }
2406         finished = 0;
2407         break;
2408         case 0x78:
2409             finished = 1;
2410             break;
2411         default:
2412             index++;
2413             break;
2414         }
2415     }
2416 
2417     return(1);
2418 }
2419 
2420 /**
2421  * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
2422  * @phba: pointer to lpfc hba data structure.
2423  * @mdp: pointer to the data structure to hold the derived model name.
2424  * @descp: pointer to the data structure to hold the derived description.
2425  *
2426  * This routine retrieves the HBA's description based on its registered PCI device
2427  * ID. The @descp passed into this function points to an array of 256 chars. It
2428  * shall be returned with the model name, maximum speed, and the host bus type.
2429  * The @mdp passed into this function points to an array of 80 chars. When the
2430  * function returns, the @mdp will be filled with the model name.
2431  **/
2432 static void
2433 lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2434 {
2435     uint16_t sub_dev_id = phba->pcidev->subsystem_device;
2436     char *model = "<Unknown>";
2437     int tbolt = 0;
2438 
2439     switch (sub_dev_id) {
2440     case PCI_DEVICE_ID_CLRY_161E:
2441         model = "161E";
2442         break;
2443     case PCI_DEVICE_ID_CLRY_162E:
2444         model = "162E";
2445         break;
2446     case PCI_DEVICE_ID_CLRY_164E:
2447         model = "164E";
2448         break;
2449     case PCI_DEVICE_ID_CLRY_161P:
2450         model = "161P";
2451         break;
2452     case PCI_DEVICE_ID_CLRY_162P:
2453         model = "162P";
2454         break;
2455     case PCI_DEVICE_ID_CLRY_164P:
2456         model = "164P";
2457         break;
2458     case PCI_DEVICE_ID_CLRY_321E:
2459         model = "321E";
2460         break;
2461     case PCI_DEVICE_ID_CLRY_322E:
2462         model = "322E";
2463         break;
2464     case PCI_DEVICE_ID_CLRY_324E:
2465         model = "324E";
2466         break;
2467     case PCI_DEVICE_ID_CLRY_321P:
2468         model = "321P";
2469         break;
2470     case PCI_DEVICE_ID_CLRY_322P:
2471         model = "322P";
2472         break;
2473     case PCI_DEVICE_ID_CLRY_324P:
2474         model = "324P";
2475         break;
2476     case PCI_DEVICE_ID_TLFC_2XX2:
2477         model = "2XX2";
2478         tbolt = 1;
2479         break;
2480     case PCI_DEVICE_ID_TLFC_3162:
2481         model = "3162";
2482         tbolt = 1;
2483         break;
2484     case PCI_DEVICE_ID_TLFC_3322:
2485         model = "3322";
2486         tbolt = 1;
2487         break;
2488     default:
2489         model = "Unknown";
2490         break;
2491     }
2492 
2493     if (mdp && mdp[0] == '\0')
2494         snprintf(mdp, 79, "%s", model);
2495 
2496     if (descp && descp[0] == '\0')
2497         snprintf(descp, 255,
2498              "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
2499              (tbolt) ? "ThunderLink FC " : "Celerity FC-",
2500              model,
2501              phba->Port);
2502 }
2503 
2504 /**
2505  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
2506  * @phba: pointer to lpfc hba data structure.
2507  * @mdp: pointer to the data structure to hold the derived model name.
2508  * @descp: pointer to the data structure to hold the derived description.
2509  *
2510  * This routine retrieves the HBA's description based on its registered PCI device
2511  * ID. The @descp passed into this function points to an array of 256 chars. It
2512  * shall be returned with the model name, maximum speed, and the host bus type.
2513  * The @mdp passed into this function points to an array of 80 chars. When the
2514  * function returns, the @mdp will be filled with the model name.
2515  **/
2516 static void
2517 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2518 {
2519     lpfc_vpd_t *vp;
2520     uint16_t dev_id = phba->pcidev->device;
2521     int max_speed;
2522     int GE = 0;
2523     int oneConnect = 0; /* default is not a oneConnect */
2524     struct {
2525         char *name;
2526         char *bus;
2527         char *function;
2528     } m = {"<Unknown>", "", ""};
2529 
2530     if (mdp && mdp[0] != '\0'
2531         && descp && descp[0] != '\0')
2532         return;
2533 
2534     if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
2535         lpfc_get_atto_model_desc(phba, mdp, descp);
2536         return;
2537     }
2538 
2539     if (phba->lmt & LMT_64Gb)
2540         max_speed = 64;
2541     else if (phba->lmt & LMT_32Gb)
2542         max_speed = 32;
2543     else if (phba->lmt & LMT_16Gb)
2544         max_speed = 16;
2545     else if (phba->lmt & LMT_10Gb)
2546         max_speed = 10;
2547     else if (phba->lmt & LMT_8Gb)
2548         max_speed = 8;
2549     else if (phba->lmt & LMT_4Gb)
2550         max_speed = 4;
2551     else if (phba->lmt & LMT_2Gb)
2552         max_speed = 2;
2553     else if (phba->lmt & LMT_1Gb)
2554         max_speed = 1;
2555     else
2556         max_speed = 0;
2557 
2558     vp = &phba->vpd;
2559 
2560     switch (dev_id) {
2561     case PCI_DEVICE_ID_FIREFLY:
2562         m = (typeof(m)){"LP6000", "PCI",
2563                 "Obsolete, Unsupported Fibre Channel Adapter"};
2564         break;
2565     case PCI_DEVICE_ID_SUPERFLY:
2566         if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2567             m = (typeof(m)){"LP7000", "PCI", ""};
2568         else
2569             m = (typeof(m)){"LP7000E", "PCI", ""};
2570         m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2571         break;
2572     case PCI_DEVICE_ID_DRAGONFLY:
2573         m = (typeof(m)){"LP8000", "PCI",
2574                 "Obsolete, Unsupported Fibre Channel Adapter"};
2575         break;
2576     case PCI_DEVICE_ID_CENTAUR:
2577         if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2578             m = (typeof(m)){"LP9002", "PCI", ""};
2579         else
2580             m = (typeof(m)){"LP9000", "PCI", ""};
2581         m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2582         break;
2583     case PCI_DEVICE_ID_RFLY:
2584         m = (typeof(m)){"LP952", "PCI",
2585                 "Obsolete, Unsupported Fibre Channel Adapter"};
2586         break;
2587     case PCI_DEVICE_ID_PEGASUS:
2588         m = (typeof(m)){"LP9802", "PCI-X",
2589                 "Obsolete, Unsupported Fibre Channel Adapter"};
2590         break;
2591     case PCI_DEVICE_ID_THOR:
2592         m = (typeof(m)){"LP10000", "PCI-X",
2593                 "Obsolete, Unsupported Fibre Channel Adapter"};
2594         break;
2595     case PCI_DEVICE_ID_VIPER:
2596         m = (typeof(m)){"LPX1000",  "PCI-X",
2597                 "Obsolete, Unsupported Fibre Channel Adapter"};
2598         break;
2599     case PCI_DEVICE_ID_PFLY:
2600         m = (typeof(m)){"LP982", "PCI-X",
2601                 "Obsolete, Unsupported Fibre Channel Adapter"};
2602         break;
2603     case PCI_DEVICE_ID_TFLY:
2604         m = (typeof(m)){"LP1050", "PCI-X",
2605                 "Obsolete, Unsupported Fibre Channel Adapter"};
2606         break;
2607     case PCI_DEVICE_ID_HELIOS:
2608         m = (typeof(m)){"LP11000", "PCI-X2",
2609                 "Obsolete, Unsupported Fibre Channel Adapter"};
2610         break;
2611     case PCI_DEVICE_ID_HELIOS_SCSP:
2612         m = (typeof(m)){"LP11000-SP", "PCI-X2",
2613                 "Obsolete, Unsupported Fibre Channel Adapter"};
2614         break;
2615     case PCI_DEVICE_ID_HELIOS_DCSP:
2616         m = (typeof(m)){"LP11002-SP",  "PCI-X2",
2617                 "Obsolete, Unsupported Fibre Channel Adapter"};
2618         break;
2619     case PCI_DEVICE_ID_NEPTUNE:
2620         m = (typeof(m)){"LPe1000", "PCIe",
2621                 "Obsolete, Unsupported Fibre Channel Adapter"};
2622         break;
2623     case PCI_DEVICE_ID_NEPTUNE_SCSP:
2624         m = (typeof(m)){"LPe1000-SP", "PCIe",
2625                 "Obsolete, Unsupported Fibre Channel Adapter"};
2626         break;
2627     case PCI_DEVICE_ID_NEPTUNE_DCSP:
2628         m = (typeof(m)){"LPe1002-SP", "PCIe",
2629                 "Obsolete, Unsupported Fibre Channel Adapter"};
2630         break;
2631     case PCI_DEVICE_ID_BMID:
2632         m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2633         break;
2634     case PCI_DEVICE_ID_BSMB:
2635         m = (typeof(m)){"LP111", "PCI-X2",
2636                 "Obsolete, Unsupported Fibre Channel Adapter"};
2637         break;
2638     case PCI_DEVICE_ID_ZEPHYR:
2639         m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2640         break;
2641     case PCI_DEVICE_ID_ZEPHYR_SCSP:
2642         m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2643         break;
2644     case PCI_DEVICE_ID_ZEPHYR_DCSP:
2645         m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2646         GE = 1;
2647         break;
2648     case PCI_DEVICE_ID_ZMID:
2649         m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2650         break;
2651     case PCI_DEVICE_ID_ZSMB:
2652         m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2653         break;
2654     case PCI_DEVICE_ID_LP101:
2655         m = (typeof(m)){"LP101", "PCI-X",
2656                 "Obsolete, Unsupported Fibre Channel Adapter"};
2657         break;
2658     case PCI_DEVICE_ID_LP10000S:
2659         m = (typeof(m)){"LP10000-S", "PCI",
2660                 "Obsolete, Unsupported Fibre Channel Adapter"};
2661         break;
2662     case PCI_DEVICE_ID_LP11000S:
2663         m = (typeof(m)){"LP11000-S", "PCI-X2",
2664                 "Obsolete, Unsupported Fibre Channel Adapter"};
2665         break;
2666     case PCI_DEVICE_ID_LPE11000S:
2667         m = (typeof(m)){"LPe11000-S", "PCIe",
2668                 "Obsolete, Unsupported Fibre Channel Adapter"};
2669         break;
2670     case PCI_DEVICE_ID_SAT:
2671         m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2672         break;
2673     case PCI_DEVICE_ID_SAT_MID:
2674         m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2675         break;
2676     case PCI_DEVICE_ID_SAT_SMB:
2677         m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2678         break;
2679     case PCI_DEVICE_ID_SAT_DCSP:
2680         m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2681         break;
2682     case PCI_DEVICE_ID_SAT_SCSP:
2683         m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2684         break;
2685     case PCI_DEVICE_ID_SAT_S:
2686         m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2687         break;
2688     case PCI_DEVICE_ID_PROTEUS_VF:
2689         m = (typeof(m)){"LPev12000", "PCIe IOV",
2690                 "Obsolete, Unsupported Fibre Channel Adapter"};
2691         break;
2692     case PCI_DEVICE_ID_PROTEUS_PF:
2693         m = (typeof(m)){"LPev12000", "PCIe IOV",
2694                 "Obsolete, Unsupported Fibre Channel Adapter"};
2695         break;
2696     case PCI_DEVICE_ID_PROTEUS_S:
2697         m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2698                 "Obsolete, Unsupported Fibre Channel Adapter"};
2699         break;
2700     case PCI_DEVICE_ID_TIGERSHARK:
2701         oneConnect = 1;
2702         m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2703         break;
2704     case PCI_DEVICE_ID_TOMCAT:
2705         oneConnect = 1;
2706         m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2707         break;
2708     case PCI_DEVICE_ID_FALCON:
2709         m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2710                 "EmulexSecure Fibre"};
2711         break;
2712     case PCI_DEVICE_ID_BALIUS:
2713         m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2714                 "Obsolete, Unsupported Fibre Channel Adapter"};
2715         break;
2716     case PCI_DEVICE_ID_LANCER_FC:
2717         m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2718         break;
2719     case PCI_DEVICE_ID_LANCER_FC_VF:
2720         m = (typeof(m)){"LPe16000", "PCIe",
2721                 "Obsolete, Unsupported Fibre Channel Adapter"};
2722         break;
2723     case PCI_DEVICE_ID_LANCER_FCOE:
2724         oneConnect = 1;
2725         m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2726         break;
2727     case PCI_DEVICE_ID_LANCER_FCOE_VF:
2728         oneConnect = 1;
2729         m = (typeof(m)){"OCe15100", "PCIe",
2730                 "Obsolete, Unsupported FCoE"};
2731         break;
2732     case PCI_DEVICE_ID_LANCER_G6_FC:
2733         m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2734         break;
2735     case PCI_DEVICE_ID_LANCER_G7_FC:
2736         m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2737         break;
2738     case PCI_DEVICE_ID_LANCER_G7P_FC:
2739         m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2740         break;
2741     case PCI_DEVICE_ID_SKYHAWK:
2742     case PCI_DEVICE_ID_SKYHAWK_VF:
2743         oneConnect = 1;
2744         m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2745         break;
2746     default:
2747         m = (typeof(m)){"Unknown", "", ""};
2748         break;
2749     }
2750 
2751     if (mdp && mdp[0] == '\0')
2752         snprintf(mdp, 79, "%s", m.name);
2753     /*
2754      * OneConnect HBAs require special processing; they are all initiators
2755      * and we put the port number on the end.
2756      */
2757     if (descp && descp[0] == '\0') {
2758         if (oneConnect)
2759             snprintf(descp, 255,
2760                 "Emulex OneConnect %s, %s Initiator %s",
2761                 m.name, m.function,
2762                 phba->Port);
2763         else if (max_speed == 0)
2764             snprintf(descp, 255,
2765                 "Emulex %s %s %s",
2766                 m.name, m.bus, m.function);
2767         else
2768             snprintf(descp, 255,
2769                 "Emulex %s %d%s %s %s",
2770                 m.name, max_speed, (GE) ? "GE" : "Gb",
2771                 m.bus, m.function);
2772     }
2773 }
2774 
2775 /**
2776  * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
2777  * @phba: pointer to lpfc hba data structure.
2778  * @pring: pointer to a IOCB ring.
2779  * @cnt: the number of IOCBs to be posted to the IOCB ring.
2780  *
2781  * This routine posts a given number of IOCBs with the associated DMA buffer
2782  * descriptors specified by the cnt argument to the given IOCB ring.
2783  *
2784  * Return codes
2785  *   The number of IOCBs NOT able to be posted to the IOCB ring.
2786  **/
2787 int
2788 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2789 {
2790     IOCB_t *icmd;
2791     struct lpfc_iocbq *iocb;
2792     struct lpfc_dmabuf *mp1, *mp2;
2793 
2794     cnt += pring->missbufcnt;
2795 
2796     /* While there are buffers to post */
2797     while (cnt > 0) {
2798         /* Allocate buffer for command iocb */
2799         iocb = lpfc_sli_get_iocbq(phba);
2800         if (iocb == NULL) {
2801             pring->missbufcnt = cnt;
2802             return cnt;
2803         }
2804         icmd = &iocb->iocb;
2805 
2806         /* 2 buffers can be posted per command */
2807         /* Allocate buffer to post */
2808         mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2809         if (mp1)
2810             mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2811         if (!mp1 || !mp1->virt) {
2812             kfree(mp1);
2813             lpfc_sli_release_iocbq(phba, iocb);
2814             pring->missbufcnt = cnt;
2815             return cnt;
2816         }
2817 
2818         INIT_LIST_HEAD(&mp1->list);
2819         /* Allocate buffer to post */
2820         if (cnt > 1) {
2821             mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2822             if (mp2)
2823                 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2824                                 &mp2->phys);
2825             if (!mp2 || !mp2->virt) {
2826                 kfree(mp2);
2827                 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2828                 kfree(mp1);
2829                 lpfc_sli_release_iocbq(phba, iocb);
2830                 pring->missbufcnt = cnt;
2831                 return cnt;
2832             }
2833 
2834             INIT_LIST_HEAD(&mp2->list);
2835         } else {
2836             mp2 = NULL;
2837         }
2838 
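             /*
              * Fill the 64-bit buffer descriptor entries (BDEs) for the one
              * or two posted buffers and queue them to the ring with a
              * CMD_QUE_RING_BUF64_CN IOCB.
              */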
2839         icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2840         icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2841         icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2842         icmd->ulpBdeCount = 1;
2843         cnt--;
2844         if (mp2) {
2845             icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2846             icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2847             icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2848             cnt--;
2849             icmd->ulpBdeCount = 2;
2850         }
2851 
2852         icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2853         icmd->ulpLe = 1;
2854 
2855         if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2856             IOCB_ERROR) {
2857             lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2858             kfree(mp1);
2859             cnt++;
2860             if (mp2) {
2861                 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2862                 kfree(mp2);
2863                 cnt++;
2864             }
2865             lpfc_sli_release_iocbq(phba, iocb);
2866             pring->missbufcnt = cnt;
2867             return cnt;
2868         }
2869         lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2870         if (mp2)
2871             lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2872     }
2873     pring->missbufcnt = 0;
2874     return 0;
2875 }
2876 
2877 /**
2878  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
2879  * @phba: pointer to lpfc hba data structure.
2880  *
2881  * This routine posts initial receive IOCB buffers to the ELS ring. The
2882  * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
2883  * set to 64 IOCBs. SLI3 only.
2884  *
2885  * Return codes
2886  *   0 - success (currently always success)
2887  **/
2888 static int
2889 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2890 {
2891     struct lpfc_sli *psli = &phba->sli;
2892 
2893     /* Ring 0, ELS / CT buffers */
2894     lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2895     /* Ring 2 - FCP no buffers needed */
2896 
2897     return 0;
2898 }
2899 
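     /* Rotate the 32-bit value V left by N bits, e.g. S(5, A) is
      * ((A << 5) | (A >> 27)); used as the SHA-1 circular left shift below.
      */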
2900 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2901 
2902 /**
2903  * lpfc_sha_init - Set up initial array of hash table entries
2904  * @HashResultPointer: pointer to an array used as the hash table.
2905  *
2906  * This routine sets up the initial values in the array of hash table entries
2907  * for the LC HBAs.
2908  **/
2909 static void
2910 lpfc_sha_init(uint32_t * HashResultPointer)
2911 {
2912     HashResultPointer[0] = 0x67452301;
2913     HashResultPointer[1] = 0xEFCDAB89;
2914     HashResultPointer[2] = 0x98BADCFE;
2915     HashResultPointer[3] = 0x10325476;
2916     HashResultPointer[4] = 0xC3D2E1F0;
2917 }
2918 
2919 /**
2920  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
2921  * @HashResultPointer: pointer to an initial/result hash table.
2922  * @HashWorkingPointer: pointer to a working hash table.
2923  *
2924  * This routine iterates an initial hash table pointed to by @HashResultPointer
2925  * with the values from the working hash table pointed to by @HashWorkingPointer.
2926  * The results are put back into the initial hash table, returned through
2927  * @HashResultPointer as the result hash table.
2928  **/
2929 static void
2930 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2931 {
2932     int t;
2933     uint32_t TEMP;
2934     uint32_t A, B, C, D, E;
2935     t = 16;
2936     do {
2937         HashWorkingPointer[t] =
2938             S(1,
2939               HashWorkingPointer[t - 3] ^
2940               HashWorkingPointer[t - 8] ^
2941               HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2942     } while (++t <= 79);
2943     t = 0;
2944     A = HashResultPointer[0];
2945     B = HashResultPointer[1];
2946     C = HashResultPointer[2];
2947     D = HashResultPointer[3];
2948     E = HashResultPointer[4];
2949 
2950     do {
2951         if (t < 20) {
2952             TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2953         } else if (t < 40) {
2954             TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2955         } else if (t < 60) {
2956             TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2957         } else {
2958             TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2959         }
2960         TEMP += S(5, A) + E + HashWorkingPointer[t];
2961         E = D;
2962         D = C;
2963         C = S(30, B);
2964         B = A;
2965         A = TEMP;
2966     } while (++t <= 79);
2967 
2968     HashResultPointer[0] += A;
2969     HashResultPointer[1] += B;
2970     HashResultPointer[2] += C;
2971     HashResultPointer[3] += D;
2972     HashResultPointer[4] += E;
2973 
2974 }
2975 
2976 /**
2977  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
2978  * @RandomChallenge: pointer to the entry of host challenge random number array.
2979  * @HashWorking: pointer to the entry of the working hash array.
2980  *
2981  * This routine calculates the working hash array referred to by @HashWorking
2982  * from the challenge random numbers associated with the host, referred to by
2983  * @RandomChallenge. The result is put into the entry of the working hash
2984  * array and returned by reference through @HashWorking.
2985  **/
2986 static void
2987 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2988 {
2989     *HashWorking = (*RandomChallenge ^ *HashWorking);
2990 }
2991 
2992 /**
2993  * lpfc_hba_init - Perform special handling for LC HBA initialization
2994  * @phba: pointer to lpfc hba data structure.
2995  * @hbainit: pointer to an array of unsigned 32-bit integers.
2996  *
2997  * This routine performs the special handling for LC HBA initialization.
2998  **/
2999 void
3000 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
3001 {
3002     int t;
3003     uint32_t *HashWorking;
3004     uint32_t *pwwnn = (uint32_t *) phba->wwnn;
3005 
3006     HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
3007     if (!HashWorking)
3008         return;
3009 
3010     HashWorking[0] = HashWorking[78] = *pwwnn++;
3011     HashWorking[1] = HashWorking[79] = *pwwnn;
3012 
3013     for (t = 0; t < 7; t++)
3014         lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
3015 
3016     lpfc_sha_init(hbainit);
3017     lpfc_sha_iterate(hbainit, HashWorking);
3018     kfree(HashWorking);
3019 }
3020 
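/* Illustrative sketch only, not part of the driver: how the pieces above fit
 * together for one 64-bit WWNN and a 7-word random challenge.  The names
 * example_challenge_hash(), wwnn_words[] and challenge[] are hypothetical
 * stand-ins for lpfc_hba_init(), phba->wwnn and phba->RandomData; the real
 * routine allocates the 80-word schedule with kcalloc() rather than using
 * stack space.
 */
static void example_challenge_hash(const uint32_t wwnn_words[2],
                                   const uint32_t challenge[7],
                                   uint32_t digest[5])
{
    uint32_t working[80] = { 0 };
    int t;

    /* Seed the schedule with the WWNN words, as lpfc_hba_init() does */
    working[0] = working[78] = wwnn_words[0];
    working[1] = working[79] = wwnn_words[1];

    /* Fold in the 7-word random challenge */
    for (t = 0; t < 7; t++)
        working[t] ^= challenge[t];

    lpfc_sha_init(digest);      /* standard SHA-1 initial values */
    lpfc_sha_iterate(digest, working);
}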
3021 /**
3022  * lpfc_cleanup - Performs vport cleanups before deleting a vport
3023  * @vport: pointer to a virtual N_Port data structure.
3024  *
3025  * This routine performs the necessary cleanups before deleting the @vport.
3026  * It invokes the discovery state machine to perform necessary state
3027  * transitions and to release the ndlps associated with the @vport. Note,
3028  * the physical port is treated as @vport 0.
3029  **/
3030 void
3031 lpfc_cleanup(struct lpfc_vport *vport)
3032 {
3033     struct lpfc_hba   *phba = vport->phba;
3034     struct lpfc_nodelist *ndlp, *next_ndlp;
3035     int i = 0;
3036 
3037     if (phba->link_state > LPFC_LINK_DOWN)
3038         lpfc_port_link_failure(vport);
3039 
3040     /* Clean up VMID resources */
3041     if (lpfc_is_vmid_enabled(phba))
3042         lpfc_vmid_vport_cleanup(vport);
3043 
3044     list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3045         if (vport->port_type != LPFC_PHYSICAL_PORT &&
3046             ndlp->nlp_DID == Fabric_DID) {
3047             /* Just free up ndlp with Fabric_DID for vports */
3048             lpfc_nlp_put(ndlp);
3049             continue;
3050         }
3051 
3052         if (ndlp->nlp_DID == Fabric_Cntl_DID &&
3053             ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3054             lpfc_nlp_put(ndlp);
3055             continue;
3056         }
3057 
3058         /* Fabric Ports not in UNMAPPED state are cleaned up in the
3059          * DEVICE_RM event.
3060          */
3061         if (ndlp->nlp_type & NLP_FABRIC &&
3062             ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
3063             lpfc_disc_state_machine(vport, ndlp, NULL,
3064                     NLP_EVT_DEVICE_RECOVERY);
3065 
3066         if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
3067             lpfc_disc_state_machine(vport, ndlp, NULL,
3068                     NLP_EVT_DEVICE_RM);
3069     }
3070 
3071     /* This is a special case flush to return all
3072      * IOs before entering this loop. There are
3073      * two points in the code where a flush is
3074      * avoided if the FC_UNLOADING flag is set:
3075      * one is in the multipool destroy
3076      * (this prevents a crash) and the other is
3077      * in the nvme abort handler (this also
3078      * prevents a crash). Both of these exceptions
3079      * are cases where the slot is still accessible.
3080      * The flush here is done only when the pci
3081      * slot is offline.
3082      */
3083     if (vport->load_flag & FC_UNLOADING &&
3084         pci_channel_offline(phba->pcidev))
3085         lpfc_sli_flush_io_rings(vport->phba);
3086 
3087     /* At this point, ALL ndlp's should be gone
3088      * because of the previous NLP_EVT_DEVICE_RM.
3089      * Lets wait for this to happen, if needed.
3090      */
3091     while (!list_empty(&vport->fc_nodes)) {
3092         if (i++ > 3000) {
3093             lpfc_printf_vlog(vport, KERN_ERR,
3094                      LOG_TRACE_EVENT,
3095                 "0233 Nodelist not empty\n");
3096             list_for_each_entry_safe(ndlp, next_ndlp,
3097                         &vport->fc_nodes, nlp_listp) {
3098                 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3099                          LOG_DISCOVERY,
3100                          "0282 did:x%x ndlp:x%px "
3101                          "refcnt:%d xflags x%x nflag x%x\n",
3102                          ndlp->nlp_DID, (void *)ndlp,
3103                          kref_read(&ndlp->kref),
3104                          ndlp->fc4_xpt_flags,
3105                          ndlp->nlp_flag);
3106             }
3107             break;
3108         }
3109 
3110         /* Wait for any activity on ndlps to settle */
3111         msleep(10);
3112     }
3113     lpfc_cleanup_vports_rrqs(vport, NULL);
3114 }
3115 
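/* Note on the wait loop above (illustrative arithmetic only): the loop polls
 * every 10 ms and gives up after roughly 3000 iterations, i.e. about 30
 * seconds, before logging message 0233 and dumping the remaining nodes.
 */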
3116 /**
3117  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
3118  * @vport: pointer to a virtual N_Port data structure.
3119  *
3120  * This routine stops all the timers associated with a @vport. This function
3121  * is invoked before disabling or deleting a @vport. Note that the physical
3122  * port is treated as @vport 0.
3123  **/
3124 void
3125 lpfc_stop_vport_timers(struct lpfc_vport *vport)
3126 {
3127     del_timer_sync(&vport->els_tmofunc);
3128     del_timer_sync(&vport->delayed_disc_tmo);
3129     lpfc_can_disctmo(vport);
3130     return;
3131 }
3132 
3133 /**
3134  * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3135  * @phba: pointer to lpfc hba data structure.
3136  *
3137  * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
3138  * caller of this routine should already hold the host lock.
3139  **/
3140 void
3141 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3142 {
3143     /* Clear pending FCF rediscovery wait flag */
3144     phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3145 
3146     /* Now, try to stop the timer */
3147     del_timer(&phba->fcf.redisc_wait);
3148 }
3149 
3150 /**
3151  * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
3152  * @phba: pointer to lpfc hba data structure.
3153  *
3154  * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
3155  * checks whether the FCF rediscovery wait timer is pending with the host
3156  * lock held before proceeding with disabling the timer and clearing the
3157  * wait timer pending flag.
3158  **/
3159 void
3160 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3161 {
3162     spin_lock_irq(&phba->hbalock);
3163     if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3164         /* FCF rediscovery timer already fired or stopped */
3165         spin_unlock_irq(&phba->hbalock);
3166         return;
3167     }
3168     __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3169     /* Clear failover in progress flags */
3170     phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3171     spin_unlock_irq(&phba->hbalock);
3172 }
3173 
3174 /**
3175  * lpfc_cmf_stop - Stop CMF processing
3176  * @phba: pointer to lpfc hba data structure.
3177  *
3178  * This is called when the link goes down or if CMF mode is turned OFF.
3179  * It is also called when going offline or unloaded just before the
3180  * congestion info buffer is unregistered.
3181  **/
3182 void
3183 lpfc_cmf_stop(struct lpfc_hba *phba)
3184 {
3185     int cpu;
3186     struct lpfc_cgn_stat *cgs;
3187 
3188     /* We only do something if CMF is enabled */
3189     if (!phba->sli4_hba.pc_sli4_params.cmf)
3190         return;
3191 
3192     lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3193             "6221 Stop CMF / Cancel Timer\n");
3194 
3195     /* Cancel the CMF timer */
3196     hrtimer_cancel(&phba->cmf_timer);
3197 
3198     /* Zero CMF counters */
3199     atomic_set(&phba->cmf_busy, 0);
3200     for_each_present_cpu(cpu) {
3201         cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3202         atomic64_set(&cgs->total_bytes, 0);
3203         atomic64_set(&cgs->rcv_bytes, 0);
3204         atomic_set(&cgs->rx_io_cnt, 0);
3205         atomic64_set(&cgs->rx_latency, 0);
3206     }
3207     atomic_set(&phba->cmf_bw_wait, 0);
3208 
3209     /* Resume any blocked IO - Queue unblock on workqueue */
3210     queue_work(phba->wq, &phba->unblock_request_work);
3211 }
3212 
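/* Illustrative sketch only, not part of the driver: the per-CPU lpfc_cgn_stat
 * counters cleared in lpfc_cmf_stop() above are normally consumed by summing
 * them across CPUs.  example_sum_rcv_bytes() is a hypothetical helper showing
 * that read-side pattern.
 */
static u64 example_sum_rcv_bytes(struct lpfc_hba *phba)
{
    struct lpfc_cgn_stat *cgs;
    u64 total = 0;
    int cpu;

    for_each_present_cpu(cpu) {
        cgs = per_cpu_ptr(phba->cmf_stat, cpu);
        total += atomic64_read(&cgs->rcv_bytes);
    }
    return total;
}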
3213 static inline uint64_t
3214 lpfc_get_max_line_rate(struct lpfc_hba *phba)
3215 {
3216     uint64_t rate = lpfc_sli_port_speed_get(phba);
3217 
3218     return ((((unsigned long)rate) * 1024 * 1024) / 10);
3219 }
3220 
3221 void
3222 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3223 {
3224     lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3225             "6223 Signal CMF init\n");
3226 
3227     /* Use the new fc_linkspeed to recalculate */
3228     phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3229     phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3230     phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3231                         phba->cmf_interval_rate, 1000);
3232     phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3233 
3234     /* This is a signal to firmware to sync up CMF BW with link speed */
3235     lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3236 }
3237 
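/* Worked example (illustrative only; assumes lpfc_sli_port_speed_get()
 * reports link speed in megabits per second): lpfc_get_max_line_rate()
 * converts that figure to bytes per second as rate * 1024 * 1024 / 10,
 * the divide-by-10 approximating ten bit-times per byte on the wire.
 * lpfc_cmf_signal_init() then scales it to a per-interval budget:
 *
 *	cmf_link_byte_count = cmf_max_line_rate * LPFC_CMF_INTERVAL / 1000
 *
 * i.e. bytes per second times the timer interval in milliseconds, divided
 * by 1000 ms per second, gives the byte budget for one CMF timer interval.
 */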
3238 /**
3239  * lpfc_cmf_start - Start CMF processing
3240  * @phba: pointer to lpfc hba data structure.
3241  *
3242  * This is called when the link comes up or if CMF mode is changed
3243  * from OFF to Monitor or Managed.
3244  **/
3245 void
3246 lpfc_cmf_start(struct lpfc_hba *phba)
3247 {
3248     struct lpfc_cgn_stat *cgs;
3249     int cpu;
3250 
3251     /* We only do something if CMF is enabled */
3252     if (!phba->sli4_hba.pc_sli4_params.cmf ||
3253         phba->cmf_active_mode == LPFC_CFG_OFF)
3254         return;
3255 
3256     /* Reinitialize congestion buffer info */
3257     lpfc_init_congestion_buf(phba);
3258 
3259     atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3260     atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3261     atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3262     atomic_set(&phba->cgn_sync_warn_cnt, 0);
3263 
3264     atomic_set(&phba->cmf_busy, 0);
3265     for_each_present_cpu(cpu) {
3266         cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3267         atomic64_set(&cgs->total_bytes, 0);
3268         atomic64_set(&cgs->rcv_bytes, 0);
3269         atomic_set(&cgs->rx_io_cnt, 0);
3270         atomic64_set(&cgs->rx_latency, 0);
3271     }
3272     phba->cmf_latency.tv_sec = 0;
3273     phba->cmf_latency.tv_nsec = 0;
3274 
3275     lpfc_cmf_signal_init(phba);
3276 
3277     lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3278             "6222 Start CMF / Timer\n");
3279 
3280     phba->cmf_timer_cnt = 0;
3281     hrtimer_start(&phba->cmf_timer,
3282               ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3283               HRTIMER_MODE_REL);
3284     /* Setup for latency check in IO cmpl routines */
3285     ktime_get_real_ts64(&phba->cmf_latency);
3286 
3287     atomic_set(&phba->cmf_bw_wait, 0);
3288     atomic_set(&phba->cmf_stop_io, 0);
3289 }
3290 
3291 /**
3292  * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
3293  * @phba: pointer to lpfc hba data structure.
3294  *
3295  * This routine stops all the timers associated with a HBA. This function is
3296  * invoked before either putting a HBA offline or unloading the driver.
3297  **/
3298 void
3299 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3300 {
3301     if (phba->pport)
3302         lpfc_stop_vport_timers(phba->pport);
3303     cancel_delayed_work_sync(&phba->eq_delay_work);
3304     cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3305     del_timer_sync(&phba->sli.mbox_tmo);
3306     del_timer_sync(&phba->fabric_block_timer);
3307     del_timer_sync(&phba->eratt_poll);
3308     del_timer_sync(&phba->hb_tmofunc);
3309     if (phba->sli_rev == LPFC_SLI_REV4) {
3310         del_timer_sync(&phba->rrq_tmr);
3311         phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3312     }
3313     phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3314 
3315     switch (phba->pci_dev_grp) {
3316     case LPFC_PCI_DEV_LP:
3317         /* Stop any LightPulse device specific driver timers */
3318         del_timer_sync(&phba->fcp_poll_timer);
3319         break;
3320     case LPFC_PCI_DEV_OC:
3321         /* Stop any OneConnect device specific driver timers */
3322         lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3323         break;
3324     default:
3325         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3326                 "0297 Invalid device group (x%x)\n",
3327                 phba->pci_dev_grp);
3328         break;
3329     }
3330     return;
3331 }
3332 
3333 /**
3334  * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
3335  * @phba: pointer to lpfc hba data structure.
3336  * @mbx_action: flag for mailbox no wait action.
3337  *
3338  * This routine marks a HBA's management interface as blocked. Once the HBA's
3339  * management interface is marked as blocked, all user space access to
3340  * the HBA, whether from the sysfs interface or the libdfc interface, is
3341  * blocked. The HBA is set to block the management interface when the
3342  * driver prepares the HBA interface for online or offline.
3343  **/
3344 static void
3345 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3346 {
3347     unsigned long iflag;
3348     uint8_t actcmd = MBX_HEARTBEAT;
3349     unsigned long timeout;
3350 
3351     spin_lock_irqsave(&phba->hbalock, iflag);
3352     phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3353     spin_unlock_irqrestore(&phba->hbalock, iflag);
3354     if (mbx_action == LPFC_MBX_NO_WAIT)
3355         return;
3356     timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3357     spin_lock_irqsave(&phba->hbalock, iflag);
3358     if (phba->sli.mbox_active) {
3359         actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3360         /* Determine how long we might wait for the active mailbox
3361          * command to be gracefully completed by firmware.
3362          */
3363         timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3364                 phba->sli.mbox_active) * 1000) + jiffies;
3365     }
3366     spin_unlock_irqrestore(&phba->hbalock, iflag);
3367 
3368     /* Wait for the outstanding mailbox command to complete */
3369     while (phba->sli.mbox_active) {
3370         /* Check active mailbox complete status every 2ms */
3371         msleep(2);
3372         if (time_after(jiffies, timeout)) {
3373             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3374                     "2813 Mgmt IO is Blocked %x "
3375                     "- mbox cmd %x still active\n",
3376                     phba->sli.sli_flag, actcmd);
3377             break;
3378         }
3379     }
3380 }
3381 
3382 /**
3383  * lpfc_sli4_node_prep - Assign RPIs for active nodes.
3384  * @phba: pointer to lpfc hba data structure.
3385  *
3386  * Allocate RPIs for all active remote nodes. This is needed whenever
3387  * an SLI4 adapter is reset and the driver is not unloading. Its purpose
3388  * is to fix up the temporary rpi assignments.
3389  **/
3390 void
3391 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3392 {
3393     struct lpfc_nodelist  *ndlp, *next_ndlp;
3394     struct lpfc_vport **vports;
3395     int i, rpi;
3396 
3397     if (phba->sli_rev != LPFC_SLI_REV4)
3398         return;
3399 
3400     vports = lpfc_create_vport_work_array(phba);
3401     if (vports == NULL)
3402         return;
3403 
3404     for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3405         if (vports[i]->load_flag & FC_UNLOADING)
3406             continue;
3407 
3408         list_for_each_entry_safe(ndlp, next_ndlp,
3409                      &vports[i]->fc_nodes,
3410                      nlp_listp) {
3411             rpi = lpfc_sli4_alloc_rpi(phba);
3412             if (rpi == LPFC_RPI_ALLOC_ERROR) {
3413                 /* TODO print log? */
3414                 continue;
3415             }
3416             ndlp->nlp_rpi = rpi;
3417             lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3418                      LOG_NODE | LOG_DISCOVERY,
3419                      "0009 Assign RPI x%x to ndlp x%px "
3420                      "DID:x%06x flg:x%x\n",
3421                      ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3422                      ndlp->nlp_flag);
3423         }
3424     }
3425     lpfc_destroy_vport_work_array(phba, vports);
3426 }
3427 
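/* Illustrative sketch only, not part of the driver: lpfc_sli4_node_prep()
 * above, and several routines below, walk the vports through the same
 * work-array idiom.  example_vport_walk() is a hypothetical helper showing
 * the bare pattern: take a snapshot array, iterate it, then release it.
 */
static void example_vport_walk(struct lpfc_hba *phba)
{
    struct lpfc_vport **vports;
    int i;

    vports = lpfc_create_vport_work_array(phba);
    if (vports == NULL)
        return;

    for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
        /* per-vport work goes here */
    }

    lpfc_destroy_vport_work_array(phba, vports);
}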
3428 /**
3429  * lpfc_create_expedite_pool - create expedite pool
3430  * @phba: pointer to lpfc hba data structure.
3431  *
3432  * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0
3433  * to expedite pool. Mark them as expedite.
3434  **/
3435 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3436 {
3437     struct lpfc_sli4_hdw_queue *qp;
3438     struct lpfc_io_buf *lpfc_ncmd;
3439     struct lpfc_io_buf *lpfc_ncmd_next;
3440     struct lpfc_epd_pool *epd_pool;
3441     unsigned long iflag;
3442 
3443     epd_pool = &phba->epd_pool;
3444     qp = &phba->sli4_hba.hdwq[0];
3445 
3446     spin_lock_init(&epd_pool->lock);
3447     spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3448     spin_lock(&epd_pool->lock);
3449     INIT_LIST_HEAD(&epd_pool->list);
3450     list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3451                  &qp->lpfc_io_buf_list_put, list) {
3452         list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3453         lpfc_ncmd->expedite = true;
3454         qp->put_io_bufs--;
3455         epd_pool->count++;
3456         if (epd_pool->count >= XRI_BATCH)
3457             break;
3458     }
3459     spin_unlock(&epd_pool->lock);
3460     spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3461 }
3462 
3463 /**
3464  * lpfc_destroy_expedite_pool - destroy expedite pool
3465  * @phba: pointer to lpfc hba data structure.
3466  *
3467  * This routine returns XRIs from the expedite pool to lpfc_io_buf_list_put
3468  * of HWQ 0 and clears the expedite mark.
3469  **/
3470 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3471 {
3472     struct lpfc_sli4_hdw_queue *qp;
3473     struct lpfc_io_buf *lpfc_ncmd;
3474     struct lpfc_io_buf *lpfc_ncmd_next;
3475     struct lpfc_epd_pool *epd_pool;
3476     unsigned long iflag;
3477 
3478     epd_pool = &phba->epd_pool;
3479     qp = &phba->sli4_hba.hdwq[0];
3480 
3481     spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3482     spin_lock(&epd_pool->lock);
3483     list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3484                  &epd_pool->list, list) {
3485         list_move_tail(&lpfc_ncmd->list,
3486                    &qp->lpfc_io_buf_list_put);
3487         lpfc_ncmd->expedite = false;
3488         qp->put_io_bufs++;
3489         epd_pool->count--;
3490     }
3491     spin_unlock(&epd_pool->lock);
3492     spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3493 }
3494 
3495 /**
3496  * lpfc_create_multixri_pools - create multi-XRI pools
3497  * @phba: pointer to lpfc hba data structure.
3498  *
3499  * This routine initializes the public and private pools per HWQ. Then, it
3500  * moves XRIs from lpfc_io_buf_list_put to the public pool. High and low
3501  * watermarks are also initialized.
3502  **/
3503 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3504 {
3505     u32 i, j;
3506     u32 hwq_count;
3507     u32 count_per_hwq;
3508     struct lpfc_io_buf *lpfc_ncmd;
3509     struct lpfc_io_buf *lpfc_ncmd_next;
3510     unsigned long iflag;
3511     struct lpfc_sli4_hdw_queue *qp;
3512     struct lpfc_multixri_pool *multixri_pool;
3513     struct lpfc_pbl_pool *pbl_pool;
3514     struct lpfc_pvt_pool *pvt_pool;
3515 
3516     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3517             "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3518             phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3519             phba->sli4_hba.io_xri_cnt);
3520 
3521     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3522         lpfc_create_expedite_pool(phba);
3523 
3524     hwq_count = phba->cfg_hdw_queue;
3525     count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3526 
3527     for (i = 0; i < hwq_count; i++) {
3528         multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3529 
3530         if (!multixri_pool) {
3531             lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3532                     "1238 Failed to allocate memory for "
3533                     "multixri_pool\n");
3534 
3535             if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3536                 lpfc_destroy_expedite_pool(phba);
3537 
3538             j = 0;
3539             while (j < i) {
3540                 qp = &phba->sli4_hba.hdwq[j];
3541                 kfree(qp->p_multixri_pool);
3542                 j++;
3543             }
3544             phba->cfg_xri_rebalancing = 0;
3545             return;
3546         }
3547 
3548         qp = &phba->sli4_hba.hdwq[i];
3549         qp->p_multixri_pool = multixri_pool;
3550 
3551         multixri_pool->xri_limit = count_per_hwq;
3552         multixri_pool->rrb_next_hwqid = i;
3553 
3554         /* Deal with public free xri pool */
3555         pbl_pool = &multixri_pool->pbl_pool;
3556         spin_lock_init(&pbl_pool->lock);
3557         spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3558         spin_lock(&pbl_pool->lock);
3559         INIT_LIST_HEAD(&pbl_pool->list);
3560         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3561                      &qp->lpfc_io_buf_list_put, list) {
3562             list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3563             qp->put_io_bufs--;
3564             pbl_pool->count++;
3565         }
3566         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3567                 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3568                 pbl_pool->count, i);
3569         spin_unlock(&pbl_pool->lock);
3570         spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3571 
3572         /* Deal with private free xri pool */
3573         pvt_pool = &multixri_pool->pvt_pool;
3574         pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3575         pvt_pool->low_watermark = XRI_BATCH;
3576         spin_lock_init(&pvt_pool->lock);
3577         spin_lock_irqsave(&pvt_pool->lock, iflag);
3578         INIT_LIST_HEAD(&pvt_pool->list);
3579         pvt_pool->count = 0;
3580         spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3581     }
3582 }
3583 
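/* Worked example (illustrative only): with cfg_hdw_queue = 16 and
 * io_xri_cnt = 2048 common XRIs, lpfc_create_multixri_pools() above gives
 * each hardware queue xri_limit = 2048 / 16 = 128, a private-pool high
 * watermark of 128 / 2 = 64, and a low watermark of XRI_BATCH.
 */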
3584 /**
3585  * lpfc_destroy_multixri_pools - destroy multi-XRI pools
3586  * @phba: pointer to lpfc hba data structure.
3587  *
3588  * This routine returns XRIs from the public/private pools to lpfc_io_buf_list_put.
3589  **/
3590 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3591 {
3592     u32 i;
3593     u32 hwq_count;
3594     struct lpfc_io_buf *lpfc_ncmd;
3595     struct lpfc_io_buf *lpfc_ncmd_next;
3596     unsigned long iflag;
3597     struct lpfc_sli4_hdw_queue *qp;
3598     struct lpfc_multixri_pool *multixri_pool;
3599     struct lpfc_pbl_pool *pbl_pool;
3600     struct lpfc_pvt_pool *pvt_pool;
3601 
3602     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3603         lpfc_destroy_expedite_pool(phba);
3604 
3605     if (!(phba->pport->load_flag & FC_UNLOADING))
3606         lpfc_sli_flush_io_rings(phba);
3607 
3608     hwq_count = phba->cfg_hdw_queue;
3609 
3610     for (i = 0; i < hwq_count; i++) {
3611         qp = &phba->sli4_hba.hdwq[i];
3612         multixri_pool = qp->p_multixri_pool;
3613         if (!multixri_pool)
3614             continue;
3615 
3616         qp->p_multixri_pool = NULL;
3617 
3618         spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3619 
3620         /* Deal with public free xri pool */
3621         pbl_pool = &multixri_pool->pbl_pool;
3622         spin_lock(&pbl_pool->lock);
3623 
3624         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3625                 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3626                 pbl_pool->count, i);
3627 
3628         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3629                      &pbl_pool->list, list) {
3630             list_move_tail(&lpfc_ncmd->list,
3631                        &qp->lpfc_io_buf_list_put);
3632             qp->put_io_bufs++;
3633             pbl_pool->count--;
3634         }
3635 
3636         INIT_LIST_HEAD(&pbl_pool->list);
3637         pbl_pool->count = 0;
3638 
3639         spin_unlock(&pbl_pool->lock);
3640 
3641         /* Deal with private free xri pool */
3642         pvt_pool = &multixri_pool->pvt_pool;
3643         spin_lock(&pvt_pool->lock);
3644 
3645         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3646                 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3647                 pvt_pool->count, i);
3648 
3649         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3650                      &pvt_pool->list, list) {
3651             list_move_tail(&lpfc_ncmd->list,
3652                        &qp->lpfc_io_buf_list_put);
3653             qp->put_io_bufs++;
3654             pvt_pool->count--;
3655         }
3656 
3657         INIT_LIST_HEAD(&pvt_pool->list);
3658         pvt_pool->count = 0;
3659 
3660         spin_unlock(&pvt_pool->lock);
3661         spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3662 
3663         kfree(multixri_pool);
3664     }
3665 }
3666 
3667 /**
3668  * lpfc_online - Initialize and bring a HBA online
3669  * @phba: pointer to lpfc hba data structure.
3670  *
3671  * This routine initializes the HBA and brings a HBA online. During this
3672  * process, the management interface is blocked to prevent user space access
3673  * to the HBA from interfering with the driver initialization.
3674  *
3675  * Return codes
3676  *   0 - successful
3677  *   1 - failed
3678  **/
3679 int
3680 lpfc_online(struct lpfc_hba *phba)
3681 {
3682     struct lpfc_vport *vport;
3683     struct lpfc_vport **vports;
3684     int i, error = 0;
3685     bool vpis_cleared = false;
3686 
3687     if (!phba)
3688         return 0;
3689     vport = phba->pport;
3690 
3691     if (!(vport->fc_flag & FC_OFFLINE_MODE))
3692         return 0;
3693 
3694     lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3695             "0458 Bring Adapter online\n");
3696 
3697     lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3698 
3699     if (phba->sli_rev == LPFC_SLI_REV4) {
3700         if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3701             lpfc_unblock_mgmt_io(phba);
3702             return 1;
3703         }
3704         spin_lock_irq(&phba->hbalock);
3705         if (!phba->sli4_hba.max_cfg_param.vpi_used)
3706             vpis_cleared = true;
3707         spin_unlock_irq(&phba->hbalock);
3708 
3709         /* Reestablish the local initiator port.
3710          * The offline process destroyed the previous lport.
3711          */
3712         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3713                 !phba->nvmet_support) {
3714             error = lpfc_nvme_create_localport(phba->pport);
3715             if (error)
3716                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3717                     "6132 NVME restore reg failed "
3718                     "on nvmei error x%x\n", error);
3719         }
3720     } else {
3721         lpfc_sli_queue_init(phba);
3722         if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3723             lpfc_unblock_mgmt_io(phba);
3724             return 1;
3725         }
3726     }
3727 
3728     vports = lpfc_create_vport_work_array(phba);
3729     if (vports != NULL) {
3730         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3731             struct Scsi_Host *shost;
3732             shost = lpfc_shost_from_vport(vports[i]);
3733             spin_lock_irq(shost->host_lock);
3734             vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3735             if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3736                 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3737             if (phba->sli_rev == LPFC_SLI_REV4) {
3738                 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3739                 if ((vpis_cleared) &&
3740                     (vports[i]->port_type !=
3741                     LPFC_PHYSICAL_PORT))
3742                     vports[i]->vpi = 0;
3743             }
3744             spin_unlock_irq(shost->host_lock);
3745         }
3746     }
3747     lpfc_destroy_vport_work_array(phba, vports);
3748 
3749     if (phba->cfg_xri_rebalancing)
3750         lpfc_create_multixri_pools(phba);
3751 
3752     lpfc_cpuhp_add(phba);
3753 
3754     lpfc_unblock_mgmt_io(phba);
3755     return 0;
3756 }
3757 
3758 /**
3759  * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
3760  * @phba: pointer to lpfc hba data structure.
3761  *
3762  * This routine marks a HBA's management interface as not blocked. Once the
3763  * HBA's management interface is marked as not blocked, all user space
3764  * access to the HBA, whether from the sysfs interface or the libdfc
3765  * interface, is allowed. The HBA is set to block the management interface
3766  * when the driver prepares the HBA interface for online or offline and then
3767  * set to unblock the management interface afterwards.
3768  **/
3769 void
3770 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3771 {
3772     unsigned long iflag;
3773 
3774     spin_lock_irqsave(&phba->hbalock, iflag);
3775     phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3776     spin_unlock_irqrestore(&phba->hbalock, iflag);
3777 }
3778 
3779 /**
3780  * lpfc_offline_prep - Prepare a HBA to be brought offline
3781  * @phba: pointer to lpfc hba data structure.
3782  * @mbx_action: flag for mailbox shutdown action.
3783  *
3784  * This routine is invoked to prepare a HBA to be brought offline. It performs
3785  * unregistration login to all the nodes on all vports and flushes the mailbox
3786  * queue to make it ready to be brought offline.
3787  **/
3788 void
3789 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3790 {
3791     struct lpfc_vport *vport = phba->pport;
3792     struct lpfc_nodelist  *ndlp, *next_ndlp;
3793     struct lpfc_vport **vports;
3794     struct Scsi_Host *shost;
3795     int i;
3796     int offline;
3797     bool hba_pci_err;
3798 
3799     if (vport->fc_flag & FC_OFFLINE_MODE)
3800         return;
3801 
3802     lpfc_block_mgmt_io(phba, mbx_action);
3803 
3804     lpfc_linkdown(phba);
3805 
3806     offline =  pci_channel_offline(phba->pcidev);
3807     hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3808 
3809     /* Issue an unreg_login to all nodes on all vports */
3810     vports = lpfc_create_vport_work_array(phba);
3811     if (vports != NULL) {
3812         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3813             if (vports[i]->load_flag & FC_UNLOADING)
3814                 continue;
3815             shost = lpfc_shost_from_vport(vports[i]);
3816             spin_lock_irq(shost->host_lock);
3817             vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3818             vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3819             vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3820             spin_unlock_irq(shost->host_lock);
3821 
3822             shost = lpfc_shost_from_vport(vports[i]);
3823             list_for_each_entry_safe(ndlp, next_ndlp,
3824                          &vports[i]->fc_nodes,
3825                          nlp_listp) {
3826 
3827                 spin_lock_irq(&ndlp->lock);
3828                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3829                 spin_unlock_irq(&ndlp->lock);
3830 
3831                 if (offline || hba_pci_err) {
3832                     spin_lock_irq(&ndlp->lock);
3833                     ndlp->nlp_flag &= ~(NLP_UNREG_INP |
3834                                 NLP_RPI_REGISTERED);
3835                     spin_unlock_irq(&ndlp->lock);
3836                     if (phba->sli_rev == LPFC_SLI_REV4)
3837                         lpfc_sli_rpi_release(vports[i],
3838                                      ndlp);
3839                 } else {
3840                     lpfc_unreg_rpi(vports[i], ndlp);
3841                 }
3842                 /*
3843                  * Whenever an SLI4 port goes offline, free the
3844                  * RPI. Get a new RPI when the adapter port
3845                  * comes back online.
3846                  */
3847                 if (phba->sli_rev == LPFC_SLI_REV4) {
3848                     lpfc_printf_vlog(vports[i], KERN_INFO,
3849                          LOG_NODE | LOG_DISCOVERY,
3850                          "0011 Free RPI x%x on "
3851                          "ndlp: x%px did x%x\n",
3852                          ndlp->nlp_rpi, ndlp,
3853                          ndlp->nlp_DID);
3854                     lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3855                     ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3856                 }
3857 
3858                 if (ndlp->nlp_type & NLP_FABRIC) {
3859                     lpfc_disc_state_machine(vports[i], ndlp,
3860                         NULL, NLP_EVT_DEVICE_RECOVERY);
3861 
3862                     /* Don't remove the node unless the node
3863                      * has been unregistered with the
3864                      * transport, and we're not in recovery
3865                      * before dev_loss_tmo triggered.
3866                      * Otherwise, let dev_loss take care of
3867                      * the node.
3868                      */
3869                     if (!(ndlp->save_flags &
3870                           NLP_IN_RECOV_POST_DEV_LOSS) &&
3871                         !(ndlp->fc4_xpt_flags &
3872                           (NVME_XPT_REGD | SCSI_XPT_REGD)))
3873                         lpfc_disc_state_machine
3874                             (vports[i], ndlp,
3875                              NULL,
3876                              NLP_EVT_DEVICE_RM);
3877                 }
3878             }
3879         }
3880     }
3881     lpfc_destroy_vport_work_array(phba, vports);
3882 
3883     lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3884 
3885     if (phba->wq)
3886         flush_workqueue(phba->wq);
3887 }
3888 
3889 /**
3890  * lpfc_offline - Bring a HBA offline
3891  * @phba: pointer to lpfc hba data structure.
3892  *
3893  * This routine actually brings a HBA offline. It stops all the timers
3894  * associated with the HBA, brings down the SLI layer, and eventually
3895  * marks the HBA as in offline state for the upper layer protocol.
3896  **/
3897 void
3898 lpfc_offline(struct lpfc_hba *phba)
3899 {
3900     struct Scsi_Host  *shost;
3901     struct lpfc_vport **vports;
3902     int i;
3903 
3904     if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3905         return;
3906 
3907     /* stop port and all timers associated with this hba */
3908     lpfc_stop_port(phba);
3909 
3910     /* Tear down the local and target port registrations.  The
3911      * nvme transports need to cleanup.
3912      * nvme transports need to clean up.
3913     lpfc_nvmet_destroy_targetport(phba);
3914     lpfc_nvme_destroy_localport(phba->pport);
3915 
3916     vports = lpfc_create_vport_work_array(phba);
3917     if (vports != NULL)
3918         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3919             lpfc_stop_vport_timers(vports[i]);
3920     lpfc_destroy_vport_work_array(phba, vports);
3921     lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3922             "0460 Bring Adapter offline\n");
3923     /* Bring down the SLI Layer and cleanup.  The HBA is offline
3924        now.  */
3925     lpfc_sli_hba_down(phba);
3926     spin_lock_irq(&phba->hbalock);
3927     phba->work_ha = 0;
3928     spin_unlock_irq(&phba->hbalock);
3929     vports = lpfc_create_vport_work_array(phba);
3930     if (vports != NULL)
3931         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3932             shost = lpfc_shost_from_vport(vports[i]);
3933             spin_lock_irq(shost->host_lock);
3934             vports[i]->work_port_events = 0;
3935             vports[i]->fc_flag |= FC_OFFLINE_MODE;
3936             spin_unlock_irq(shost->host_lock);
3937         }
3938     lpfc_destroy_vport_work_array(phba, vports);
3939     /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
3940      * in hba_unset
3941      */
3942     if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3943         __lpfc_cpuhp_remove(phba);
3944 
3945     if (phba->cfg_xri_rebalancing)
3946         lpfc_destroy_multixri_pools(phba);
3947 }
3948 
3949 /**
3950  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
3951  * @phba: pointer to lpfc hba data structure.
3952  *
3953  * This routine is to free all the SCSI buffers and IOCBs from the driver
3954  * list back to the kernel. It is called from lpfc_pci_remove_one to free
3955  * the internal resources before the device is removed from the system.
3956  **/
3957 static void
3958 lpfc_scsi_free(struct lpfc_hba *phba)
3959 {
3960     struct lpfc_io_buf *sb, *sb_next;
3961 
3962     if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3963         return;
3964 
3965     spin_lock_irq(&phba->hbalock);
3966 
3967     /* Release all the lpfc_scsi_bufs maintained by this host. */
3968 
3969     spin_lock(&phba->scsi_buf_list_put_lock);
3970     list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3971                  list) {
3972         list_del(&sb->list);
3973         dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3974                   sb->dma_handle);
3975         kfree(sb);
3976         phba->total_scsi_bufs--;
3977     }
3978     spin_unlock(&phba->scsi_buf_list_put_lock);
3979 
3980     spin_lock(&phba->scsi_buf_list_get_lock);
3981     list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3982                  list) {
3983         list_del(&sb->list);
3984         dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3985                   sb->dma_handle);
3986         kfree(sb);
3987         phba->total_scsi_bufs--;
3988     }
3989     spin_unlock(&phba->scsi_buf_list_get_lock);
3990     spin_unlock_irq(&phba->hbalock);
3991 }
3992 
3993 /**
3994  * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
3995  * @phba: pointer to lpfc hba data structure.
3996  *
3997  * This routine is to free all the IO buffers and IOCBs from the driver
3998  * list back to the kernel. It is called from lpfc_pci_remove_one to free
3999  * the internal resources before the device is removed from the system.
4000  **/
4001 void
4002 lpfc_io_free(struct lpfc_hba *phba)
4003 {
4004     struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
4005     struct lpfc_sli4_hdw_queue *qp;
4006     int idx;
4007 
4008     for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4009         qp = &phba->sli4_hba.hdwq[idx];
4010         /* Release all the lpfc_nvme_bufs maintained by this host. */
4011         spin_lock(&qp->io_buf_list_put_lock);
4012         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4013                      &qp->lpfc_io_buf_list_put,
4014                      list) {
4015             list_del(&lpfc_ncmd->list);
4016             qp->put_io_bufs--;
4017             dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4018                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4019             if (phba->cfg_xpsgl && !phba->nvmet_support)
4020                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4021             lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4022             kfree(lpfc_ncmd);
4023             qp->total_io_bufs--;
4024         }
4025         spin_unlock(&qp->io_buf_list_put_lock);
4026 
4027         spin_lock(&qp->io_buf_list_get_lock);
4028         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4029                      &qp->lpfc_io_buf_list_get,
4030                      list) {
4031             list_del(&lpfc_ncmd->list);
4032             qp->get_io_bufs--;
4033             dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4034                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4035             if (phba->cfg_xpsgl && !phba->nvmet_support)
4036                 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4037             lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4038             kfree(lpfc_ncmd);
4039             qp->total_io_bufs--;
4040         }
4041         spin_unlock(&qp->io_buf_list_get_lock);
4042     }
4043 }
4044 
4045 /**
4046  * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
4047  * @phba: pointer to lpfc hba data structure.
4048  *
4049  * This routine first calculates the sizes of the current els and allocated
4050  * scsi sgl lists, and then goes through all sgls to update the physical
4051  * XRIs assigned due to port function reset. During port initialization, the
4052  * current els and allocated scsi sgl lists are empty.
4053  *
4054  * Return codes
4055  *   0 - successful (for now, it always returns 0)
4056  **/
4057 int
4058 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
4059 {
4060     struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4061     uint16_t i, lxri, xri_cnt, els_xri_cnt;
4062     LIST_HEAD(els_sgl_list);
4063     int rc;
4064 
4065     /*
4066      * update on pci function's els xri-sgl list
4067      */
4068     els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4069 
4070     if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
4071         /* els xri-sgl expanded */
4072         xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
4073         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4074                 "3157 ELS xri-sgl count increased from "
4075                 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4076                 els_xri_cnt);
4077         /* allocate the additional els sgls */
4078         for (i = 0; i < xri_cnt; i++) {
4079             sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4080                          GFP_KERNEL);
4081             if (sglq_entry == NULL) {
4082                 lpfc_printf_log(phba, KERN_ERR,
4083                         LOG_TRACE_EVENT,
4084                         "2562 Failure to allocate an "
4085                         "ELS sgl entry:%d\n", i);
4086                 rc = -ENOMEM;
4087                 goto out_free_mem;
4088             }
4089             sglq_entry->buff_type = GEN_BUFF_TYPE;
4090             sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
4091                                &sglq_entry->phys);
4092             if (sglq_entry->virt == NULL) {
4093                 kfree(sglq_entry);
4094                 lpfc_printf_log(phba, KERN_ERR,
4095                         LOG_TRACE_EVENT,
4096                         "2563 Failure to allocate an "
4097                         "ELS mbuf:%d\n", i);
4098                 rc = -ENOMEM;
4099                 goto out_free_mem;
4100             }
4101             sglq_entry->sgl = sglq_entry->virt;
4102             memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4103             sglq_entry->state = SGL_FREED;
4104             list_add_tail(&sglq_entry->list, &els_sgl_list);
4105         }
4106         spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4107         list_splice_init(&els_sgl_list,
4108                  &phba->sli4_hba.lpfc_els_sgl_list);
4109         spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4110     } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4111         /* els xri-sgl shrank */
4112         xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4113         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4114                 "3158 ELS xri-sgl count decreased from "
4115                 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4116                 els_xri_cnt);
4117         spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4118         list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4119                  &els_sgl_list);
4120         /* release extra els sgls from list */
4121         for (i = 0; i < xri_cnt; i++) {
4122             list_remove_head(&els_sgl_list,
4123                      sglq_entry, struct lpfc_sglq, list);
4124             if (sglq_entry) {
4125                 __lpfc_mbuf_free(phba, sglq_entry->virt,
4126                          sglq_entry->phys);
4127                 kfree(sglq_entry);
4128             }
4129         }
4130         list_splice_init(&els_sgl_list,
4131                  &phba->sli4_hba.lpfc_els_sgl_list);
4132         spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4133     } else
4134         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4135                 "3163 ELS xri-sgl count unchanged: %d\n",
4136                 els_xri_cnt);
4137     phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4138 
4139     /* update xris to els sgls on the list */
4140     sglq_entry = NULL;
4141     sglq_entry_next = NULL;
4142     list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4143                  &phba->sli4_hba.lpfc_els_sgl_list, list) {
4144         lxri = lpfc_sli4_next_xritag(phba);
4145         if (lxri == NO_XRI) {
4146             lpfc_printf_log(phba, KERN_ERR,
4147                     LOG_TRACE_EVENT,
4148                     "2400 Failed to allocate xri for "
4149                     "ELS sgl\n");
4150             rc = -ENOMEM;
4151             goto out_free_mem;
4152         }
4153         sglq_entry->sli4_lxritag = lxri;
4154         sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4155     }
4156     return 0;
4157 
4158 out_free_mem:
4159     lpfc_free_els_sgl_list(phba);
4160     return rc;
4161 }
4162 
4163 /**
4164  * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
4165  * @phba: pointer to lpfc hba data structure.
4166  *
4167  * This routine first calculates the sizes of the current els and allocated
4168  * scsi sgl lists, and then goes through all sgls to update the physical
4169  * XRIs assigned due to port function reset. During port initialization, the
4170  * current els and allocated scsi sgl lists are empty.
4171  *
4172  * Return codes
4173  *   0 - successful (for now, it always returns 0)
4174  **/
4175 int
4176 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4177 {
4178     struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4179     uint16_t i, lxri, xri_cnt, els_xri_cnt;
4180     uint16_t nvmet_xri_cnt;
4181     LIST_HEAD(nvmet_sgl_list);
4182     int rc;
4183 
4184     /*
4185      * update on pci function's nvmet xri-sgl list
4186      */
4187     els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4188 
4189     /* For NVMET, ALL remaining XRIs are dedicated for IO processing */
4190     nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4191     if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4192         /* nvmet xri-sgl expanded */
4193         xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4194         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4195                 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4196                 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4197         /* allocate the additional nvmet sgls */
4198         for (i = 0; i < xri_cnt; i++) {
4199             sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4200                          GFP_KERNEL);
4201             if (sglq_entry == NULL) {
4202                 lpfc_printf_log(phba, KERN_ERR,
4203                         LOG_TRACE_EVENT,
4204                         "6303 Failure to allocate an "
4205                         "NVMET sgl entry:%d\n", i);
4206                 rc = -ENOMEM;
4207                 goto out_free_mem;
4208             }
4209             sglq_entry->buff_type = NVMET_BUFF_TYPE;
4210             sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4211                                &sglq_entry->phys);
4212             if (sglq_entry->virt == NULL) {
4213                 kfree(sglq_entry);
4214                 lpfc_printf_log(phba, KERN_ERR,
4215                         LOG_TRACE_EVENT,
4216                         "6304 Failure to allocate an "
4217                         "NVMET buf:%d\n", i);
4218                 rc = -ENOMEM;
4219                 goto out_free_mem;
4220             }
4221             sglq_entry->sgl = sglq_entry->virt;
4222             memset(sglq_entry->sgl, 0,
4223                    phba->cfg_sg_dma_buf_size);
4224             sglq_entry->state = SGL_FREED;
4225             list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4226         }
4227         spin_lock_irq(&phba->hbalock);
4228         spin_lock(&phba->sli4_hba.sgl_list_lock);
4229         list_splice_init(&nvmet_sgl_list,
4230                  &phba->sli4_hba.lpfc_nvmet_sgl_list);
4231         spin_unlock(&phba->sli4_hba.sgl_list_lock);
4232         spin_unlock_irq(&phba->hbalock);
4233     } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4234         /* nvmet xri-sgl shrunk */
4235         xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4236         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4237                 "6305 NVMET xri-sgl count decreased from "
4238                 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4239                 nvmet_xri_cnt);
4240         spin_lock_irq(&phba->hbalock);
4241         spin_lock(&phba->sli4_hba.sgl_list_lock);
4242         list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4243                  &nvmet_sgl_list);
4244         /* release extra nvmet sgls from list */
4245         for (i = 0; i < xri_cnt; i++) {
4246             list_remove_head(&nvmet_sgl_list,
4247                      sglq_entry, struct lpfc_sglq, list);
4248             if (sglq_entry) {
4249                 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4250                             sglq_entry->phys);
4251                 kfree(sglq_entry);
4252             }
4253         }
4254         list_splice_init(&nvmet_sgl_list,
4255                  &phba->sli4_hba.lpfc_nvmet_sgl_list);
4256         spin_unlock(&phba->sli4_hba.sgl_list_lock);
4257         spin_unlock_irq(&phba->hbalock);
4258     } else
4259         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4260                 "6306 NVMET xri-sgl count unchanged: %d\n",
4261                 nvmet_xri_cnt);
4262     phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4263 
4264     /* update xris to nvmet sgls on the list */
4265     sglq_entry = NULL;
4266     sglq_entry_next = NULL;
4267     list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4268                  &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4269         lxri = lpfc_sli4_next_xritag(phba);
4270         if (lxri == NO_XRI) {
4271             lpfc_printf_log(phba, KERN_ERR,
4272                     LOG_TRACE_EVENT,
4273                     "6307 Failed to allocate xri for "
4274                     "NVMET sgl\n");
4275             rc = -ENOMEM;
4276             goto out_free_mem;
4277         }
4278         sglq_entry->sli4_lxritag = lxri;
4279         sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4280     }
4281     return 0;
4282 
4283 out_free_mem:
4284     lpfc_free_nvmet_sgl_list(phba);
4285     return rc;
4286 }
4287 
4288 int
4289 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4290 {
4291     LIST_HEAD(blist);
4292     struct lpfc_sli4_hdw_queue *qp;
4293     struct lpfc_io_buf *lpfc_cmd;
4294     struct lpfc_io_buf *iobufp, *prev_iobufp;
4295     int idx, cnt, xri, inserted;
4296 
4297     cnt = 0;
4298     for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4299         qp = &phba->sli4_hba.hdwq[idx];
4300         spin_lock_irq(&qp->io_buf_list_get_lock);
4301         spin_lock(&qp->io_buf_list_put_lock);
4302 
4303         /* Take everything off the get and put lists */
4304         list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4305         list_splice(&qp->lpfc_io_buf_list_put, &blist);
4306         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4307         INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4308         cnt += qp->get_io_bufs + qp->put_io_bufs;
4309         qp->get_io_bufs = 0;
4310         qp->put_io_bufs = 0;
4311         qp->total_io_bufs = 0;
4312         spin_unlock(&qp->io_buf_list_put_lock);
4313         spin_unlock_irq(&qp->io_buf_list_get_lock);
4314     }
4315 
4316     /*
4317      * Take IO buffers off blist and put on cbuf sorted by XRI.
4318      * This is because POST_SGL takes a sequential range of XRIs
4319      * to post to the firmware.
4320      */
4321     for (idx = 0; idx < cnt; idx++) {
4322         list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4323         if (!lpfc_cmd)
4324             return cnt;
4325         if (idx == 0) {
4326             list_add_tail(&lpfc_cmd->list, cbuf);
4327             continue;
4328         }
4329         xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4330         inserted = 0;
4331         prev_iobufp = NULL;
4332         list_for_each_entry(iobufp, cbuf, list) {
4333             if (xri < iobufp->cur_iocbq.sli4_xritag) {
4334                 if (prev_iobufp)
4335                     list_add(&lpfc_cmd->list,
4336                          &prev_iobufp->list);
4337                 else
4338                     list_add(&lpfc_cmd->list, cbuf);
4339                 inserted = 1;
4340                 break;
4341             }
4342             prev_iobufp = iobufp;
4343         }
4344         if (!inserted)
4345             list_add_tail(&lpfc_cmd->list, cbuf);
4346     }
4347     return cnt;
4348 }
4349 
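/* Worked example (illustrative only): if the get/put lists held buffers with
 * XRIs 12, 7 and 9, lpfc_io_buf_flush() above leaves them on cbuf in the
 * order 7, 9, 12; keeping cbuf sorted lets the caller hand POST_SGL
 * sequential ranges of XRIs to post to the firmware.
 */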
4350 int
4351 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4352 {
4353     struct lpfc_sli4_hdw_queue *qp;
4354     struct lpfc_io_buf *lpfc_cmd;
4355     int idx, cnt;
4356 
4357     qp = phba->sli4_hba.hdwq;
4358     cnt = 0;
4359     while (!list_empty(cbuf)) {
4360         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4361             list_remove_head(cbuf, lpfc_cmd,
4362                      struct lpfc_io_buf, list);
4363             if (!lpfc_cmd)
4364                 return cnt;
4365             cnt++;
4366             qp = &phba->sli4_hba.hdwq[idx];
4367             lpfc_cmd->hdwq_no = idx;
4368             lpfc_cmd->hdwq = qp;
4369             lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
4370             spin_lock(&qp->io_buf_list_put_lock);
4371             list_add_tail(&lpfc_cmd->list,
4372                       &qp->lpfc_io_buf_list_put);
4373             qp->put_io_bufs++;
4374             qp->total_io_bufs++;
4375             spin_unlock(&qp->io_buf_list_put_lock);
4376         }
4377     }
4378     return cnt;
4379 }
4380 
4381 /**
4382  * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
4383  * @phba: pointer to lpfc hba data structure.
4384  *
4385  * This routine first calculates the sizes of the current els and allocated
4386  * scsi sgl lists, and then goes through all sgls to update the physical
4387  * XRIs assigned due to port function reset. During port initialization, the
4388  * current els and allocated scsi sgl lists are 0s.
4389  *
4390  * Return codes
4391  *   0 - successful (for now, it always returns 0)
4392  **/
4393 int
4394 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4395 {
4396     struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4397     uint16_t i, lxri, els_xri_cnt;
4398     uint16_t io_xri_cnt, io_xri_max;
4399     LIST_HEAD(io_sgl_list);
4400     int rc, cnt;
4401 
4402     /*
4403      * update on pci function's allocated nvme xri-sgl list
4404      */
4405 
4406     /* maximum number of xris available for nvme buffers */
4407     els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4408     io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4409     phba->sli4_hba.io_xri_max = io_xri_max;
4410 
4411     lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4412             "6074 Current allocated XRI sgl count:%d, "
4413             "maximum XRI count:%d els_xri_cnt:%d\n\n",
4414             phba->sli4_hba.io_xri_cnt,
4415             phba->sli4_hba.io_xri_max,
4416             els_xri_cnt);
4417 
4418     cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4419 
4420     if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4421         /* max nvme xri shrunk below the allocated nvme buffers */
4422         io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4423                     phba->sli4_hba.io_xri_max;
4424         /* release the extra allocated nvme buffers */
4425         for (i = 0; i < io_xri_cnt; i++) {
4426             list_remove_head(&io_sgl_list, lpfc_ncmd,
4427                      struct lpfc_io_buf, list);
4428             if (lpfc_ncmd) {
4429                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4430                           lpfc_ncmd->data,
4431                           lpfc_ncmd->dma_handle);
4432                 kfree(lpfc_ncmd);
4433             }
4434         }
4435         phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4436     }
4437 
4438     /* update xris associated to remaining allocated nvme buffers */
4439     lpfc_ncmd = NULL;
4440     lpfc_ncmd_next = NULL;
4441     phba->sli4_hba.io_xri_cnt = cnt;
4442     list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4443                  &io_sgl_list, list) {
4444         lxri = lpfc_sli4_next_xritag(phba);
4445         if (lxri == NO_XRI) {
4446             lpfc_printf_log(phba, KERN_ERR,
4447                     LOG_TRACE_EVENT,
4448                     "6075 Failed to allocate xri for "
4449                     "nvme buffer\n");
4450             rc = -ENOMEM;
4451             goto out_free_mem;
4452         }
4453         lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4454         lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4455     }
4456     cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4457     return 0;
4458 
4459 out_free_mem:
4460     lpfc_io_free(phba);
4461     return rc;
4462 }
4463 
4464 /**
4465  * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
4466  * @phba: Pointer to lpfc hba data structure.
4467  * @num_to_alloc: The requested number of buffers to allocate.
4468  *
4469  * This routine allocates nvme buffers for device with SLI-4 interface spec,
4470  * the nvme buffer contains all the necessary information needed to initiate
4471  * an I/O. After allocating up to @num_to_alloc IO buffers and putting
4472  * them on a list, it posts them to the port by using SGL block post.
4473  *
4474  * Return codes:
4475  *   int - number of IO buffers that were allocated and posted.
4476  *   0 = failure, less than num_to_alloc is a partial failure.
4477  **/
4478 int
4479 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4480 {
4481     struct lpfc_io_buf *lpfc_ncmd;
4482     struct lpfc_iocbq *pwqeq;
4483     uint16_t iotag, lxri = 0;
4484     int bcnt, num_posted;
4485     LIST_HEAD(prep_nblist);
4486     LIST_HEAD(post_nblist);
4487     LIST_HEAD(nvme_nblist);
4488 
4489     phba->sli4_hba.io_xri_cnt = 0;
4490     for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4491         lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4492         if (!lpfc_ncmd)
4493             break;
4494         /*
4495          * Get memory from the pci pool to map the virt space to
4496          * pci bus space for an I/O. The DMA buffer includes the
4497          * number of SGE's necessary to support the sg_tablesize.
4498          */
4499         lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4500                           GFP_KERNEL,
4501                           &lpfc_ncmd->dma_handle);
4502         if (!lpfc_ncmd->data) {
4503             kfree(lpfc_ncmd);
4504             break;
4505         }
4506 
4507         if (phba->cfg_xpsgl && !phba->nvmet_support) {
4508             INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4509         } else {
4510             /*
4511              * 4K Page alignment is CRITICAL to BlockGuard, double
4512              * check to be sure.
4513              */
4514             if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4515                 (((unsigned long)(lpfc_ncmd->data) &
4516                 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4517                 lpfc_printf_log(phba, KERN_ERR,
4518                         LOG_TRACE_EVENT,
4519                         "3369 Memory alignment err: "
4520                         "addr=%lx\n",
4521                         (unsigned long)lpfc_ncmd->data);
4522                 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4523                           lpfc_ncmd->data,
4524                           lpfc_ncmd->dma_handle);
4525                 kfree(lpfc_ncmd);
4526                 break;
4527             }
4528         }
4529 
4530         INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4531 
4532         lxri = lpfc_sli4_next_xritag(phba);
4533         if (lxri == NO_XRI) {
4534             dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4535                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4536             kfree(lpfc_ncmd);
4537             break;
4538         }
4539         pwqeq = &lpfc_ncmd->cur_iocbq;
4540 
4541         /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
4542         iotag = lpfc_sli_next_iotag(phba, pwqeq);
4543         if (iotag == 0) {
4544             dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4545                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4546             kfree(lpfc_ncmd);
4547             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4548                     "6121 Failed to allocate IOTAG for"
4549                     " XRI:0x%x\n", lxri);
4550             lpfc_sli4_free_xri(phba, lxri);
4551             break;
4552         }
4553         pwqeq->sli4_lxritag = lxri;
4554         pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4555 
4556         /* Initialize local short-hand pointers. */
4557         lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4558         lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4559         lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
4560         spin_lock_init(&lpfc_ncmd->buf_lock);
4561 
4562         /* add the nvme buffer to a post list */
4563         list_add_tail(&lpfc_ncmd->list, &post_nblist);
4564         phba->sli4_hba.io_xri_cnt++;
4565     }
4566     lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4567             "6114 Allocate %d out of %d requested new NVME "
4568             "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
4569             sizeof(*lpfc_ncmd));
4570 
4571 
4572     /* post the list of nvme buffer sgls to port if available */
4573     if (!list_empty(&post_nblist))
4574         num_posted = lpfc_sli4_post_io_sgl_list(
4575                 phba, &post_nblist, bcnt);
4576     else
4577         num_posted = 0;
4578 
4579     return num_posted;
4580 }
4581 
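/**
 * lpfc_get_wwpn - Read the WWPN of this HBA instance
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a READ_NV mailbox command in polled mode to retrieve
 * the port name of the HBA instance.
 *
 * Return: the WWPN as a 64-bit value, or (uint64_t)-1 on failure.
 **/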
4582 static uint64_t
4583 lpfc_get_wwpn(struct lpfc_hba *phba)
4584 {
4585     uint64_t wwn;
4586     int rc;
4587     LPFC_MBOXQ_t *mboxq;
4588     MAILBOX_t *mb;
4589 
4590     mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4591                         GFP_KERNEL);
4592     if (!mboxq)
4593         return (uint64_t)-1;
4594 
4595     /* First get WWN of HBA instance */
4596     lpfc_read_nv(phba, mboxq);
4597     rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4598     if (rc != MBX_SUCCESS) {
4599         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4600                 "6019 Mailbox failed, mbxCmd x%x "
4601                 "READ_NV, mbxStatus x%x\n",
4602                 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4603                 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4604         mempool_free(mboxq, phba->mbox_mem_pool);
4605         return (uint64_t) -1;
4606     }
4607     mb = &mboxq->u.mb;
4608     memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4609     /* wwn is WWPN of HBA instance */
4610     mempool_free(mboxq, phba->mbox_mem_pool);
4611     if (phba->sli_rev == LPFC_SLI_REV4)
4612         return be64_to_cpu(wwn);
4613     else
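        /* SLI-3: swap the upper and lower 32-bit words of the raw WWPN */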
4614         return rol64(wwn, 32);
4615 }
4616 
4617 /**
4618  * lpfc_vmid_res_alloc - Allocates resources for VMID
4619  * @phba: pointer to lpfc hba data structure.
4620  * @vport: pointer to vport data structure
4621  *
4622  * This routine allocates the resources needed for the VMID.
4623  *
4624  * Return codes
4625  *  0 on Success
4626  *  Non-0 on Failure
4627  */
4628 static int
4629 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4630 {
4631     /* VMID feature is supported only on SLI4 */
4632     if (phba->sli_rev == LPFC_SLI_REV3) {
4633         phba->cfg_vmid_app_header = 0;
4634         phba->cfg_vmid_priority_tagging = 0;
4635     }
4636 
4637     if (lpfc_is_vmid_enabled(phba)) {
4638         vport->vmid =
4639             kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4640                 GFP_KERNEL);
4641         if (!vport->vmid)
4642             return -ENOMEM;
4643 
4644         rwlock_init(&vport->vmid_lock);
4645 
4646         /* Set the VMID parameters for the vport */
4647         vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4648         vport->vmid_inactivity_timeout =
4649             phba->cfg_vmid_inactivity_timeout;
4650         vport->max_vmid = phba->cfg_max_vmid;
4651         vport->cur_vmid_cnt = 0;
4652 
4653         vport->vmid_priority_range = bitmap_zalloc
4654             (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4655 
4656         if (!vport->vmid_priority_range) {
4657             kfree(vport->vmid);
4658             return -ENOMEM;
4659         }
4660 
4661         hash_init(vport->hash_table);
4662     }
4663     return 0;
4664 }
4665 
4666 /**
4667  * lpfc_create_port - Create an FC port
4668  * @phba: pointer to lpfc hba data structure.
4669  * @instance: a unique integer ID to this FC port.
4670  * @dev: pointer to the device data structure.
4671  *
4672  * This routine creates an FC port for the upper layer protocol. The FC port
4673  * can be created on top of either a physical port or a virtual port provided
4674  * by the HBA. This routine also allocates a SCSI host data structure (shost)
4675  * and associates the FC port created before adding the shost into the SCSI
4676  * layer.
4677  *
4678  * Return codes
4679  *   @vport - pointer to the virtual N_Port data structure.
4680  *   NULL - port create failed.
4681  **/
4682 struct lpfc_vport *
4683 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4684 {
4685     struct lpfc_vport *vport;
4686     struct Scsi_Host  *shost = NULL;
4687     struct scsi_host_template *template;
4688     int error = 0;
4689     int i;
4690     uint64_t wwn;
4691     bool use_no_reset_hba = false;
4692     int rc;
4693 
4694     if (lpfc_no_hba_reset_cnt) {
4695         if (phba->sli_rev < LPFC_SLI_REV4 &&
4696             dev == &phba->pcidev->dev) {
4697             /* Reset the port first */
4698             lpfc_sli_brdrestart(phba);
4699             rc = lpfc_sli_chipset_init(phba);
4700             if (rc)
4701                 return NULL;
4702         }
4703         wwn = lpfc_get_wwpn(phba);
4704     }
4705 
4706     for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4707         if (wwn == lpfc_no_hba_reset[i]) {
4708             lpfc_printf_log(phba, KERN_ERR,
4709                     LOG_TRACE_EVENT,
4710                     "6020 Setting use_no_reset port=%llx\n",
4711                     wwn);
4712             use_no_reset_hba = true;
4713             break;
4714         }
4715     }
4716 
4717     /* Seed template for SCSI host registration */
4718     if (dev == &phba->pcidev->dev) {
4719         template = &phba->port_template;
4720 
4721         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4722             /* Seed physical port template */
4723             memcpy(template, &lpfc_template, sizeof(*template));
4724 
4725             if (use_no_reset_hba)
4726                 /* template is for a no reset SCSI Host */
4727                 template->eh_host_reset_handler = NULL;
4728 
4729             /* Template for all vports this physical port creates */
4730             memcpy(&phba->vport_template, &lpfc_template,
4731                    sizeof(*template));
4732             phba->vport_template.shost_groups = lpfc_vport_groups;
4733             phba->vport_template.eh_bus_reset_handler = NULL;
4734             phba->vport_template.eh_host_reset_handler = NULL;
4735             phba->vport_template.vendor_id = 0;
4736 
4737             /* Initialize the host templates with updated value */
4738             if (phba->sli_rev == LPFC_SLI_REV4) {
4739                 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4740                 phba->vport_template.sg_tablesize =
4741                     phba->cfg_scsi_seg_cnt;
4742             } else {
4743                 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4744                 phba->vport_template.sg_tablesize =
4745                     phba->cfg_sg_seg_cnt;
4746             }
4747 
4748         } else {
4749             /* NVMET is for physical port only */
4750             memcpy(template, &lpfc_template_nvme,
4751                    sizeof(*template));
4752         }
4753     } else {
4754         template = &phba->vport_template;
4755     }
4756 
4757     shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4758     if (!shost)
4759         goto out;
4760 
4761     vport = (struct lpfc_vport *) shost->hostdata;
4762     vport->phba = phba;
4763     vport->load_flag |= FC_LOADING;
4764     vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4765     vport->fc_rscn_flush = 0;
4766     lpfc_get_vport_cfgparam(vport);
4767 
4768     /* Adjust value in vport */
4769     vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4770 
4771     shost->unique_id = instance;
4772     shost->max_id = LPFC_MAX_TARGET;
4773     shost->max_lun = vport->cfg_max_luns;
4774     shost->this_id = -1;
4775     shost->max_cmd_len = 16;
4776 
4777     if (phba->sli_rev == LPFC_SLI_REV4) {
4778         if (!phba->cfg_fcp_mq_threshold ||
4779             phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4780             phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4781 
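        /* Expose at most cfg_fcp_mq_threshold blk-mq hardware queues,
         * capped at twice the number of possible NUMA nodes.
         */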
4782         shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4783                         phba->cfg_fcp_mq_threshold);
4784 
4785         shost->dma_boundary =
4786             phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4787 
4788         if (phba->cfg_xpsgl && !phba->nvmet_support)
4789             shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4790         else
4791             shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4792     } else
4793         /* SLI-3 has a limited number of hardware queues (3),
4794          * thus there is only one for FCP processing.
4795          */
4796         shost->nr_hw_queues = 1;
4797 
4798     /*
4799      * Set initial can_queue value since 0 is no longer supported and
4800      * scsi_add_host will fail. This will be adjusted later based on the
4801      * max xri value determined in hba setup.
4802      */
4803     shost->can_queue = phba->cfg_hba_queue_depth - 10;
4804     if (dev != &phba->pcidev->dev) {
4805         shost->transportt = lpfc_vport_transport_template;
4806         vport->port_type = LPFC_NPIV_PORT;
4807     } else {
4808         shost->transportt = lpfc_transport_template;
4809         vport->port_type = LPFC_PHYSICAL_PORT;
4810     }
4811 
4812     lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4813             "9081 CreatePort TMPLATE type %x TBLsize %d "
4814             "SEGcnt %d/%d\n",
4815             vport->port_type, shost->sg_tablesize,
4816             phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4817 
4818     /* Allocate the resources for VMID */
4819     rc = lpfc_vmid_res_alloc(phba, vport);
4820 
4821     if (rc)
4822         goto out;
4823 
4824     /* Initialize all internally managed lists. */
4825     INIT_LIST_HEAD(&vport->fc_nodes);
4826     INIT_LIST_HEAD(&vport->rcv_buffer_list);
4827     spin_lock_init(&vport->work_port_lock);
4828 
4829     timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4830 
4831     timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4832 
4833     timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4834 
4835     if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4836         lpfc_setup_bg(phba, shost);
4837 
4838     error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4839     if (error)
4840         goto out_put_shost;
4841 
4842     spin_lock_irq(&phba->port_list_lock);
4843     list_add_tail(&vport->listentry, &phba->port_list);
4844     spin_unlock_irq(&phba->port_list_lock);
4845     return vport;
4846 
4847 out_put_shost:
4848     kfree(vport->vmid);
4849     bitmap_free(vport->vmid_priority_range);
4850     scsi_host_put(shost);
4851 out:
4852     return NULL;
4853 }
4854 
4855 /**
4856  * destroy_port -  destroy an FC port
4857  * @vport: pointer to an lpfc virtual N_Port data structure.
4858  *
4859  * This routine destroys an FC port from the upper layer protocol. All the
4860  * resources associated with the port are released.
4861  **/
4862 void
4863 destroy_port(struct lpfc_vport *vport)
4864 {
4865     struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4866     struct lpfc_hba  *phba = vport->phba;
4867 
4868     lpfc_debugfs_terminate(vport);
4869     fc_remove_host(shost);
4870     scsi_remove_host(shost);
4871 
4872     spin_lock_irq(&phba->port_list_lock);
4873     list_del_init(&vport->listentry);
4874     spin_unlock_irq(&phba->port_list_lock);
4875 
4876     lpfc_cleanup(vport);
4877     return;
4878 }
4879 
4880 /**
4881  * lpfc_get_instance - Get a unique integer ID
4882  *
4883  * This routine allocates a unique integer ID from lpfc_hba_index pool. It
4884  * uses the kernel idr facility to perform the task.
4885  *
4886  * Return codes:
4887  *   instance - a unique integer ID allocated as the new instance.
4888  *   -1 - lpfc get instance failed.
4889  **/
4890 int
4891 lpfc_get_instance(void)
4892 {
4893     int ret;
4894 
4895     ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4896     return ret < 0 ? -1 : ret;
4897 }
4898 
4899 /**
4900  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
4901  * @shost: pointer to SCSI host data structure.
4902  * @time: elapsed time of the scan in jiffies.
4903  *
4904  * This routine is called by the SCSI layer with a SCSI host to determine
4905  * whether the scan host is finished.
4906  *
4907  * Note: there is no scan_start function as adapter initialization will have
4908  * asynchronously kicked off the link initialization.
4909  *
4910  * Return codes
4911  *   0 - SCSI host scan is not over yet.
4912  *   1 - SCSI host scan is over.
4913  **/
4914 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4915 {
4916     struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4917     struct lpfc_hba   *phba = vport->phba;
4918     int stat = 0;
4919 
4920     spin_lock_irq(shost->host_lock);
4921 
4922     if (vport->load_flag & FC_UNLOADING) {
4923         stat = 1;
4924         goto finished;
4925     }
4926     if (time >= msecs_to_jiffies(30 * 1000)) {
4927         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4928                 "0461 Scanning longer than 30 "
4929                 "seconds.  Continuing initialization\n");
4930         stat = 1;
4931         goto finished;
4932     }
4933     if (time >= msecs_to_jiffies(15 * 1000) &&
4934         phba->link_state <= LPFC_LINK_DOWN) {
4935         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4936                 "0465 Link down longer than 15 "
4937                 "seconds.  Continuing initialization\n");
4938         stat = 1;
4939         goto finished;
4940     }
4941 
4942     if (vport->port_state != LPFC_VPORT_READY)
4943         goto finished;
4944     if (vport->num_disc_nodes || vport->fc_prli_sent)
4945         goto finished;
4946     if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4947         goto finished;
4948     if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4949         goto finished;
4950 
4951     stat = 1;
4952 
4953 finished:
4954     spin_unlock_irq(shost->host_lock);
4955     return stat;
4956 }
4957 
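/**
 * lpfc_host_supported_speeds_set - Set supported link speeds for a SCSI host
 * @shost: pointer to SCSI host data structure.
 *
 * This routine translates the HBA's link-speed capability mask into the FC
 * transport's supported-speeds bitmap. Nothing is reported in FCoE mode,
 * since the link speed cannot be controlled via FCoE.
 **/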
4958 static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4959 {
4960     struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4961     struct lpfc_hba   *phba = vport->phba;
4962 
4963     fc_host_supported_speeds(shost) = 0;
4964     /*
4965      * Avoid reporting supported link speed for FCoE as it can't be
4966      * controlled via FCoE.
4967      */
4968     if (phba->hba_flag & HBA_FCOE_MODE)
4969         return;
4970 
4971     if (phba->lmt & LMT_256Gb)
4972         fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4973     if (phba->lmt & LMT_128Gb)
4974         fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4975     if (phba->lmt & LMT_64Gb)
4976         fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4977     if (phba->lmt & LMT_32Gb)
4978         fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4979     if (phba->lmt & LMT_16Gb)
4980         fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4981     if (phba->lmt & LMT_10Gb)
4982         fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4983     if (phba->lmt & LMT_8Gb)
4984         fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4985     if (phba->lmt & LMT_4Gb)
4986         fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4987     if (phba->lmt & LMT_2Gb)
4988         fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4989     if (phba->lmt & LMT_1Gb)
4990         fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4991 }
4992 
4993 /**
4994  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
4995  * @shost: pointer to SCSI host data structure.
4996  *
4997  * This routine initializes a given SCSI host attributes on a FC port. The
4998  * SCSI host can be either on top of a physical port or a virtual port.
4999  **/
5000 void lpfc_host_attrib_init(struct Scsi_Host *shost)
5001 {
5002     struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5003     struct lpfc_hba   *phba = vport->phba;
5004     /*
5005      * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
5006      */
5007 
5008     fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
5009     fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
5010     fc_host_supported_classes(shost) = FC_COS_CLASS3;
5011 
5012     memset(fc_host_supported_fc4s(shost), 0,
5013            sizeof(fc_host_supported_fc4s(shost)));
5014     fc_host_supported_fc4s(shost)[2] = 1;
5015     fc_host_supported_fc4s(shost)[7] = 1;
5016 
5017     lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
5018                  sizeof fc_host_symbolic_name(shost));
5019 
5020     lpfc_host_supported_speeds_set(shost);
5021 
5022     fc_host_maxframe_size(shost) =
5023         (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
5024         (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
5025 
5026     fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
5027 
5028     /* This value is also unchanging */
5029     memset(fc_host_active_fc4s(shost), 0,
5030            sizeof(fc_host_active_fc4s(shost)));
5031     fc_host_active_fc4s(shost)[2] = 1;
5032     fc_host_active_fc4s(shost)[7] = 1;
5033 
5034     fc_host_max_npiv_vports(shost) = phba->max_vpi;
5035     spin_lock_irq(shost->host_lock);
5036     vport->load_flag &= ~FC_LOADING;
5037     spin_unlock_irq(shost->host_lock);
5038 }
5039 
5040 /**
5041  * lpfc_stop_port_s3 - Stop SLI3 device port
5042  * @phba: pointer to lpfc hba data structure.
5043  *
5044  * This routine is invoked to stop an SLI3 device port, it stops the device
5045  * from generating interrupts and stops the device driver's timers for the
5046  * device.
5047  **/
5048 static void
5049 lpfc_stop_port_s3(struct lpfc_hba *phba)
5050 {
5051     /* Clear all interrupt enable conditions */
5052     writel(0, phba->HCregaddr);
5053     readl(phba->HCregaddr); /* flush */
5054     /* Clear all pending interrupts */
5055     writel(0xffffffff, phba->HAregaddr);
5056     readl(phba->HAregaddr); /* flush */
5057 
5058     /* Reset some HBA SLI setup states */
5059     lpfc_stop_hba_timers(phba);
5060     phba->pport->work_port_events = 0;
5061 }
5062 
5063 /**
5064  * lpfc_stop_port_s4 - Stop SLI4 device port
5065  * @phba: pointer to lpfc hba data structure.
5066  *
5067  * This routine is invoked to stop an SLI4 device port, it stops the device
5068  * from generating interrupts and stops the device driver's timers for the
5069  * device.
5070  **/
5071 static void
5072 lpfc_stop_port_s4(struct lpfc_hba *phba)
5073 {
5074     /* Reset some HBA SLI4 setup states */
5075     lpfc_stop_hba_timers(phba);
5076     if (phba->pport)
5077         phba->pport->work_port_events = 0;
5078     phba->sli4_hba.intr_enable = 0;
5079 }
5080 
5081 /**
5082  * lpfc_stop_port - Wrapper function for stopping hba port
5083  * @phba: Pointer to HBA context object.
5084  *
5085  * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
5086  * the API jump table function pointer from the lpfc_hba struct.
5087  **/
5088 void
5089 lpfc_stop_port(struct lpfc_hba *phba)
5090 {
5091     phba->lpfc_stop_port(phba);
5092 
5093     if (phba->wq)
5094         flush_workqueue(phba->wq);
5095 }
5096 
5097 /**
5098  * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
5099  * @phba: Pointer to hba for which this call is being executed.
5100  *
5101  * This routine starts the timer waiting for the FCF rediscovery to complete.
5102  **/
5103 void
5104 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5105 {
5106     unsigned long fcf_redisc_wait_tmo =
5107         (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5108     /* Start fcf rediscovery wait period timer */
5109     mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5110     spin_lock_irq(&phba->hbalock);
5111     /* Allow action to new fcf asynchronous event */
5112     phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5113     /* Mark the FCF rediscovery pending state */
5114     phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5115     spin_unlock_irq(&phba->hbalock);
5116 }
5117 
5118 /**
5119  * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
5120  * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5121  *
5122  * This routine is invoked when the wait for FCF table rediscovery has
5123  * timed out. If new FCF record(s) have been discovered during the wait
5124  * period, a new FCF event is added to the FCOE async event list and the
5125  * worker thread is then woken up to process it from the worker thread
5126  * context.
5127  **/
5128 static void
5129 lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5130 {
5131     struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5132 
5133     /* Don't send FCF rediscovery event if timer cancelled */
5134     spin_lock_irq(&phba->hbalock);
5135     if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5136         spin_unlock_irq(&phba->hbalock);
5137         return;
5138     }
5139     /* Clear FCF rediscovery timer pending flag */
5140     phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5141     /* FCF rediscovery event to worker thread */
5142     phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5143     spin_unlock_irq(&phba->hbalock);
5144     lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5145             "2776 FCF rediscover quiescent timer expired\n");
5146     /* wake up worker thread */
5147     lpfc_worker_wake_up(phba);
5148 }
5149 
5150 /**
5151  * lpfc_vmid_poll - VMID timeout detection
5152  * @t: Timer context used to obtain the pointer to lpfc hba data structure.
5153  *
5154  * This routine is invoked when a VM has issued no I/O for the specified
5155  * amount of time. When this situation is detected, the VMID has to be
5156  * deregistered from the switch and all the local resources freed. The VMID
5157  * will be reassigned to the VM once the I/O begins.
5158  **/
5159 static void
5160 lpfc_vmid_poll(struct timer_list *t)
5161 {
5162     struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5163     u32 wake_up = 0;
5164 
5165     /* check if there is a need to issue QFPA */
5166     if (phba->pport->vmid_priority_tagging) {
5167         wake_up = 1;
5168         phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5169     }
5170 
5171     /* Is the vmid inactivity timer enabled */
5172     if (phba->pport->vmid_inactivity_timeout ||
5173         phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5174         wake_up = 1;
5175         phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5176     }
5177 
5178     if (wake_up)
5179         lpfc_worker_wake_up(phba);
5180 
5181     /* restart the timer for the next iteration */
5182     mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5183                             LPFC_VMID_TIMER));
5184 }
5185 
5186 /**
5187  * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
5188  * @phba: pointer to lpfc hba data structure.
5189  * @acqe_link: pointer to the async link completion queue entry.
5190  *
5191  * This routine is to parse the SLI4 link-attention link fault code.
5192  **/
5193 static void
5194 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5195                struct lpfc_acqe_link *acqe_link)
5196 {
5197     switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5198     case LPFC_ASYNC_LINK_FAULT_NONE:
5199     case LPFC_ASYNC_LINK_FAULT_LOCAL:
5200     case LPFC_ASYNC_LINK_FAULT_REMOTE:
5201     case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5202         break;
5203     default:
5204         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5205                 "0398 Unknown link fault code: x%x\n",
5206                 bf_get(lpfc_acqe_link_fault, acqe_link));
5207         break;
5208     }
5209 }
5210 
5211 /**
5212  * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
5213  * @phba: pointer to lpfc hba data structure.
5214  * @acqe_link: pointer to the async link completion queue entry.
5215  *
5216  * This routine is to parse the SLI4 link attention type and translate it
5217  * into the base driver's link attention type coding.
5218  *
5219  * Return: Link attention type in terms of base driver's coding.
5220  **/
5221 static uint8_t
5222 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5223               struct lpfc_acqe_link *acqe_link)
5224 {
5225     uint8_t att_type;
5226 
5227     switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5228     case LPFC_ASYNC_LINK_STATUS_DOWN:
5229     case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5230         att_type = LPFC_ATT_LINK_DOWN;
5231         break;
5232     case LPFC_ASYNC_LINK_STATUS_UP:
5233         /* Ignore physical link up events - wait for logical link up */
5234         att_type = LPFC_ATT_RESERVED;
5235         break;
5236     case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5237         att_type = LPFC_ATT_LINK_UP;
5238         break;
5239     default:
5240         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5241                 "0399 Invalid link attention type: x%x\n",
5242                 bf_get(lpfc_acqe_link_status, acqe_link));
5243         att_type = LPFC_ATT_RESERVED;
5244         break;
5245     }
5246     return att_type;
5247 }
5248 
5249 /**
5250  * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
5251  * @phba: pointer to lpfc hba data structure.
5252  *
5253  * This routine is to get an FC port's link speed in Mbps.
5254  *
5255  * Return: link speed in terms of Mbps.
5256  **/
5257 uint32_t
5258 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5259 {
5260     uint32_t link_speed;
5261 
5262     if (!lpfc_is_link_up(phba))
5263         return 0;
5264 
5265     if (phba->sli_rev <= LPFC_SLI_REV3) {
5266         switch (phba->fc_linkspeed) {
5267         case LPFC_LINK_SPEED_1GHZ:
5268             link_speed = 1000;
5269             break;
5270         case LPFC_LINK_SPEED_2GHZ:
5271             link_speed = 2000;
5272             break;
5273         case LPFC_LINK_SPEED_4GHZ:
5274             link_speed = 4000;
5275             break;
5276         case LPFC_LINK_SPEED_8GHZ:
5277             link_speed = 8000;
5278             break;
5279         case LPFC_LINK_SPEED_10GHZ:
5280             link_speed = 10000;
5281             break;
5282         case LPFC_LINK_SPEED_16GHZ:
5283             link_speed = 16000;
5284             break;
5285         default:
5286             link_speed = 0;
5287         }
5288     } else {
5289         if (phba->sli4_hba.link_state.logical_speed)
5290             link_speed =
5291                   phba->sli4_hba.link_state.logical_speed;
5292         else
5293             link_speed = phba->sli4_hba.link_state.speed;
5294     }
5295     return link_speed;
5296 }
5297 
5298 /**
5299  * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
5300  * @phba: pointer to lpfc hba data structure.
5301  * @evt_code: asynchronous event code.
5302  * @speed_code: asynchronous event link speed code.
5303  *
5304  * This routine is to parse the given SLI4 async event link speed code into
5305  * value of Mbps for the link speed.
5306  *
5307  * Return: link speed in terms of Mbps.
5308  **/
5309 static uint32_t
5310 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5311                uint8_t speed_code)
5312 {
5313     uint32_t port_speed;
5314 
5315     switch (evt_code) {
5316     case LPFC_TRAILER_CODE_LINK:
5317         switch (speed_code) {
5318         case LPFC_ASYNC_LINK_SPEED_ZERO:
5319             port_speed = 0;
5320             break;
5321         case LPFC_ASYNC_LINK_SPEED_10MBPS:
5322             port_speed = 10;
5323             break;
5324         case LPFC_ASYNC_LINK_SPEED_100MBPS:
5325             port_speed = 100;
5326             break;
5327         case LPFC_ASYNC_LINK_SPEED_1GBPS:
5328             port_speed = 1000;
5329             break;
5330         case LPFC_ASYNC_LINK_SPEED_10GBPS:
5331             port_speed = 10000;
5332             break;
5333         case LPFC_ASYNC_LINK_SPEED_20GBPS:
5334             port_speed = 20000;
5335             break;
5336         case LPFC_ASYNC_LINK_SPEED_25GBPS:
5337             port_speed = 25000;
5338             break;
5339         case LPFC_ASYNC_LINK_SPEED_40GBPS:
5340             port_speed = 40000;
5341             break;
5342         case LPFC_ASYNC_LINK_SPEED_100GBPS:
5343             port_speed = 100000;
5344             break;
5345         default:
5346             port_speed = 0;
5347         }
5348         break;
5349     case LPFC_TRAILER_CODE_FC:
5350         switch (speed_code) {
5351         case LPFC_FC_LA_SPEED_UNKNOWN:
5352             port_speed = 0;
5353             break;
5354         case LPFC_FC_LA_SPEED_1G:
5355             port_speed = 1000;
5356             break;
5357         case LPFC_FC_LA_SPEED_2G:
5358             port_speed = 2000;
5359             break;
5360         case LPFC_FC_LA_SPEED_4G:
5361             port_speed = 4000;
5362             break;
5363         case LPFC_FC_LA_SPEED_8G:
5364             port_speed = 8000;
5365             break;
5366         case LPFC_FC_LA_SPEED_10G:
5367             port_speed = 10000;
5368             break;
5369         case LPFC_FC_LA_SPEED_16G:
5370             port_speed = 16000;
5371             break;
5372         case LPFC_FC_LA_SPEED_32G:
5373             port_speed = 32000;
5374             break;
5375         case LPFC_FC_LA_SPEED_64G:
5376             port_speed = 64000;
5377             break;
5378         case LPFC_FC_LA_SPEED_128G:
5379             port_speed = 128000;
5380             break;
5381         case LPFC_FC_LA_SPEED_256G:
5382             port_speed = 256000;
5383             break;
5384         default:
5385             port_speed = 0;
5386         }
5387         break;
5388     default:
5389         port_speed = 0;
5390     }
5391     return port_speed;
5392 }
5393 
5394 /**
5395  * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
5396  * @phba: pointer to lpfc hba data structure.
5397  * @acqe_link: pointer to the async link completion queue entry.
5398  *
5399  * This routine is to handle the SLI4 asynchronous FCoE link event.
5400  **/
5401 static void
5402 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5403              struct lpfc_acqe_link *acqe_link)
5404 {
5405     LPFC_MBOXQ_t *pmb;
5406     MAILBOX_t *mb;
5407     struct lpfc_mbx_read_top *la;
5408     uint8_t att_type;
5409     int rc;
5410 
5411     att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5412     if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5413         return;
5414     phba->fcoe_eventtag = acqe_link->event_tag;
5415     pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5416     if (!pmb) {
5417         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5418                 "0395 The mboxq allocation failed\n");
5419         return;
5420     }
5421 
5422     rc = lpfc_mbox_rsrc_prep(phba, pmb);
5423     if (rc) {
5424         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5425                 "0396 mailbox allocation failed\n");
5426         goto out_free_pmb;
5427     }
5428 
5429     /* Cleanup any outstanding ELS commands */
5430     lpfc_els_flush_all_cmd(phba);
5431 
5432     /* Block ELS IOCBs until we have done process link event */
5433     phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5434 
5435     /* Update link event statistics */
5436     phba->sli.slistat.link_event++;
5437 
5438     /* Create lpfc_handle_latt mailbox command from link ACQE */
5439     lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
5440     pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5441     pmb->vport = phba->pport;
5442 
5443     /* Keep the link status for extra SLI4 state machine reference */
5444     phba->sli4_hba.link_state.speed =
5445             lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5446                 bf_get(lpfc_acqe_link_speed, acqe_link));
5447     phba->sli4_hba.link_state.duplex =
5448                 bf_get(lpfc_acqe_link_duplex, acqe_link);
5449     phba->sli4_hba.link_state.status =
5450                 bf_get(lpfc_acqe_link_status, acqe_link);
5451     phba->sli4_hba.link_state.type =
5452                 bf_get(lpfc_acqe_link_type, acqe_link);
5453     phba->sli4_hba.link_state.number =
5454                 bf_get(lpfc_acqe_link_number, acqe_link);
5455     phba->sli4_hba.link_state.fault =
5456                 bf_get(lpfc_acqe_link_fault, acqe_link);
5457     phba->sli4_hba.link_state.logical_speed =
5458             bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5459 
5460     lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5461             "2900 Async FC/FCoE Link event - Speed:%dGBit "
5462             "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5463             "Logical speed:%dMbps Fault:%d\n",
5464             phba->sli4_hba.link_state.speed,
5465             phba->sli4_hba.link_state.topology,
5466             phba->sli4_hba.link_state.status,
5467             phba->sli4_hba.link_state.type,
5468             phba->sli4_hba.link_state.number,
5469             phba->sli4_hba.link_state.logical_speed,
5470             phba->sli4_hba.link_state.fault);
5471     /*
5472      * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
5473      * topology info. Note: Optional for non FC-AL ports.
5474      */
5475     if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5476         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5477         if (rc == MBX_NOT_FINISHED)
5478             goto out_free_pmb;
5479         return;
5480     }
5481     /*
5482      * For FCoE Mode: fill in all the topology information we need and call
5483      * the READ_TOPOLOGY completion routine to continue without actually
5484      * sending the READ_TOPOLOGY mailbox command to the port.
5485      */
5486     /* Initialize completion status */
5487     mb = &pmb->u.mb;
5488     mb->mbxStatus = MBX_SUCCESS;
5489 
5490     /* Parse port fault information field */
5491     lpfc_sli4_parse_latt_fault(phba, acqe_link);
5492 
5493     /* Parse and translate link attention fields */
5494     la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5495     la->eventTag = acqe_link->event_tag;
5496     bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5497     bf_set(lpfc_mbx_read_top_link_spd, la,
5498            (bf_get(lpfc_acqe_link_speed, acqe_link)));
5499 
5500     /* Fake the following irrelevant fields */
5501     bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5502     bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5503     bf_set(lpfc_mbx_read_top_il, la, 0);
5504     bf_set(lpfc_mbx_read_top_pb, la, 0);
5505     bf_set(lpfc_mbx_read_top_fa, la, 0);
5506     bf_set(lpfc_mbx_read_top_mm, la, 0);
5507 
5508     /* Invoke the lpfc_handle_latt mailbox command callback function */
5509     lpfc_mbx_cmpl_read_topology(phba, pmb);
5510 
5511     return;
5512 
5513 out_free_pmb:
5514     lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5515 }
5516 
5517 /**
5518  * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
5519  * topology.
5520  * @phba: pointer to lpfc hba data structure.
5521  * @speed_code: asynchronous event link speed code.
5522  *
5523  * This routine is to parse the given SLI4 async event link speed code into
5524  * value of Read topology link speed.
5525  *
5526  * Return: link speed in terms of Read topology.
5527  **/
5528 static uint8_t
5529 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5530 {
5531     uint8_t port_speed;
5532 
5533     switch (speed_code) {
5534     case LPFC_FC_LA_SPEED_1G:
5535         port_speed = LPFC_LINK_SPEED_1GHZ;
5536         break;
5537     case LPFC_FC_LA_SPEED_2G:
5538         port_speed = LPFC_LINK_SPEED_2GHZ;
5539         break;
5540     case LPFC_FC_LA_SPEED_4G:
5541         port_speed = LPFC_LINK_SPEED_4GHZ;
5542         break;
5543     case LPFC_FC_LA_SPEED_8G:
5544         port_speed = LPFC_LINK_SPEED_8GHZ;
5545         break;
5546     case LPFC_FC_LA_SPEED_16G:
5547         port_speed = LPFC_LINK_SPEED_16GHZ;
5548         break;
5549     case LPFC_FC_LA_SPEED_32G:
5550         port_speed = LPFC_LINK_SPEED_32GHZ;
5551         break;
5552     case LPFC_FC_LA_SPEED_64G:
5553         port_speed = LPFC_LINK_SPEED_64GHZ;
5554         break;
5555     case LPFC_FC_LA_SPEED_128G:
5556         port_speed = LPFC_LINK_SPEED_128GHZ;
5557         break;
5558     case LPFC_FC_LA_SPEED_256G:
5559         port_speed = LPFC_LINK_SPEED_256GHZ;
5560         break;
5561     default:
5562         port_speed = 0;
5563         break;
5564     }
5565 
5566     return port_speed;
5567 }
5568 
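/**
 * lpfc_cgn_dump_rxmonitor - Dump recent RX monitor table entries to the log
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the rxtable backwards from the head index and logs up
 * to LPFC_MAX_RXMONITOR_DUMP of the most recent entries.
 **/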
5569 void
5570 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5571 {
5572     struct rxtable_entry *entry;
5573     int cnt = 0, head, tail, last, start;
5574 
5575     head = atomic_read(&phba->rxtable_idx_head);
5576     tail = atomic_read(&phba->rxtable_idx_tail);
5577     if (!phba->rxtable || head == tail) {
5578         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
5579                 "4411 Rxtable is empty\n");
5580         return;
5581     }
5582     last = tail;
5583     start = head;
5584 
5585     /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
5586     while (start != last) {
5587         if (start)
5588             start--;
5589         else
5590             start = LPFC_MAX_RXMONITOR_ENTRY - 1;
5591         entry = &phba->rxtable[start];
5592         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5593                 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
5594                 "Lat %lld ASz %lld Info %02d BWUtil %d "
5595                 "Int %d slot %d\n",
5596                 cnt, entry->max_bytes_per_interval,
5597                 entry->total_bytes, entry->rcv_bytes,
5598                 entry->avg_io_latency, entry->avg_io_size,
5599                 entry->cmf_info, entry->timer_utilization,
5600                 entry->timer_interval, start);
5601         cnt++;
5602         if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
5603             return;
5604     }
5605 }
5606 
5607 /**
5608  * lpfc_cgn_update_stat - Save data into congestion stats buffer
5609  * @phba: pointer to lpfc hba data structure.
5610  * @dtag: FPIN descriptor received
5611  *
5612  * Increment the FPIN received counter/time when it happens.
5613  */
5614 void
5615 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5616 {
5617     struct lpfc_cgn_info *cp;
5618     struct tm broken;
5619     struct timespec64 cur_time;
5620     u32 cnt;
5621     u32 value;
5622 
5623     /* Make sure we have a congestion info buffer */
5624     if (!phba->cgn_i)
5625         return;
5626     cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5627     ktime_get_real_ts64(&cur_time);
5628     time64_to_tm(cur_time.tv_sec, 0, &broken);
5629 
5630     /* Update congestion statistics */
5631     switch (dtag) {
5632     case ELS_DTAG_LNK_INTEGRITY:
5633         cnt = le32_to_cpu(cp->link_integ_notification);
5634         cnt++;
5635         cp->link_integ_notification = cpu_to_le32(cnt);
5636 
5637         cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5638         cp->cgn_stat_lnk_day = broken.tm_mday;
5639         cp->cgn_stat_lnk_year = broken.tm_year - 100;
5640         cp->cgn_stat_lnk_hour = broken.tm_hour;
5641         cp->cgn_stat_lnk_min = broken.tm_min;
5642         cp->cgn_stat_lnk_sec = broken.tm_sec;
5643         break;
5644     case ELS_DTAG_DELIVERY:
5645         cnt = le32_to_cpu(cp->delivery_notification);
5646         cnt++;
5647         cp->delivery_notification = cpu_to_le32(cnt);
5648 
5649         cp->cgn_stat_del_month = broken.tm_mon + 1;
5650         cp->cgn_stat_del_day = broken.tm_mday;
5651         cp->cgn_stat_del_year = broken.tm_year - 100;
5652         cp->cgn_stat_del_hour = broken.tm_hour;
5653         cp->cgn_stat_del_min = broken.tm_min;
5654         cp->cgn_stat_del_sec = broken.tm_sec;
5655         break;
5656     case ELS_DTAG_PEER_CONGEST:
5657         cnt = le32_to_cpu(cp->cgn_peer_notification);
5658         cnt++;
5659         cp->cgn_peer_notification = cpu_to_le32(cnt);
5660 
5661         cp->cgn_stat_peer_month = broken.tm_mon + 1;
5662         cp->cgn_stat_peer_day = broken.tm_mday;
5663         cp->cgn_stat_peer_year = broken.tm_year - 100;
5664         cp->cgn_stat_peer_hour = broken.tm_hour;
5665         cp->cgn_stat_peer_min = broken.tm_min;
5666         cp->cgn_stat_peer_sec = broken.tm_sec;
5667         break;
5668     case ELS_DTAG_CONGESTION:
5669         cnt = le32_to_cpu(cp->cgn_notification);
5670         cnt++;
5671         cp->cgn_notification = cpu_to_le32(cnt);
5672 
5673         cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5674         cp->cgn_stat_cgn_day = broken.tm_mday;
5675         cp->cgn_stat_cgn_year = broken.tm_year - 100;
5676         cp->cgn_stat_cgn_hour = broken.tm_hour;
5677         cp->cgn_stat_cgn_min = broken.tm_min;
5678         cp->cgn_stat_cgn_sec = broken.tm_sec;
5679     }
5680     if (phba->cgn_fpin_frequency &&
5681         phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5682         value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5683         cp->cgn_stat_npm = value;
5684     }
5685     value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5686                     LPFC_CGN_CRC32_SEED);
5687     cp->cgn_info_crc = cpu_to_le32(value);
5688 }
5689 
5690 /**
5691  * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
5692  * @phba: pointer to lpfc hba data structure.
5693  *
5694  * Save the congestion event data every minute.
5695  * On the hour collapse all the minute data into hour data. Every day
5696  * collapse all the hour data into daily data. Separate driver
5697  * and fabric congestion event counters are maintained and saved out
5698  * to the registered congestion buffer every minute.
5699  */
5700 static void
5701 lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
5702 {
5703     struct lpfc_cgn_info *cp;
5704     struct tm broken;
5705     struct timespec64 cur_time;
5706     uint32_t i, index;
5707     uint16_t value, mvalue;
5708     uint64_t bps;
5709     uint32_t mbps;
5710     uint32_t dvalue, wvalue, lvalue, avalue;
5711     uint64_t latsum;
5712     __le16 *ptr;
5713     __le32 *lptr;
5714     __le16 *mptr;
5715 
5716     /* Make sure we have a congestion info buffer */
5717     if (!phba->cgn_i)
5718         return;
5719     cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5720 
5721     if (time_before(jiffies, phba->cgn_evt_timestamp))
5722         return;
5723     phba->cgn_evt_timestamp = jiffies +
5724             msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5725     phba->cgn_evt_minute++;
5726 
5727     /* We should get to this point in the routine on 1 minute intervals */
5728 
5729     ktime_get_real_ts64(&cur_time);
5730     time64_to_tm(cur_time.tv_sec, 0, &broken);
5731 
5732     if (phba->cgn_fpin_frequency &&
5733         phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5734         value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5735         cp->cgn_stat_npm = value;
5736     }
5737 
5738     /* Read and clear the latency counters for this minute */
5739     lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5740     latsum = atomic64_read(&phba->cgn_latency_evt);
5741     atomic_set(&phba->cgn_latency_evt_cnt, 0);
5742     atomic64_set(&phba->cgn_latency_evt, 0);
5743 
5744     /* We need to store MB/sec bandwidth in the congestion information.
5745      * block_cnt is count of 512 byte blocks for the entire minute,
5746      * bps will get bytes per sec before finally converting to MB/sec.
5747      */
5748     bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5749     phba->rx_block_cnt = 0;
5750     mvalue = bps / (1024 * 1024); /* convert to MB/sec */
5751 
5752     /* Every minute */
5753     /* cgn parameters */
5754     cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5755     cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5756     cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5757     cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5758 
5759     /* Fill in default LUN qdepth */
5760     value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5761     cp->cgn_lunq = cpu_to_le16(value);
5762 
5763     /* Record congestion buffer info - every minute
5764      * cgn_driver_evt_cnt (Driver events)
5765      * cgn_fabric_warn_cnt (Congestion Warnings)
5766      * cgn_latency_evt_cnt / cgn_latency_evt (IO Latency)
5767      * cgn_fabric_alarm_cnt (Congestion Alarms)
5768      */
5769     index = ++cp->cgn_index_minute;
5770     if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5771         cp->cgn_index_minute = 0;
5772         index = 0;
5773     }
5774 
5775     /* Get the number of driver events in this sample and reset counter */
5776     dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5777     atomic_set(&phba->cgn_driver_evt_cnt, 0);
5778 
5779     /* Get the number of warning events - FPIN and Signal for this minute */
5780     wvalue = 0;
5781     if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5782         phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5783         phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5784         wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5785     atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5786 
5787     /* Get the number of alarm events - FPIN and Signal for this minute */
5788     avalue = 0;
5789     if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5790         phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5791         avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5792     atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5793 
5794     /* Collect the driver, warning, alarm and latency counts for this
5795      * minute into the driver congestion buffer.
5796      */
5797     ptr = &cp->cgn_drvr_min[index];
5798     value = (uint16_t)dvalue;
5799     *ptr = cpu_to_le16(value);
5800 
5801     ptr = &cp->cgn_warn_min[index];
5802     value = (uint16_t)wvalue;
5803     *ptr = cpu_to_le16(value);
5804 
5805     ptr = &cp->cgn_alarm_min[index];
5806     value = (uint16_t)avalue;
5807     *ptr = cpu_to_le16(value);
5808 
5809     lptr = &cp->cgn_latency_min[index];
5810     if (lvalue) {
5811         lvalue = (uint32_t)div_u64(latsum, lvalue);
5812         *lptr = cpu_to_le32(lvalue);
5813     } else {
5814         *lptr = 0;
5815     }
5816 
5817     /* Collect the bandwidth value into the driver's congestion buffer. */
5818     mptr = &cp->cgn_bw_min[index];
5819     *mptr = cpu_to_le16(mvalue);
5820 
5821     lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5822             "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5823             index, dvalue, wvalue, *lptr, mvalue, avalue);
5824 
5825     /* Every hour */
5826     if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5827         /* Record congestion buffer info - every hour
5828          * Collapse all minutes into an hour
5829          */
5830         index = ++cp->cgn_index_hour;
5831         if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5832             cp->cgn_index_hour = 0;
5833             index = 0;
5834         }
5835 
5836         dvalue = 0;
5837         wvalue = 0;
5838         lvalue = 0;
5839         avalue = 0;
5840         mvalue = 0;
5841         mbps = 0;
5842         for (i = 0; i < LPFC_MIN_HOUR; i++) {
5843             dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5844             wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5845             lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5846             mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5847             avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5848         }
5849         if (lvalue)     /* Avg of latency averages */
5850             lvalue /= LPFC_MIN_HOUR;
5851         if (mbps)       /* Avg of Bandwidth averages */
5852             mvalue = mbps / LPFC_MIN_HOUR;
5853 
5854         lptr = &cp->cgn_drvr_hr[index];
5855         *lptr = cpu_to_le32(dvalue);
5856         lptr = &cp->cgn_warn_hr[index];
5857         *lptr = cpu_to_le32(wvalue);
5858         lptr = &cp->cgn_latency_hr[index];
5859         *lptr = cpu_to_le32(lvalue);
5860         mptr = &cp->cgn_bw_hr[index];
5861         *mptr = cpu_to_le16(mvalue);
5862         lptr = &cp->cgn_alarm_hr[index];
5863         *lptr = cpu_to_le32(avalue);
5864 
5865         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5866                 "2419 Congestion Info - hour "
5867                 "(%d): %d %d %d %d %d\n",
5868                 index, dvalue, wvalue, lvalue, mvalue, avalue);
5869     }
5870 
5871     /* Every day */
5872     if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5873         /* Record congestion buffer info - every day
5874          * Collapse all hours into a day. Rotate days
5875          * after LPFC_MAX_CGN_DAYS.
5876          */
5877         index = ++cp->cgn_index_day;
5878         if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5879             cp->cgn_index_day = 0;
5880             index = 0;
5881         }
5882 
5883         /* Anytime we overwrite daily index 0, after we wrap,
5884          * we will be overwriting the oldest day, so we must
5885          * update the congestion data start time for that day.
5886          * That start time should have previously been saved after
5887          * we wrote the last day's worth of data.
5888          */
5889         if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
5890             time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
5891 
5892             cp->cgn_info_month = broken.tm_mon + 1;
5893             cp->cgn_info_day = broken.tm_mday;
5894             cp->cgn_info_year = broken.tm_year - 100;
5895             cp->cgn_info_hour = broken.tm_hour;
5896             cp->cgn_info_minute = broken.tm_min;
5897             cp->cgn_info_second = broken.tm_sec;
5898 
5899             lpfc_printf_log
5900                 (phba, KERN_INFO, LOG_CGN_MGMT,
5901                 "2646 CGNInfo idx0 Start Time: "
5902                 "%d/%d/%d %d:%d:%d\n",
5903                 cp->cgn_info_day, cp->cgn_info_month,
5904                 cp->cgn_info_year, cp->cgn_info_hour,
5905                 cp->cgn_info_minute, cp->cgn_info_second);
5906         }
5907 
5908         dvalue = 0;
5909         wvalue = 0;
5910         lvalue = 0;
5911         mvalue = 0;
5912         mbps = 0;
5913         avalue = 0;
5914         for (i = 0; i < LPFC_HOUR_DAY; i++) {
5915             dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5916             wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5917             lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5918             mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5919             avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5920         }
5921         if (lvalue)     /* Avg of latency averages */
5922             lvalue /= LPFC_HOUR_DAY;
5923         if (mbps)       /* Avg of Bandwidth averages */
5924             mvalue = mbps / LPFC_HOUR_DAY;
5925 
5926         lptr = &cp->cgn_drvr_day[index];
5927         *lptr = cpu_to_le32(dvalue);
5928         lptr = &cp->cgn_warn_day[index];
5929         *lptr = cpu_to_le32(wvalue);
5930         lptr = &cp->cgn_latency_day[index];
5931         *lptr = cpu_to_le32(lvalue);
5932         mptr = &cp->cgn_bw_day[index];
5933         *mptr = cpu_to_le16(mvalue);
5934         lptr = &cp->cgn_alarm_day[index];
5935         *lptr = cpu_to_le32(avalue);
5936 
5937         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5938                 "2420 Congestion Info - daily (%d): "
5939                 "%d %d %d %d %d\n",
5940                 index, dvalue, wvalue, lvalue, mvalue, avalue);
5941 
5942         /* We just wrote LPFC_MAX_CGN_DAYS of data,
5943          * so we are wrapped on any data after this.
5944          * Save this as the start time for the next day.
5945          */
5946         if (index == (LPFC_MAX_CGN_DAYS - 1)) {
5947             phba->hba_flag |= HBA_CGN_DAY_WRAP;
5948             ktime_get_real_ts64(&phba->cgn_daily_ts);
5949         }
5950     }
5951 
5952     /* Use the frequency found in the last rcv'ed FPIN */
5953     value = phba->cgn_fpin_frequency;
5954     cp->cgn_warn_freq = cpu_to_le16(value);
5955     cp->cgn_alarm_freq = cpu_to_le16(value);
5956 
5957     lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5958                      LPFC_CGN_CRC32_SEED);
5959     cp->cgn_info_crc = cpu_to_le32(lvalue);
5960 }
5961 
5962 /**
5963  * lpfc_calc_cmf_latency - latency from start of rate timer interval
5964  * @phba: The Hba for which this call is being executed.
5965  *
5966  * The routine calculates the latency from the beginning of the CMF timer
5967  * interval to the current point in time. It is called from IO completion
5968  * when we exceed our Bandwidth limitation for the time interval.
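      *
      * For example, if the interval started at 10.750000000 s and the
      * completion is seen at 11.020000000 s, the routine returns
      * (11 - 10 - 1) * 1000 + ((1000000000 - 750000000) + 20000000) /
      * 1000000 = 270 ms.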
5969  */
5970 uint32_t
5971 lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5972 {
5973     struct timespec64 cmpl_time;
5974     uint32_t msec = 0;
5975 
5976     ktime_get_real_ts64(&cmpl_time);
5977 
5978     /* This routine works on a ms granularity so sec and usec are
5979      * converted accordingly.
5980      */
5981     if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5982         msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5983             NSEC_PER_MSEC;
5984     } else {
5985         if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5986             msec = (cmpl_time.tv_sec -
5987                 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5988             msec += ((cmpl_time.tv_nsec -
5989                   phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5990         } else {
5991             msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5992                 1) * MSEC_PER_SEC;
5993             msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5994                  cmpl_time.tv_nsec) / NSEC_PER_MSEC);
5995         }
5996     }
5997     return msec;
5998 }
5999 
6000 /**
6001  * lpfc_cmf_timer -  This is the timer function for one congestion
6002  * rate interval.
6003  * @timer: Pointer to the high resolution timer that expired
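      *
      * Returns HRTIMER_RESTART so the timer re-arms for the next interval,
      * or HRTIMER_NORESTART when congestion management is off or not yet
      * initialized.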
6004  */
6005 static enum hrtimer_restart
6006 lpfc_cmf_timer(struct hrtimer *timer)
6007 {
6008     struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
6009                          cmf_timer);
6010     struct rxtable_entry *entry;
6011     uint32_t io_cnt;
6012     uint32_t head, tail;
6013     uint32_t busy, max_read;
6014     uint64_t total, rcv, lat, mbpi, extra, cnt;
6015     int timer_interval = LPFC_CMF_INTERVAL;
6016     uint32_t ms;
6017     struct lpfc_cgn_stat *cgs;
6018     int cpu;
6019 
6020     /* Only restart the timer if congestion mgmt is on */
6021     if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6022         !phba->cmf_latency.tv_sec) {
6023         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
6024                 "6224 CMF timer exit: %d %lld\n",
6025                 phba->cmf_active_mode,
6026                 (uint64_t)phba->cmf_latency.tv_sec);
6027         return HRTIMER_NORESTART;
6028     }
6029 
6030     /* If pport is not ready yet, just exit and wait for
6031      * the next timer cycle to hit.
6032      */
6033     if (!phba->pport)
6034         goto skip;
6035 
6036     /* Do not block SCSI IO while in the timer routine since
6037      * total_bytes will be cleared
6038      */
6039     atomic_set(&phba->cmf_stop_io, 1);
6040 
6041     /* First we need to calculate the actual ms between
6042      * the last timer interrupt and this one. We ask for
6043      * LPFC_CMF_INTERVAL, however the actual time may
6044      * vary depending on system overhead.
6045      */
6046     ms = lpfc_calc_cmf_latency(phba);
6047 
6048 
6049     /* Immediately after we calculate the time since the last
6050      * timer interrupt, set the start time for the next
6051      * interrupt
6052      */
6053     ktime_get_real_ts64(&phba->cmf_latency);
6054 
6055     phba->cmf_link_byte_count =
6056         div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
6057 
6058     /* Collect all the stats from the prior timer interval */
6059     total = 0;
6060     io_cnt = 0;
6061     lat = 0;
6062     rcv = 0;
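         /* atomic*_xchg(..., 0) reads and clears each per-CPU counter in
          * one step, so the next interval starts counting from zero.
          */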
6063     for_each_present_cpu(cpu) {
6064         cgs = per_cpu_ptr(phba->cmf_stat, cpu);
6065         total += atomic64_xchg(&cgs->total_bytes, 0);
6066         io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
6067         lat += atomic64_xchg(&cgs->rx_latency, 0);
6068         rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
6069     }
6070 
6071     /* Before we issue another CMF_SYNC_WQE, retrieve the BW
6072      * returned from the last CMF_SYNC_WQE issued, from
6073      * cmf_last_sync_bw. This will be the target BW for
6074      * this next timer interval.
6075      */
6076     if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6077         phba->link_state != LPFC_LINK_DOWN &&
6078         phba->hba_flag & HBA_SETUP) {
6079         mbpi = phba->cmf_last_sync_bw;
6080         phba->cmf_last_sync_bw = 0;
6081         extra = 0;
6082 
6083         /* Calculate any extra bytes needed to account for the
6084          * timer accuracy. If we are less than LPFC_CMF_INTERVAL
6085          * calculate the adjustment needed for total to reflect
6086          * a full LPFC_CMF_INTERVAL.
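              * For instance, if LPFC_CMF_INTERVAL were 500 ms and only
              * ms = 450 elapsed with total = 900000 bytes, cnt becomes
              * (900000 / 450) * 500 = 1000000, so extra = 100000 bytes.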
6087          */
6088         if (ms && ms < LPFC_CMF_INTERVAL) {
6089             cnt = div_u64(total, ms); /* bytes per ms */
6090             cnt *= LPFC_CMF_INTERVAL; /* what total should be */
6091 
6092             /* If the timeout is scheduled to be shorter,
6093              * this value may skew the data, so cap it at mbpi.
6094              */
6095             if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
6096                 cnt = mbpi;
6097 
6098             extra = cnt - total;
6099         }
6100         lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
6101     } else {
6102         /* For Monitor mode or link down we want mbpi
6103          * to be the full link speed
6104          */
6105         mbpi = phba->cmf_link_byte_count;
6106         extra = 0;
6107     }
6108     phba->cmf_timer_cnt++;
6109 
6110     if (io_cnt) {
6111         /* Update congestion info buffer latency in us */
6112         atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6113         atomic64_add(lat, &phba->cgn_latency_evt);
6114     }
6115     busy = atomic_xchg(&phba->cmf_busy, 0);
6116     max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6117 
6118     /* Calculate MBPI for the next timer interval */
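         /* MBPI is the max_bytes_per_interval budget; it is capped at the
          * link byte count and, in Monitor mode, pinned to the full link
          * rate.
          */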
6119     if (mbpi) {
6120         if (mbpi > phba->cmf_link_byte_count ||
6121             phba->cmf_active_mode == LPFC_CFG_MONITOR)
6122             mbpi = phba->cmf_link_byte_count;
6123 
6124         /* Change max_bytes_per_interval to what the prior
6125          * CMF_SYNC_WQE cmpl indicated.
6126          */
6127         if (mbpi != phba->cmf_max_bytes_per_interval)
6128             phba->cmf_max_bytes_per_interval = mbpi;
6129     }
6130 
6131     /* Save rxmonitor information for debug */
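         /* The rxtable is a ring of LPFC_MAX_RXMONITOR_ENTRY entries; the
          * head index is parked at LPFC_RXMONITOR_TABLE_IN_USE while the
          * entry is filled, and when head catches up with tail the tail
          * is advanced, dropping the oldest sample.
          */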
6132     if (phba->rxtable) {
6133         head = atomic_xchg(&phba->rxtable_idx_head,
6134                    LPFC_RXMONITOR_TABLE_IN_USE);
6135         entry = &phba->rxtable[head];
6136         entry->total_bytes = total;
6137         entry->cmf_bytes = total + extra;
6138         entry->rcv_bytes = rcv;
6139         entry->cmf_busy = busy;
6140         entry->cmf_info = phba->cmf_active_info;
6141         if (io_cnt) {
6142             entry->avg_io_latency = div_u64(lat, io_cnt);
6143             entry->avg_io_size = div_u64(rcv, io_cnt);
6144         } else {
6145             entry->avg_io_latency = 0;
6146             entry->avg_io_size = 0;
6147         }
6148         entry->max_read_cnt = max_read;
6149         entry->io_cnt = io_cnt;
6150         entry->max_bytes_per_interval = mbpi;
6151         if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6152             entry->timer_utilization = phba->cmf_last_ts;
6153         else
6154             entry->timer_utilization = ms;
6155         entry->timer_interval = ms;
6156         phba->cmf_last_ts = 0;
6157 
6158         /* Increment rxtable index */
6159         head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6160         tail = atomic_read(&phba->rxtable_idx_tail);
6161         if (head == tail) {
6162             tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6163             atomic_set(&phba->rxtable_idx_tail, tail);
6164         }
6165         atomic_set(&phba->rxtable_idx_head, head);
6166     }
6167 
6168     if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6169         /* If Monitor mode, check if we are oversubscribed
6170          * against the full line rate.
6171          */
6172         if (mbpi && total > mbpi)
6173             atomic_inc(&phba->cgn_driver_evt_cnt);
6174     }
6175     phba->rx_block_cnt += div_u64(rcv, 512);  /* save 512 byte block cnt */
6176 
6177     /* Each minute save Fabric and Driver congestion information */
6178     lpfc_cgn_save_evt_cnt(phba);
6179 
6180     phba->hba_flag &= ~HBA_SHORT_CMF;
6181 
6182     /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
6183      * minute, adjust our next timer interval, if needed, to ensure a
6184      * 1 minute granularity when we get the next timer interrupt.
6185      */
6186     if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
6187                phba->cgn_evt_timestamp)) {
6188         timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
6189                           jiffies);
6190         if (timer_interval <= 0)
6191             timer_interval = LPFC_CMF_INTERVAL;
6192         else
6193             phba->hba_flag |= HBA_SHORT_CMF;
6194 
6195         /* If we adjust timer_interval, max_bytes_per_interval
6196          * needs to be adjusted as well.
6197          */
6198         phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
6199                             timer_interval, 1000);
6200         if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
6201             phba->cmf_max_bytes_per_interval =
6202                 phba->cmf_link_byte_count;
6203     }
6204 
6205     /* Since total_bytes has already been zeroed, it's okay to unblock
6206      * after max_bytes_per_interval is setup.
6207      */
6208     if (atomic_xchg(&phba->cmf_bw_wait, 0))
6209         queue_work(phba->wq, &phba->unblock_request_work);
6210 
6211     /* SCSI IO is now unblocked */
6212     atomic_set(&phba->cmf_stop_io, 0);
6213 
6214 skip:
6215     hrtimer_forward_now(timer,
6216                 ktime_set(0, timer_interval * NSEC_PER_MSEC));
6217     return HRTIMER_RESTART;
6218 }
6219 
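     /* Expand to "Link up"/"Link down" for trunk port __idx, or "NA" if
      * that port is not part of the trunk configuration.
      */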
6220 #define trunk_link_status(__idx)\
6221     bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6222            ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6223         "Link up" : "Link down") : "NA"
6224 /* Did port __idx report an error? */
6225 #define trunk_port_fault(__idx)\
6226     bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6227            (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
6228 
6229 static void
6230 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6231                   struct lpfc_acqe_fc_la *acqe_fc)
6232 {
6233     uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6234     uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6235 
6236     phba->sli4_hba.link_state.speed =
6237         lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6238                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6239 
6240     phba->sli4_hba.link_state.logical_speed =
6241                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6242     /* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
6243     phba->fc_linkspeed =
6244          lpfc_async_link_speed_to_read_top(
6245                 phba,
6246                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6247 
6248     if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6249         phba->trunk_link.link0.state =
6250             bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6251             ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6252         phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6253     }
6254     if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6255         phba->trunk_link.link1.state =
6256             bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6257             ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6258         phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6259     }
6260     if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6261         phba->trunk_link.link2.state =
6262             bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6263             ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6264         phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6265     }
6266     if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6267         phba->trunk_link.link3.state =
6268             bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6269             ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6270         phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6271     }
6272 
6273     lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6274             "2910 Async FC Trunking Event - Speed:%d\n"
6275             "\tLogical speed:%d "
6276             "port0: %s port1: %s port2: %s port3: %s\n",
6277             phba->sli4_hba.link_state.speed,
6278             phba->sli4_hba.link_state.logical_speed,
6279             trunk_link_status(0), trunk_link_status(1),
6280             trunk_link_status(2), trunk_link_status(3));
6281 
6282     if (phba->cmf_active_mode != LPFC_CFG_OFF)
6283         lpfc_cmf_signal_init(phba);
6284 
6285     if (port_fault)
6286         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6287                 "3202 trunk error:0x%x (%s) seen on port0:%s "
6288                 /*
6289                  * SLI-4: We have only 0xA error codes
6290                  * defined as of now. print an appropriate
6291                  * message in case driver needs to be updated.
6292                  */
6293                 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6294                 "UNDEFINED. update driver." : trunk_errmsg[err],
6295                 trunk_port_fault(0), trunk_port_fault(1),
6296                 trunk_port_fault(2), trunk_port_fault(3));
6297 }
6298 
6299 
6300 /**
6301  * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
6302  * @phba: pointer to lpfc hba data structure.
6303  * @acqe_fc: pointer to the async fc completion queue entry.
6304  *
6305  * This routine is to handle the SLI4 asynchronous FC event. It will simply log
6306  * that the event was received and then issue a read_topology mailbox command so
6307  * that the rest of the driver will treat it the same as SLI3.
6308  **/
6309 static void
6310 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6311 {
6312     LPFC_MBOXQ_t *pmb;
6313     MAILBOX_t *mb;
6314     struct lpfc_mbx_read_top *la;
6315     int rc;
6316 
6317     if (bf_get(lpfc_trailer_type, acqe_fc) !=
6318         LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6319         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6320                 "2895 Non FC link Event detected.(%d)\n",
6321                 bf_get(lpfc_trailer_type, acqe_fc));
6322         return;
6323     }
6324 
6325     if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6326         LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6327         lpfc_update_trunk_link_status(phba, acqe_fc);
6328         return;
6329     }
6330 
6331     /* Keep the link status for extra SLI4 state machine reference */
6332     phba->sli4_hba.link_state.speed =
6333             lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6334                 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6335     phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6336     phba->sli4_hba.link_state.topology =
6337                 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6338     phba->sli4_hba.link_state.status =
6339                 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6340     phba->sli4_hba.link_state.type =
6341                 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6342     phba->sli4_hba.link_state.number =
6343                 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6344     phba->sli4_hba.link_state.fault =
6345                 bf_get(lpfc_acqe_link_fault, acqe_fc);
6346 
6347     if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6348         LPFC_FC_LA_TYPE_LINK_DOWN)
6349         phba->sli4_hba.link_state.logical_speed = 0;
6350     else if (!phba->sli4_hba.conf_trunk)
6351         phba->sli4_hba.link_state.logical_speed =
6352                 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6353 
6354     lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6355             "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6356             "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6357             "%dMbps Fault:%d\n",
6358             phba->sli4_hba.link_state.speed,
6359             phba->sli4_hba.link_state.topology,
6360             phba->sli4_hba.link_state.status,
6361             phba->sli4_hba.link_state.type,
6362             phba->sli4_hba.link_state.number,
6363             phba->sli4_hba.link_state.logical_speed,
6364             phba->sli4_hba.link_state.fault);
6365     pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6366     if (!pmb) {
6367         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6368                 "2897 The mboxq allocation failed\n");
6369         return;
6370     }
6371     rc = lpfc_mbox_rsrc_prep(phba, pmb);
6372     if (rc) {
6373         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6374                 "2898 The mboxq prep failed\n");
6375         goto out_free_pmb;
6376     }
6377 
6378     /* Cleanup any outstanding ELS commands */
6379     lpfc_els_flush_all_cmd(phba);
6380 
6381     /* Block ELS IOCBs until we have done process link event */
6382     phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6383 
6384     /* Update link event statistics */
6385     phba->sli.slistat.link_event++;
6386 
6387     /* Create lpfc_handle_latt mailbox command from link ACQE */
6388     lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
6389     pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6390     pmb->vport = phba->pport;
6391 
6392     if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6393         phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6394 
6395         switch (phba->sli4_hba.link_state.status) {
6396         case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6397             phba->link_flag |= LS_MDS_LINK_DOWN;
6398             break;
6399         case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6400             phba->link_flag |= LS_MDS_LOOPBACK;
6401             break;
6402         default:
6403             break;
6404         }
6405 
6406         /* Initialize completion status */
6407         mb = &pmb->u.mb;
6408         mb->mbxStatus = MBX_SUCCESS;
6409 
6410         /* Parse port fault information field */
6411         lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6412 
6413         /* Parse and translate link attention fields */
6414         la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6415         la->eventTag = acqe_fc->event_tag;
6416 
6417         if (phba->sli4_hba.link_state.status ==
6418             LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6419             bf_set(lpfc_mbx_read_top_att_type, la,
6420                    LPFC_FC_LA_TYPE_UNEXP_WWPN);
6421         } else {
6422             bf_set(lpfc_mbx_read_top_att_type, la,
6423                    LPFC_FC_LA_TYPE_LINK_DOWN);
6424         }
6425         /* Invoke the mailbox command callback function */
6426         lpfc_mbx_cmpl_read_topology(phba, pmb);
6427 
6428         return;
6429     }
6430 
6431     rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6432     if (rc == MBX_NOT_FINISHED)
6433         goto out_free_pmb;
6434     return;
6435 
6436 out_free_pmb:
6437     lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6438 }
6439 
6440 /**
6441  * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
6442  * @phba: pointer to lpfc hba data structure.
6443  * @acqe_sli: pointer to the async SLI completion queue entry.
6444  *
6445  * This routine is to handle the SLI4 asynchronous SLI events.
6446  **/
6447 static void
6448 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6449 {
6450     char port_name;
6451     char message[128];
6452     uint8_t status;
6453     uint8_t evt_type;
6454     uint8_t operational = 0;
6455     struct temp_event temp_event_data;
6456     struct lpfc_acqe_misconfigured_event *misconfigured;
6457     struct lpfc_acqe_cgn_signal *cgn_signal;
6458     struct Scsi_Host  *shost;
6459     struct lpfc_vport **vports;
6460     int rc, i, cnt;
6461 
6462     evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6463 
6464     lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6465             "2901 Async SLI event - Type:%d, Event Data: x%08x "
6466             "x%08x x%08x x%08x\n", evt_type,
6467             acqe_sli->event_data1, acqe_sli->event_data2,
6468             acqe_sli->reserved, acqe_sli->trailer);
6469 
6470     port_name = phba->Port[0];
6471     if (port_name == 0x00)
6472         port_name = '?'; /* port name is empty */
6473 
6474     switch (evt_type) {
6475     case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6476         temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6477         temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6478         temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6479 
6480         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6481                 "3190 Over Temperature:%d Celsius- Port Name %c\n",
6482                 acqe_sli->event_data1, port_name);
6483 
6484         phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6485         shost = lpfc_shost_from_vport(phba->pport);
6486         fc_host_post_vendor_event(shost, fc_get_event_number(),
6487                       sizeof(temp_event_data),
6488                       (char *)&temp_event_data,
6489                       SCSI_NL_VID_TYPE_PCI
6490                       | PCI_VENDOR_ID_EMULEX);
6491         break;
6492     case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6493         temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6494         temp_event_data.event_code = LPFC_NORMAL_TEMP;
6495         temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6496 
6497         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6498                 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6499                 acqe_sli->event_data1, port_name);
6500 
6501         shost = lpfc_shost_from_vport(phba->pport);
6502         fc_host_post_vendor_event(shost, fc_get_event_number(),
6503                       sizeof(temp_event_data),
6504                       (char *)&temp_event_data,
6505                       SCSI_NL_VID_TYPE_PCI
6506                       | PCI_VENDOR_ID_EMULEX);
6507         break;
6508     case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6509         misconfigured = (struct lpfc_acqe_misconfigured_event *)
6510                     &acqe_sli->event_data1;
6511 
6512         /* fetch the status for this port */
6513         switch (phba->sli4_hba.lnk_info.lnk_no) {
6514         case LPFC_LINK_NUMBER_0:
6515             status = bf_get(lpfc_sli_misconfigured_port0_state,
6516                     &misconfigured->theEvent);
6517             operational = bf_get(lpfc_sli_misconfigured_port0_op,
6518                     &misconfigured->theEvent);
6519             break;
6520         case LPFC_LINK_NUMBER_1:
6521             status = bf_get(lpfc_sli_misconfigured_port1_state,
6522                     &misconfigured->theEvent);
6523             operational = bf_get(lpfc_sli_misconfigured_port1_op,
6524                     &misconfigured->theEvent);
6525             break;
6526         case LPFC_LINK_NUMBER_2:
6527             status = bf_get(lpfc_sli_misconfigured_port2_state,
6528                     &misconfigured->theEvent);
6529             operational = bf_get(lpfc_sli_misconfigured_port2_op,
6530                     &misconfigured->theEvent);
6531             break;
6532         case LPFC_LINK_NUMBER_3:
6533             status = bf_get(lpfc_sli_misconfigured_port3_state,
6534                     &misconfigured->theEvent);
6535             operational = bf_get(lpfc_sli_misconfigured_port3_op,
6536                     &misconfigured->theEvent);
6537             break;
6538         default:
6539             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6540                     "3296 "
6541                     "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6542                     "event: Invalid link %d",
6543                     phba->sli4_hba.lnk_info.lnk_no);
6544             return;
6545         }
6546 
6547         /* Skip if optic state unchanged */
6548         if (phba->sli4_hba.lnk_info.optic_state == status)
6549             return;
6550 
6551         switch (status) {
6552         case LPFC_SLI_EVENT_STATUS_VALID:
6553             sprintf(message, "Physical Link is functional");
6554             break;
6555         case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6556             sprintf(message, "Optics faulted/incorrectly "
6557                 "installed/not installed - Reseat optics, "
6558                 "if issue not resolved, replace.");
6559             break;
6560         case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6561             sprintf(message,
6562                 "Optics of two types installed - Remove one "
6563                 "optic or install matching pair of optics.");
6564             break;
6565         case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6566             sprintf(message, "Incompatible optics - Replace with "
6567                 "compatible optics for card to function.");
6568             break;
6569         case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6570             sprintf(message, "Unqualified optics - Replace with "
6571                 "Avago optics for Warranty and Technical "
6572                 "Support - Link is%s operational",
6573                 (operational) ? " not" : "");
6574             break;
6575         case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6576             sprintf(message, "Uncertified optics - Replace with "
6577                 "Avago-certified optics to enable link "
6578                 "operation - Link is%s operational",
6579                 (operational) ? " not" : "");
6580             break;
6581         default:
6582             /* firmware is reporting a status we don't know about */
6583             sprintf(message, "Unknown event status x%02x", status);
6584             break;
6585         }
6586 
6587         /* Issue READ_CONFIG mbox command to refresh supported speeds */
6588         rc = lpfc_sli4_read_config(phba);
6589         if (rc) {
6590             phba->lmt = 0;
6591             lpfc_printf_log(phba, KERN_ERR,
6592                     LOG_TRACE_EVENT,
6593                     "3194 Unable to retrieve supported "
6594                     "speeds, rc = 0x%x\n", rc);
6595         }
6596         rc = lpfc_sli4_refresh_params(phba);
6597         if (rc) {
6598             lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6599                     "3174 Unable to update pls support, "
6600                     "rc x%x\n", rc);
6601         }
6602         vports = lpfc_create_vport_work_array(phba);
6603         if (vports != NULL) {
6604             for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6605                     i++) {
6606                 shost = lpfc_shost_from_vport(vports[i]);
6607                 lpfc_host_supported_speeds_set(shost);
6608             }
6609         }
6610         lpfc_destroy_vport_work_array(phba, vports);
6611 
6612         phba->sli4_hba.lnk_info.optic_state = status;
6613         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6614                 "3176 Port Name %c %s\n", port_name, message);
6615         break;
6616     case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6617         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6618                 "3192 Remote DPort Test Initiated - "
6619                 "Event Data1:x%08x Event Data2: x%08x\n",
6620                 acqe_sli->event_data1, acqe_sli->event_data2);
6621         break;
6622     case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6623         /* Call FW to obtain active parms */
6624         lpfc_sli4_cgn_parm_chg_evt(phba);
6625         break;
6626     case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
6627         /* Misconfigured WWN. Reports that the SLI Port is configured
6628          * to use FA-WWN, but the attached device doesn’t support it.
6629          * Event Data1 - N.A, Event Data2 - N.A
6630          * This event only happens on the physical port.
6631          */
6632         lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
6633                  "2699 Misconfigured FA-PWWN - Attached device "
6634                  "does not support FA-PWWN\n");
6635         phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
6636         memset(phba->pport->fc_portname.u.wwn, 0,
6637                sizeof(struct lpfc_name));
6638         break;
6639     case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6640         /* EEPROM failure. No driver action is required */
6641         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6642                  "2518 EEPROM failure - "
6643                  "Event Data1: x%08x Event Data2: x%08x\n",
6644                  acqe_sli->event_data1, acqe_sli->event_data2);
6645         break;
6646     case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6647         if (phba->cmf_active_mode == LPFC_CFG_OFF)
6648             break;
6649         cgn_signal = (struct lpfc_acqe_cgn_signal *)
6650                     &acqe_sli->event_data1;
6651         phba->cgn_acqe_cnt++;
6652 
6653         cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6654         atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6655         atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6656 
6657         /* no threshold for CMF, even 1 signal will trigger an event */
6658 
6659         /* Alarm overrides warning, so check that first */
6660         if (cgn_signal->alarm_cnt) {
6661             if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6662                 /* Keep track of alarm cnt for CMF_SYNC_WQE */
6663                 atomic_add(cgn_signal->alarm_cnt,
6664                        &phba->cgn_sync_alarm_cnt);
6665             }
6666         } else if (cnt) {
6667             /* signal action needs to be taken */
6668             if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6669                 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6670                 /* Keep track of warning cnt for CMF_SYNC_WQE */
6671                 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6672             }
6673         }
6674         break;
6675     default:
6676         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6677                 "3193 Unrecognized SLI event, type: 0x%x",
6678                 evt_type);
6679         break;
6680     }
6681 }
6682 
6683 /**
6684  * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
6685  * @vport: pointer to vport data structure.
6686  *
6687  * This routine is to perform Clear Virtual Link (CVL) on a vport in
6688  * response to a CVL event.
6689  *
6690  * Return the pointer to the ndlp with the vport if successful, otherwise
6691  * return NULL.
6692  **/
6693 static struct lpfc_nodelist *
6694 lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6695 {
6696     struct lpfc_nodelist *ndlp;
6697     struct Scsi_Host *shost;
6698     struct lpfc_hba *phba;
6699 
6700     if (!vport)
6701         return NULL;
6702     phba = vport->phba;
6703     if (!phba)
6704         return NULL;
6705     ndlp = lpfc_findnode_did(vport, Fabric_DID);
6706     if (!ndlp) {
6707         /* Cannot find existing Fabric ndlp, so allocate a new one */
6708         ndlp = lpfc_nlp_init(vport, Fabric_DID);
6709         if (!ndlp)
6710             return NULL;
6711         /* Set the node type */
6712         ndlp->nlp_type |= NLP_FABRIC;
6713         /* Put ndlp onto node list */
6714         lpfc_enqueue_node(vport, ndlp);
6715     }
6716     if ((phba->pport->port_state < LPFC_FLOGI) &&
6717         (phba->pport->port_state != LPFC_VPORT_FAILED))
6718         return NULL;
6719     /* If virtual link is not yet instantiated ignore CVL */
6720     if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6721         && (vport->port_state != LPFC_VPORT_FAILED))
6722         return NULL;
6723     shost = lpfc_shost_from_vport(vport);
6724     if (!shost)
6725         return NULL;
6726     lpfc_linkdown_port(vport);
6727     lpfc_cleanup_pending_mbox(vport);
6728     spin_lock_irq(shost->host_lock);
6729     vport->fc_flag |= FC_VPORT_CVL_RCVD;
6730     spin_unlock_irq(shost->host_lock);
6731 
6732     return ndlp;
6733 }
6734 
6735 /**
6736  * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
6737  * @phba: pointer to lpfc hba data structure.
6738  *
6739  * This routine is to perform Clear Virtual Link (CVL) on all vports in
6740  * response to a FCF dead event.
6741  **/
6742 static void
6743 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6744 {
6745     struct lpfc_vport **vports;
6746     int i;
6747 
6748     vports = lpfc_create_vport_work_array(phba);
6749     if (vports)
6750         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6751             lpfc_sli4_perform_vport_cvl(vports[i]);
6752     lpfc_destroy_vport_work_array(phba, vports);
6753 }
6754 
6755 /**
6756  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
6757  * @phba: pointer to lpfc hba data structure.
6758  * @acqe_fip: pointer to the async fcoe completion queue entry.
6759  *
6760  * This routine is to handle the SLI4 asynchronous fcoe event.
6761  **/
6762 static void
6763 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6764             struct lpfc_acqe_fip *acqe_fip)
6765 {
6766     uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6767     int rc;
6768     struct lpfc_vport *vport;
6769     struct lpfc_nodelist *ndlp;
6770     int active_vlink_present;
6771     struct lpfc_vport **vports;
6772     int i;
6773 
6774     phba->fc_eventTag = acqe_fip->event_tag;
6775     phba->fcoe_eventtag = acqe_fip->event_tag;
6776     switch (event_type) {
6777     case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6778     case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6779         if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6780             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6781                     "2546 New FCF event, evt_tag:x%x, "
6782                     "index:x%x\n",
6783                     acqe_fip->event_tag,
6784                     acqe_fip->index);
6785         else
6786             lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6787                     LOG_DISCOVERY,
6788                     "2788 FCF param modified event, "
6789                     "evt_tag:x%x, index:x%x\n",
6790                     acqe_fip->event_tag,
6791                     acqe_fip->index);
6792         if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6793             /*
6794              * During period of FCF discovery, read the FCF
6795              * table record indexed by the event to update
6796              * FCF roundrobin failover eligible FCF bmask.
6797              */
6798             lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6799                     LOG_DISCOVERY,
6800                     "2779 Read FCF (x%x) for updating "
6801                     "roundrobin FCF failover bmask\n",
6802                     acqe_fip->index);
6803             rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6804         }
6805 
6806         /* If the FCF discovery is in progress, do nothing. */
6807         spin_lock_irq(&phba->hbalock);
6808         if (phba->hba_flag & FCF_TS_INPROG) {
6809             spin_unlock_irq(&phba->hbalock);
6810             break;
6811         }
6812         /* If fast FCF failover rescan event is pending, do nothing */
6813         if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6814             spin_unlock_irq(&phba->hbalock);
6815             break;
6816         }
6817 
6818         /* If the FCF is already in the discovered state, do nothing. */
6819         if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6820             spin_unlock_irq(&phba->hbalock);
6821             break;
6822         }
6823         spin_unlock_irq(&phba->hbalock);
6824 
6825         /* Otherwise, scan the entire FCF table and re-discover SAN */
6826         lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6827                 "2770 Start FCF table scan per async FCF "
6828                 "event, evt_tag:x%x, index:x%x\n",
6829                 acqe_fip->event_tag, acqe_fip->index);
6830         rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6831                              LPFC_FCOE_FCF_GET_FIRST);
6832         if (rc)
6833             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6834                     "2547 Issue FCF scan read FCF mailbox "
6835                     "command failed (x%x)\n", rc);
6836         break;
6837 
6838     case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6839         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6840                 "2548 FCF Table full count 0x%x tag 0x%x\n",
6841                 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6842                 acqe_fip->event_tag);
6843         break;
6844 
6845     case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6846         phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6847         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6848                 "2549 FCF (x%x) disconnected from network, "
6849                  "tag:x%x\n", acqe_fip->index,
6850                  acqe_fip->event_tag);
6851         /*
6852          * If we are in the middle of FCF failover process, clear
6853          * the corresponding FCF bit in the roundrobin bitmap.
6854          */
6855         spin_lock_irq(&phba->hbalock);
6856         if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6857             (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6858             spin_unlock_irq(&phba->hbalock);
6859             /* Update FLOGI FCF failover eligible FCF bmask */
6860             lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6861             break;
6862         }
6863         spin_unlock_irq(&phba->hbalock);
6864 
6865         /* If the event is not for the currently used FCF, do nothing */
6866         if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6867             break;
6868 
6869         /*
6870          * Otherwise, request the port to rediscover the entire FCF
6871          * table for a fast recovery in case the current FCF is no
6872          * longer valid, since we are not already in the middle of
6873          * the FCF failover process.
6874          */
6875         spin_lock_irq(&phba->hbalock);
6876         /* Mark the fast failover process in progress */
6877         phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6878         spin_unlock_irq(&phba->hbalock);
6879 
6880         lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6881                 "2771 Start FCF fast failover process due to "
6882                 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6883                 "\n", acqe_fip->event_tag, acqe_fip->index);
6884         rc = lpfc_sli4_redisc_fcf_table(phba);
6885         if (rc) {
6886             lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6887                     LOG_TRACE_EVENT,
6888                     "2772 Issue FCF rediscover mailbox "
6889                     "command failed, fail through to FCF "
6890                     "dead event\n");
6891             spin_lock_irq(&phba->hbalock);
6892             phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6893             spin_unlock_irq(&phba->hbalock);
6894             /*
6895              * Last resort will fail over by treating this
6896              * as a link down to FCF registration.
6897              */
6898             lpfc_sli4_fcf_dead_failthrough(phba);
6899         } else {
6900             /* Reset FCF roundrobin bmask for new discovery */
6901             lpfc_sli4_clear_fcf_rr_bmask(phba);
6902             /*
6903              * Handling fast FCF failover to a DEAD FCF event is
6904              * considered equivalent to receiving CVL on all vports.
6905              */
6906             lpfc_sli4_perform_all_vport_cvl(phba);
6907         }
6908         break;
6909     case LPFC_FIP_EVENT_TYPE_CVL:
6910         phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6911         lpfc_printf_log(phba, KERN_ERR,
6912                 LOG_TRACE_EVENT,
6913             "2718 Clear Virtual Link Received for VPI 0x%x"
6914             " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6915 
6916         vport = lpfc_find_vport_by_vpid(phba,
6917                         acqe_fip->index);
6918         ndlp = lpfc_sli4_perform_vport_cvl(vport);
6919         if (!ndlp)
6920             break;
6921         active_vlink_present = 0;
6922 
6923         vports = lpfc_create_vport_work_array(phba);
6924         if (vports) {
6925             for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6926                     i++) {
6927                 if ((!(vports[i]->fc_flag &
6928                     FC_VPORT_CVL_RCVD)) &&
6929                     (vports[i]->port_state > LPFC_FDISC)) {
6930                     active_vlink_present = 1;
6931                     break;
6932                 }
6933             }
6934             lpfc_destroy_vport_work_array(phba, vports);
6935         }
6936 
6937         /*
6938          * Don't re-instantiate if vport is marked for deletion.
6939          * If we are here first then vport_delete is going to wait
6940          * for discovery to complete.
6941          */
6942         if (!(vport->load_flag & FC_UNLOADING) &&
6943                     active_vlink_present) {
6944             /*
6945              * If there are other active VLinks present,
6946              * re-instantiate the Vlink using FDISC.
6947              */
6948             mod_timer(&ndlp->nlp_delayfunc,
6949                   jiffies + msecs_to_jiffies(1000));
6950             spin_lock_irq(&ndlp->lock);
6951             ndlp->nlp_flag |= NLP_DELAY_TMO;
6952             spin_unlock_irq(&ndlp->lock);
6953             ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
6954             vport->port_state = LPFC_FDISC;
6955         } else {
6956             /*
6957              * Otherwise, request the port to rediscover
6958              * the entire FCF table for a fast recovery
6959              * in case the current FCF is no longer
6960              * valid, if we are not already in the
6961              * FCF failover process.
6962              */
6963             spin_lock_irq(&phba->hbalock);
6964             if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6965                 spin_unlock_irq(&phba->hbalock);
6966                 break;
6967             }
6968             /* Mark the fast failover process in progress */
6969             phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6970             spin_unlock_irq(&phba->hbalock);
6971             lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6972                     LOG_DISCOVERY,
6973                     "2773 Start FCF failover per CVL, "
6974                     "evt_tag:x%x\n", acqe_fip->event_tag);
6975             rc = lpfc_sli4_redisc_fcf_table(phba);
6976             if (rc) {
6977                 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6978                         LOG_TRACE_EVENT,
6979                         "2774 Issue FCF rediscover "
6980                         "mailbox command failed, "
6981                         "through to CVL event\n");
6982                 spin_lock_irq(&phba->hbalock);
6983                 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6984                 spin_unlock_irq(&phba->hbalock);
6985                 /*
6986                  * Last resort will be to retry on the
6987                  * currently registered FCF entry.
6988                  */
6989                 lpfc_retry_pport_discovery(phba);
6990             } else
6991                 /*
6992                  * Reset FCF roundrobin bmask for new
6993                  * discovery.
6994                  */
6995                 lpfc_sli4_clear_fcf_rr_bmask(phba);
6996         }
6997         break;
6998     default:
6999         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7000                 "0288 Unknown FCoE event type 0x%x event tag "
7001                 "0x%x\n", event_type, acqe_fip->event_tag);
7002         break;
7003     }
7004 }
7005 
7006 /**
7007  * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
7008  * @phba: pointer to lpfc hba data structure.
7009  * @acqe_dcbx: pointer to the async dcbx completion queue entry.
7010  *
7011  * This routine is to handle the SLI4 asynchronous dcbx event.
7012  **/
7013 static void
7014 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
7015              struct lpfc_acqe_dcbx *acqe_dcbx)
7016 {
7017     phba->fc_eventTag = acqe_dcbx->event_tag;
7018     lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7019             "0290 The SLI4 DCBX asynchronous event is not "
7020             "handled yet\n");
7021 }
7022 
7023 /**
7024  * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
7025  * @phba: pointer to lpfc hba data structure.
7026  * @acqe_grp5: pointer to the async grp5 completion queue entry.
7027  *
7028  * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
7029  * is an asynchronous notified of a logical link speed change.  The Port
7030  * is an asynchronous notification of a logical link speed change.  The Port
7031  **/
7032 static void
7033 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
7034              struct lpfc_acqe_grp5 *acqe_grp5)
7035 {
7036     uint16_t prev_ll_spd;
7037 
7038     phba->fc_eventTag = acqe_grp5->event_tag;
7039     phba->fcoe_eventtag = acqe_grp5->event_tag;
7040     prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
7041     phba->sli4_hba.link_state.logical_speed =
7042         (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
7043     lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7044             "2789 GRP5 Async Event: Updating logical link speed "
7045             "from %dMbps to %dMbps\n", prev_ll_spd,
7046             phba->sli4_hba.link_state.logical_speed);
7047 }
7048 
7049 /**
7050  * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
7051  * @phba: pointer to lpfc hba data structure.
7052  *
7053  * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat event
7054  * is an asynchronous notification of a request to reset CM stats.
7055  **/
7056 static void
7057 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7058 {
7059     if (!phba->cgn_i)
7060         return;
7061     lpfc_init_congestion_stat(phba);
7062 }
7063 
7064 /**
7065  * lpfc_cgn_params_val - Validate FW congestion parameters.
7066  * @phba: pointer to lpfc hba data structure.
7067  * @p_cfg_param: pointer to FW provided congestion parameters.
7068  *
7069  * This routine validates the congestion parameters passed
7070  * by the FW to the driver via an ACQE event.
7071  **/
7072 static void
7073 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7074 {
7075     spin_lock_irq(&phba->hbalock);
7076 
7077     if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7078                  LPFC_CFG_MONITOR)) {
7079         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7080                 "6225 CMF mode param out of range: %d\n",
7081                  p_cfg_param->cgn_param_mode);
7082         p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7083     }
7084 
7085     spin_unlock_irq(&phba->hbalock);
7086 }
7087 
7088 /**
7089  * lpfc_cgn_params_parse - Process a FW cong parm change event
7090  * @phba: pointer to lpfc hba data structure.
7091  * @p_cgn_param: pointer to a data buffer with the FW cong params.
7092  * @len: the size of pdata in bytes.
7093  *
7094  * This routine validates the congestion management buffer signature
7095  * from the FW, validates the contents and makes corrections for
7096  * valid, in-range values.  If the signature magic is correct and
7097  * after parameter validation, the contents are copied to the driver's
7098  * @phba structure. If the magic is incorrect, an error message is
7099  * logged.
7100  **/
7101 static void
7102 lpfc_cgn_params_parse(struct lpfc_hba *phba,
7103               struct lpfc_cgn_param *p_cgn_param, uint32_t len)
7104 {
7105     struct lpfc_cgn_info *cp;
7106     uint32_t crc, oldmode;
7107 
7108     /* Make sure the FW has encoded the correct magic number to
7109      * validate the congestion parameter in FW memory.
7110      */
7111     if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7112         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7113                 "4668 FW cgn parm buffer data: "
7114                 "magic 0x%x version %d mode %d "
7115                 "level0 %d level1 %d "
7116                 "level2 %d byte13 %d "
7117                 "byte14 %d byte15 %d "
7118                 "byte11 %d byte12 %d activeMode %d\n",
7119                 p_cgn_param->cgn_param_magic,
7120                 p_cgn_param->cgn_param_version,
7121                 p_cgn_param->cgn_param_mode,
7122                 p_cgn_param->cgn_param_level0,
7123                 p_cgn_param->cgn_param_level1,
7124                 p_cgn_param->cgn_param_level2,
7125                 p_cgn_param->byte13,
7126                 p_cgn_param->byte14,
7127                 p_cgn_param->byte15,
7128                 p_cgn_param->byte11,
7129                 p_cgn_param->byte12,
7130                 phba->cmf_active_mode);
7131 
7132         oldmode = phba->cmf_active_mode;
7133 
7134         /* Any parameters out of range are corrected to defaults
7135          * by this routine.  No need to fail.
7136          */
7137         lpfc_cgn_params_val(phba, p_cgn_param);
7138 
7139         /* Parameters are verified, move them into driver storage */
7140         spin_lock_irq(&phba->hbalock);
7141         memcpy(&phba->cgn_p, p_cgn_param,
7142                sizeof(struct lpfc_cgn_param));
7143 
7144         /* Update parameters in congestion info buffer now */
7145         if (phba->cgn_i) {
7146             cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7147             cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7148             cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7149             cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7150             cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7151             crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
7152                           LPFC_CGN_CRC32_SEED);
7153             cp->cgn_info_crc = cpu_to_le32(crc);
7154         }
7155         spin_unlock_irq(&phba->hbalock);
7156 
7157         phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7158 
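             /* Act on the mode transition: coming out of OFF starts CMF
              * and renegotiates EDC when the link is up; dropping to OFF
              * stops CMF; MANAGED -> MONITOR opens the bandwidth limit to
              * the full link rate and unblocks any waiting IO; MONITOR ->
              * MANAGED re-initializes the congestion signal exchange.
              */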
7159         switch (oldmode) {
7160         case LPFC_CFG_OFF:
7161             if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7162                 /* Turning CMF on */
7163                 lpfc_cmf_start(phba);
7164 
7165                 if (phba->link_state >= LPFC_LINK_UP) {
7166                     phba->cgn_reg_fpin =
7167                         phba->cgn_init_reg_fpin;
7168                     phba->cgn_reg_signal =
7169                         phba->cgn_init_reg_signal;
7170                     lpfc_issue_els_edc(phba->pport, 0);
7171                 }
7172             }
7173             break;
7174         case LPFC_CFG_MANAGED:
7175             switch (phba->cgn_p.cgn_param_mode) {
7176             case LPFC_CFG_OFF:
7177                 /* Turning CMF off */
7178                 lpfc_cmf_stop(phba);
7179                 if (phba->link_state >= LPFC_LINK_UP)
7180                     lpfc_issue_els_edc(phba->pport, 0);
7181                 break;
7182             case LPFC_CFG_MONITOR:
7183                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7184                         "4661 Switch from MANAGED to "
7185                         "MONITOR mode\n");
7186                 phba->cmf_max_bytes_per_interval =
7187                     phba->cmf_link_byte_count;
7188 
7189                 /* Resume blocked IO - unblock on workqueue */
7190                 queue_work(phba->wq,
7191                        &phba->unblock_request_work);
7192                 break;
7193             }
7194             break;
7195         case LPFC_CFG_MONITOR:
7196             switch (phba->cgn_p.cgn_param_mode) {
7197             case LPFC_CFG_OFF:
7198                 /* Turning CMF off */
7199                 lpfc_cmf_stop(phba);
7200                 if (phba->link_state >= LPFC_LINK_UP)
7201                     lpfc_issue_els_edc(phba->pport, 0);
7202                 break;
7203             case LPFC_CFG_MANAGED:
7204                 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7205                         "4662 Switch from MONITOR to "
7206                         "MANAGED mode\n");
7207                 lpfc_cmf_signal_init(phba);
7208                 break;
7209             }
7210             break;
7211         }
7212     } else {
7213         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7214                 "4669 FW cgn parm buf wrong magic 0x%x "
7215                 "version %d\n", p_cgn_param->cgn_param_magic,
7216                 p_cgn_param->cgn_param_version);
7217     }
7218 }
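
/*
 * Editor's note: a condensed view of the CMF mode transitions handled by
 * lpfc_cgn_params_parse() above, for readers skimming the nested switch
 * (descriptive summary only, not driver logic):
 *
 *   old mode   new mode    action
 *   --------   ---------   -----------------------------------------------
 *   OFF        MANAGED or  lpfc_cmf_start(); if the link is up, restore the
 *              MONITOR     FPIN/signal registrations and issue an EDC ELS
 *   MANAGED    OFF         lpfc_cmf_stop(); if the link is up, issue EDC
 *   MANAGED    MONITOR     open bandwidth to cmf_link_byte_count and queue
 *                          unblock_request_work on phba->wq
 *   MONITOR    OFF         lpfc_cmf_stop(); if the link is up, issue EDC
 *   MONITOR    MANAGED     lpfc_cmf_signal_init()
 */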
7219 
7220 /**
7221  * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
7222  * @phba: pointer to lpfc hba data structure.
7223  *
7224  * This routine issues a read_object mailbox command to
7225  * get the congestion management parameters from the FW,
7226  * parses them, and updates the driver maintained values.
7227  *
7228  * Returns
7229  *  0     if the object was empty
7230  *  negative errno if an error was encountered
7231  *  count of bytes read from the object otherwise
7232  **/
7233 int
7234 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7235 {
7236     int ret = 0;
7237     struct lpfc_cgn_param *p_cgn_param = NULL;
7238     u32 *pdata = NULL;
7239     u32 len = 0;
7240 
7241     /* Find out if the FW has a new set of congestion parameters. */
7242     len = sizeof(struct lpfc_cgn_param);
7243     pdata = kzalloc(len, GFP_KERNEL);
    if (!pdata)
        return -ENOMEM;
7244     ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7245                    pdata, len);
7246 
7247     /* 0 means no data.  A negative means error.  A positive means
7248      * bytes were copied.
7249      */
7250     if (!ret) {
7251         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7252                 "4670 CGN RD OBJ returns no data\n");
7253         goto rd_obj_err;
7254     } else if (ret < 0) {
7255         /* Some error.  Just exit and return it to the caller.*/
7256         goto rd_obj_err;
7257     }
7258 
7259     lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7260             "6234 READ CGN PARAMS Successful %d\n", len);
7261 
7262     /* Parse data pointer over len and update the phba congestion
7263      * parameters with values passed back.  The receive rate values
7264      * may have been altered in FW, but take no action here.
7265      */
7266     p_cgn_param = (struct lpfc_cgn_param *)pdata;
7267     lpfc_cgn_params_parse(phba, p_cgn_param, len);
7268 
7269  rd_obj_err:
7270     kfree(pdata);
7271     return ret;
7272 }
7273 
7274 /**
7275  * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
7276  * @phba: pointer to lpfc hba data structure.
7277  *
7278  * The FW generated Async ACQE SLI event calls this routine when
7279  * the event type is an SLI Internal Port Event and the Event Code
7280  * indicates a change to the FW maintained congestion parameters.
7281  *
7282  * This routine executes a Read_Object mailbox call to obtain the
7283  * current congestion parameters maintained in FW and corrects
7284  * the driver's active congestion parameters.
7285  *
7286  * The acqe event is not passed because there is no further data
7287  * required.
7288  *
7289  * Returns nonzero error if event processing encountered an error.
7290  * Zero otherwise for success.
7291  **/
7292 static int
7293 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7294 {
7295     int ret = 0;
7296 
7297     if (!phba->sli4_hba.pc_sli4_params.cmf) {
7298         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7299                 "4664 Cgn Evt when E2E off. Drop event\n");
7300         return -EACCES;
7301     }
7302 
7303     /* If the event is claiming an empty object, it's ok.  A write
7304      * could have cleared it.  Only error is a negative return
7305      * status.
7306      */
7307     ret = lpfc_sli4_cgn_params_read(phba);
7308     if (ret < 0) {
7309         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7310                 "4667 Error reading Cgn Params (%d)\n",
7311                 ret);
7312     } else if (!ret) {
7313         lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7314                 "4673 CGN Event empty object.\n");
7315     }
7316     return ret;
7317 }
7318 
7319 /**
7320  * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
7321  * @phba: pointer to lpfc hba data structure.
7322  *
7323  * This routine is invoked by the worker thread to process all the pending
7324  * SLI4 asynchronous events.
7325  **/
7326 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7327 {
7328     struct lpfc_cq_event *cq_event;
7329     unsigned long iflags;
7330 
7331     /* First, declare the async event has been handled */
7332     spin_lock_irqsave(&phba->hbalock, iflags);
7333     phba->hba_flag &= ~ASYNC_EVENT;
7334     spin_unlock_irqrestore(&phba->hbalock, iflags);
7335 
7336     /* Now, handle all the async events */
7337     spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7338     while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7339         list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7340                  cq_event, struct lpfc_cq_event, list);
7341         spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7342                        iflags);
7343 
7344         /* Process the asynchronous event */
7345         switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7346         case LPFC_TRAILER_CODE_LINK:
7347             lpfc_sli4_async_link_evt(phba,
7348                          &cq_event->cqe.acqe_link);
7349             break;
7350         case LPFC_TRAILER_CODE_FCOE:
7351             lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7352             break;
7353         case LPFC_TRAILER_CODE_DCBX:
7354             lpfc_sli4_async_dcbx_evt(phba,
7355                          &cq_event->cqe.acqe_dcbx);
7356             break;
7357         case LPFC_TRAILER_CODE_GRP5:
7358             lpfc_sli4_async_grp5_evt(phba,
7359                          &cq_event->cqe.acqe_grp5);
7360             break;
7361         case LPFC_TRAILER_CODE_FC:
7362             lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7363             break;
7364         case LPFC_TRAILER_CODE_SLI:
7365             lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7366             break;
7367         case LPFC_TRAILER_CODE_CMSTAT:
7368             lpfc_sli4_async_cmstat_evt(phba);
7369             break;
7370         default:
7371             lpfc_printf_log(phba, KERN_ERR,
7372                     LOG_TRACE_EVENT,
7373                     "1804 Invalid asynchronous event code: "
7374                     "x%x\n", bf_get(lpfc_trailer_code,
7375                     &cq_event->cqe.mcqe_cmpl));
7376             break;
7377         }
7378 
7379         /* Free the completion event processed to the free pool */
7380         lpfc_sli4_cq_event_release(phba, cq_event);
7381         spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7382     }
7383     spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7384 }
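
/*
 * Editor's sketch of the producer side that lpfc_sli4_async_event_proc()
 * drains (simplified and illustrative; the actual enqueue is done in the
 * SLI4 slow-path event handling and details may differ):
 *
 *	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
 *	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
 *	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	phba->hba_flag |= ASYNC_EVENT;
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	lpfc_worker_wake_up(phba);
 *
 * The worker thread then calls this routine to drain the queue under the
 * same asynce_list_lock.
 */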
7385 
7386 /**
7387  * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
7388  * @phba: pointer to lpfc hba data structure.
7389  *
7390  * This routine is invoked by the worker thread to process FCF table
7391  * rediscovery pending completion event.
7392  **/
7393 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7394 {
7395     int rc;
7396 
7397     spin_lock_irq(&phba->hbalock);
7398     /* Clear FCF rediscovery timeout event */
7399     phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7400     /* Clear driver fast failover FCF record flag */
7401     phba->fcf.failover_rec.flag = 0;
7402     /* Set state for FCF fast failover */
7403     phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7404     spin_unlock_irq(&phba->hbalock);
7405 
7406     /* Scan FCF table from the first entry to re-discover SAN */
7407     lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7408             "2777 Start post-quiescent FCF table scan\n");
7409     rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7410     if (rc)
7411         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7412                 "2747 Issue FCF scan read FCF mailbox "
7413                 "command failed 0x%x\n", rc);
7414 }
7415 
7416 /**
7417  * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
7418  * @phba: pointer to lpfc hba data structure.
7419  * @dev_grp: The HBA PCI-Device group number.
7420  *
7421  * This routine is invoked to set up the per HBA PCI-Device group function
7422  * API jump table entries.
7423  *
7424  * Return: 0 if success, otherwise -ENODEV
7425  **/
7426 int
7427 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7428 {
7429     int rc;
7430 
7431     /* Set up lpfc PCI-device group */
7432     phba->pci_dev_grp = dev_grp;
7433 
7434     /* The LPFC_PCI_DEV_OC uses SLI4 */
7435     if (dev_grp == LPFC_PCI_DEV_OC)
7436         phba->sli_rev = LPFC_SLI_REV4;
7437 
7438     /* Set up device INIT API function jump table */
7439     rc = lpfc_init_api_table_setup(phba, dev_grp);
7440     if (rc)
7441         return -ENODEV;
7442     /* Set up SCSI API function jump table */
7443     rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7444     if (rc)
7445         return -ENODEV;
7446     /* Set up SLI API function jump table */
7447     rc = lpfc_sli_api_table_setup(phba, dev_grp);
7448     if (rc)
7449         return -ENODEV;
7450     /* Set up MBOX API function jump table */
7451     rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7452     if (rc)
7453         return -ENODEV;
7454 
7455     return 0;
7456 }
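
/*
 * Editor's note: once lpfc_api_table_setup() has populated the jump tables,
 * the rest of the driver dispatches through the per-hba function pointers
 * instead of branching on sli_rev. An illustrative (not verbatim) call
 * pattern:
 *
 *	phba->lpfc_stop_port(phba);
 *
 * resolves to lpfc_stop_port_s3() on SLI-3 parts and lpfc_stop_port_s4()
 * on SLI-4 parts, as set up by lpfc_init_api_table_setup() below.
 */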
7457 
7458 /**
7459  * lpfc_log_intr_mode - Log the active interrupt mode
7460  * @phba: pointer to lpfc hba data structure.
7461  * @intr_mode: active interrupt mode adopted.
7462  *
7463  * This routine is invoked to log the currently used active interrupt mode
7464  * to the device.
7465  **/
7466 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7467 {
7468     switch (intr_mode) {
7469     case 0:
7470         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7471                 "0470 Enable INTx interrupt mode.\n");
7472         break;
7473     case 1:
7474         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7475                 "0481 Enabled MSI interrupt mode.\n");
7476         break;
7477     case 2:
7478         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7479                 "0480 Enabled MSI-X interrupt mode.\n");
7480         break;
7481     default:
7482         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7483                 "0482 Illegal interrupt mode.\n");
7484         break;
7485     }
7486     return;
7487 }
7488 
7489 /**
7490  * lpfc_enable_pci_dev - Enable a generic PCI device.
7491  * @phba: pointer to lpfc hba data structure.
7492  *
7493  * This routine is invoked to enable the PCI device that is common to all
7494  * PCI devices.
7495  *
7496  * Return codes
7497  *  0 - successful
7498  *  other values - error
7499  **/
7500 static int
7501 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7502 {
7503     struct pci_dev *pdev;
7504 
7505     /* Obtain PCI device reference */
7506     if (!phba->pcidev)
7507         goto out_error;
7508     else
7509         pdev = phba->pcidev;
7510     /* Enable PCI device */
7511     if (pci_enable_device_mem(pdev))
7512         goto out_error;
7513     /* Request PCI resource for the device */
7514     if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
7515         goto out_disable_device;
7516     /* Set up device as PCI master and save state for EEH */
7517     pci_set_master(pdev);
7518     pci_try_set_mwi(pdev);
7519     pci_save_state(pdev);
7520 
7521     /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7522     if (pci_is_pcie(pdev))
7523         pdev->needs_freset = 1;
7524 
7525     return 0;
7526 
7527 out_disable_device:
7528     pci_disable_device(pdev);
7529 out_error:
7530     lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7531             "1401 Failed to enable pci device\n");
7532     return -ENODEV;
7533 }
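
/*
 * Editor's note: lpfc_enable_pci_dev() is paired with lpfc_disable_pci_dev()
 * below. A hedged sketch of the typical probe-path usage, assuming the
 * caller has already set phba->pcidev (label name is hypothetical):
 *
 *	if (lpfc_enable_pci_dev(phba))
 *		goto out_free_phba;
 *	...
 * out_disable_pci_dev:
 *	lpfc_disable_pci_dev(phba);
 */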
7534 
7535 /**
7536  * lpfc_disable_pci_dev - Disable a generic PCI device.
7537  * @phba: pointer to lpfc hba data structure.
7538  *
7539  * This routine is invoked to disable the PCI device that is common to all
7540  * PCI devices.
7541  **/
7542 static void
7543 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7544 {
7545     struct pci_dev *pdev;
7546 
7547     /* Obtain PCI device reference */
7548     if (!phba->pcidev)
7549         return;
7550     else
7551         pdev = phba->pcidev;
7552     /* Release PCI resource and disable PCI device */
7553     pci_release_mem_regions(pdev);
7554     pci_disable_device(pdev);
7555 
7556     return;
7557 }
7558 
7559 /**
7560  * lpfc_reset_hba - Reset a hba
7561  * @phba: pointer to lpfc hba data structure.
7562  *
7563  * This routine is invoked to reset a hba device. It brings the HBA
7564  * offline, performs a board restart, and then brings the board back
7565  * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
7566  * on outstanding mailbox commands.
7567  **/
7568 void
7569 lpfc_reset_hba(struct lpfc_hba *phba)
7570 {
7571     /* If resets are disabled then set error state and return. */
7572     if (!phba->cfg_enable_hba_reset) {
7573         phba->link_state = LPFC_HBA_ERROR;
7574         return;
7575     }
7576 
7577     /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
7578     if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7579         lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7580     } else {
7581         lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7582         lpfc_sli_flush_io_rings(phba);
7583     }
7584     lpfc_offline(phba);
7585     lpfc_sli_brdrestart(phba);
7586     lpfc_online(phba);
7587     lpfc_unblock_mgmt_io(phba);
7588 }
7589 
7590 /**
7591  * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
7592  * @phba: pointer to lpfc hba data structure.
7593  *
7594  * This function reads the SR-IOV extended PCI capability of the device
7595  * to determine the total number of virtual functions the physical
7596  * function supports. If the device does not expose an SR-IOV capability,
7597  * 0 is returned.
7599  **/
7600 uint16_t
7601 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7602 {
7603     struct pci_dev *pdev = phba->pcidev;
7604     uint16_t nr_virtfn;
7605     int pos;
7606 
7607     pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7608     if (pos == 0)
7609         return 0;
7610 
7611     pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7612     return nr_virtfn;
7613 }
7614 
7615 /**
7616  * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
7617  * @phba: pointer to lpfc hba data structure.
7618  * @nr_vfn: number of virtual functions to be enabled.
7619  *
7620  * This function enables PCI SR-IOV virtual functions on a physical
7621  * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
7622  * enable that number of virtual functions on the physical function. As
7623  * not all devices support SR-IOV, the return code from the pci_enable_sriov()
7624  * API call is not considered an error condition for most devices.
7625  **/
7626 int
7627 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7628 {
7629     struct pci_dev *pdev = phba->pcidev;
7630     uint16_t max_nr_vfn;
7631     int rc;
7632 
7633     max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7634     if (nr_vfn > max_nr_vfn) {
7635         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7636                 "3057 Requested vfs (%d) greater than "
7637                 "supported vfs (%d)", nr_vfn, max_nr_vfn);
7638         return -EINVAL;
7639     }
7640 
7641     rc = pci_enable_sriov(pdev, nr_vfn);
7642     if (rc) {
7643         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7644                 "2806 Failed to enable sriov on this device "
7645                 "with vfn number nr_vf:%d, rc:%d\n",
7646                 nr_vfn, rc);
7647     } else
7648         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7649                 "2807 Successful enable sriov on this device "
7650                 "with vfn number nr_vf:%d\n", nr_vfn);
7651     return rc;
7652 }
7653 
7654 static void
7655 lpfc_unblock_requests_work(struct work_struct *work)
7656 {
7657     struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7658                          unblock_request_work);
7659 
7660     lpfc_unblock_requests(phba);
7661 }
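
/*
 * Editor's note: this work item is the deferred half of the MANAGED ->
 * MONITOR transition in lpfc_cgn_params_parse() above, which schedules it
 * with queue_work(phba->wq, &phba->unblock_request_work) so that the
 * unblocking runs from process context on the driver workqueue.
 */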
7662 
7663 /**
7664  * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
7665  * @phba: pointer to lpfc hba data structure.
7666  *
7667  * This routine is invoked to set up the driver internal resources before the
7668  * device specific resource setup to support the HBA device it is attached to.
7669  *
7670  * Return codes
7671  *  0 - successful
7672  *  other values - error
7673  **/
7674 static int
7675 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7676 {
7677     struct lpfc_sli *psli = &phba->sli;
7678 
7679     /*
7680      * Driver resources common to all SLI revisions
7681      */
7682     atomic_set(&phba->fast_event_count, 0);
7683     atomic_set(&phba->dbg_log_idx, 0);
7684     atomic_set(&phba->dbg_log_cnt, 0);
7685     atomic_set(&phba->dbg_log_dmping, 0);
7686     spin_lock_init(&phba->hbalock);
7687 
7688     /* Initialize port_list spinlock */
7689     spin_lock_init(&phba->port_list_lock);
7690     INIT_LIST_HEAD(&phba->port_list);
7691 
7692     INIT_LIST_HEAD(&phba->work_list);
7693 
7694     /* Initialize the wait queue head for the kernel thread */
7695     init_waitqueue_head(&phba->work_waitq);
7696 
7697     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7698             "1403 Protocols supported %s %s %s\n",
7699             ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7700                 "SCSI" : " "),
7701             ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7702                 "NVME" : " "),
7703             (phba->nvmet_support ? "NVMET" : " "));
7704 
7705     /* Initialize the IO buffer list used by driver for SLI3 SCSI */
7706     spin_lock_init(&phba->scsi_buf_list_get_lock);
7707     INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7708     spin_lock_init(&phba->scsi_buf_list_put_lock);
7709     INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7710 
7711     /* Initialize the fabric iocb list */
7712     INIT_LIST_HEAD(&phba->fabric_iocb_list);
7713 
7714     /* Initialize list to save ELS buffers */
7715     INIT_LIST_HEAD(&phba->elsbuf);
7716 
7717     /* Initialize FCF connection rec list */
7718     INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7719 
7720     /* Initialize OAS configuration list */
7721     spin_lock_init(&phba->devicelock);
7722     INIT_LIST_HEAD(&phba->luns);
7723 
7724     /* MBOX heartbeat timer */
7725     timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
7726     /* Fabric block timer */
7727     timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7728     /* EA polling mode timer */
7729     timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7730     /* Heartbeat timer */
7731     timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7732 
7733     INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7734 
7735     INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7736               lpfc_idle_stat_delay_work);
7737     INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7738     return 0;
7739 }
7740 
7741 /**
7742  * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
7743  * @phba: pointer to lpfc hba data structure.
7744  *
7745  * This routine is invoked to set up the driver internal resources specific to
7746  * support the SLI-3 HBA device it is attached to.
7747  *
7748  * Return codes
7749  * 0 - successful
7750  * other values - error
7751  **/
7752 static int
7753 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7754 {
7755     int rc, entry_sz;
7756 
7757     /*
7758      * Initialize timers used by driver
7759      */
7760 
7761     /* FCP polling mode timer */
7762     timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7763 
7764     /* Host attention work mask setup */
7765     phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7766     phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7767 
7768     /* Get all the module params for configuring this host */
7769     lpfc_get_cfgparam(phba);
7770     /* Set up phase-1 common device driver resources */
7771 
7772     rc = lpfc_setup_driver_resource_phase1(phba);
7773     if (rc)
7774         return -ENODEV;
7775 
7776     if (!phba->sli.sli3_ring)
7777         phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7778                           sizeof(struct lpfc_sli_ring),
7779                           GFP_KERNEL);
7780     if (!phba->sli.sli3_ring)
7781         return -ENOMEM;
7782 
7783     /*
7784      * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
7785      * used to create the sg_dma_buf_pool must be dynamically calculated.
7786      */
7787 
7788     if (phba->sli_rev == LPFC_SLI_REV4)
7789         entry_sz = sizeof(struct sli4_sge);
7790     else
7791         entry_sz = sizeof(struct ulp_bde64);
7792 
7793     /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
7794     if (phba->cfg_enable_bg) {
7795         /*
7796          * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
7797          * the FCP rsp, and a BDE for each. Since we have no control
7798          * over how many protection data segments the SCSI Layer
7799          * will hand us (ie: there could be one for every block
7800          * in the IO), we just allocate enough BDEs to accommodate
7801          * our max amount and we need to limit lpfc_sg_seg_cnt to
7802          * minimize the risk of running out.
7803          */
7804         phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7805             sizeof(struct fcp_rsp) +
7806             (LPFC_MAX_SG_SEG_CNT * entry_sz);
7807 
7808         if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7809             phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7810 
7811         /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
7812         phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7813     } else {
7814         /*
7815          * The scsi_buf for a regular I/O will hold the FCP cmnd,
7816          * the FCP rsp, a BDE for each, and a BDE for up to
7817          * cfg_sg_seg_cnt data segments.
7818          */
7819         phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7820             sizeof(struct fcp_rsp) +
7821             ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7822 
7823         /* Total BDEs in BPL for scsi_sg_list */
7824         phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7825     }
7826 
7827     lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7828             "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7829             phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7830             phba->cfg_total_seg_cnt);
7831 
7832     phba->max_vpi = LPFC_MAX_VPI;
7833     /* This will be set to correct value after config_port mbox */
7834     phba->max_vports = 0;
7835 
7836     /*
7837      * Initialize the SLI Layer to run with lpfc HBAs.
7838      */
7839     lpfc_sli_setup(phba);
7840     lpfc_sli_queue_init(phba);
7841 
7842     /* Allocate device driver memory */
7843     if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7844         return -ENOMEM;
7845 
7846     phba->lpfc_sg_dma_buf_pool =
7847         dma_pool_create("lpfc_sg_dma_buf_pool",
7848                 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7849                 BPL_ALIGN_SZ, 0);
7850 
7851     if (!phba->lpfc_sg_dma_buf_pool)
7852         goto fail_free_mem;
7853 
7854     phba->lpfc_cmd_rsp_buf_pool =
7855             dma_pool_create("lpfc_cmd_rsp_buf_pool",
7856                     &phba->pcidev->dev,
7857                     sizeof(struct fcp_cmnd) +
7858                     sizeof(struct fcp_rsp),
7859                     BPL_ALIGN_SZ, 0);
7860 
7861     if (!phba->lpfc_cmd_rsp_buf_pool)
7862         goto fail_free_dma_buf_pool;
7863 
7864     /*
7865      * Enable sr-iov virtual functions if supported and configured
7866      * through the module parameter.
7867      */
7868     if (phba->cfg_sriov_nr_virtfn > 0) {
7869         rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7870                          phba->cfg_sriov_nr_virtfn);
7871         if (rc) {
7872             lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7873                     "2808 Requested number of SR-IOV "
7874                     "virtual functions (%d) is not "
7875                     "supported\n",
7876                     phba->cfg_sriov_nr_virtfn);
7877             phba->cfg_sriov_nr_virtfn = 0;
7878         }
7879     }
7880 
7881     return 0;
7882 
7883 fail_free_dma_buf_pool:
7884     dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7885     phba->lpfc_sg_dma_buf_pool = NULL;
7886 fail_free_mem:
7887     lpfc_mem_free(phba);
7888     return -ENOMEM;
7889 }
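
/*
 * Editor's sketch: per-IO buffers are later carved out of the two DMA pools
 * created above. A minimal, illustrative allocation pattern (hypothetical;
 * the driver's real buffer setup lives in its SCSI buffer management code):
 *
 *	dma_addr_t pdma;
 *	void *buf = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
 *				   GFP_KERNEL, &pdma);
 *	if (buf) {
 *		... build the FCP cmnd/rsp and BDE list in buf ...
 *		dma_pool_free(phba->lpfc_sg_dma_buf_pool, buf, pdma);
 *	}
 */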
7890 
7891 /**
7892  * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
7893  * @phba: pointer to lpfc hba data structure.
7894  *
7895  * This routine is invoked to unset the driver internal resources set up
7896  * specific for supporting the SLI-3 HBA device it is attached to.
7897  **/
7898 static void
7899 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7900 {
7901     /* Free device driver memory allocated */
7902     lpfc_mem_free_all(phba);
7903 
7904     return;
7905 }
7906 
7907 /**
7908  * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
7909  * @phba: pointer to lpfc hba data structure.
7910  *
7911  * This routine is invoked to set up the driver internal resources specific to
7912  * support the SLI-4 HBA device it is attached to.
7913  *
7914  * Return codes
7915  *  0 - successful
7916  *  other values - error
7917  **/
7918 static int
7919 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7920 {
7921     LPFC_MBOXQ_t *mboxq;
7922     MAILBOX_t *mb;
7923     int rc, i, max_buf_size;
7924     int longs;
7925     int extra;
7926     uint64_t wwn;
7927     u32 if_type;
7928     u32 if_fam;
7929 
7930     phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7931     phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7932     phba->sli4_hba.curr_disp_cpu = 0;
7933 
7934     /* Get all the module params for configuring this host */
7935     lpfc_get_cfgparam(phba);
7936 
7937     /* Set up phase-1 common device driver resources */
7938     rc = lpfc_setup_driver_resource_phase1(phba);
7939     if (rc)
7940         return -ENODEV;
7941 
7942     /* Before proceed, wait for POST done and device ready */
7943     rc = lpfc_sli4_post_status_check(phba);
7944     if (rc)
7945         return -ENODEV;
7946 
7947     /* Allocate all driver workqueues here */
7948 
7949     /* The lpfc_wq workqueue for deferred irq use */
7950     phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7951     if (!phba->wq)
7952         return -ENOMEM;
7953 
7954     /*
7955      * Initialize timers used by driver
7956      */
7957 
7958     timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7959 
7960     /* FCF rediscover timer */
7961     timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7962 
7963     /* CMF congestion timer */
7964     hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7965     phba->cmf_timer.function = lpfc_cmf_timer;
7966 
7967     /*
7968      * Control structure for handling external multi-buffer mailbox
7969      * command pass-through.
7970      */
7971     memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7972         sizeof(struct lpfc_mbox_ext_buf_ctx));
7973     INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7974 
7975     phba->max_vpi = LPFC_MAX_VPI;
7976 
7977     /* This will be set to correct value after the read_config mbox */
7978     phba->max_vports = 0;
7979 
7980     /* Program the default value of vlan_id and fc_map */
7981     phba->valid_vlan = 0;
7982     phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7983     phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7984     phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7985 
7986     /*
7987      * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
7988      * we will associate a new ring, for each EQ/CQ/WQ tuple.
7989      * The WQ create will allocate the ring.
7990      */
7991 
7992     /* Initialize buffer queue management fields */
7993     INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7994     phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7995     phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7996 
7997     /* for VMID idle timeout if VMID is enabled */
7998     if (lpfc_is_vmid_enabled(phba))
7999         timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
8000 
8001     /*
8002      * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
8003      */
8004     /* Initialize the Abort buffer list used by driver */
8005     spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
8006     INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
8007 
8008     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8009         /* Initialize the Abort nvme buffer list used by driver */
8010         spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
8011         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8012         INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
8013         spin_lock_init(&phba->sli4_hba.t_active_list_lock);
8014         INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8015     }
8016 
8017     /* This abort list used by worker thread */
8018     spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8019     spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8020     spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8021     spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
8022 
8023     /*
8024      * Initialize driver internal slow-path work queues
8025      */
8026 
8027     /* Driver internal slow-path CQ Event pool */
8028     INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
8029     /* Response IOCB work queue list */
8030     INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
8031     /* Asynchronous event CQ Event work queue list */
8032     INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
8033     /* Slow-path XRI aborted CQ Event work queue list */
8034     INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
8035     /* Receive queue CQ Event work queue list */
8036     INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
8037 
8038     /* Initialize extent block lists. */
8039     INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8040     INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8041     INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8042     INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
8043 
8044     /* Initialize mboxq lists. These must be valid even if the early
8045      * init routines fail, so that later cleanup can walk them safely.
8046      */
8047     INIT_LIST_HEAD(&phba->sli.mboxq);
8048     INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
8049 
8050     /* initialize optic_state to 0xFF */
8051     phba->sli4_hba.lnk_info.optic_state = 0xff;
8052 
8053     /* Allocate device driver memory */
8054     rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8055     if (rc)
8056         goto out_destroy_workqueue;
8057 
8058     /* IF Type 2 ports get initialized now. */
8059     if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8060         LPFC_SLI_INTF_IF_TYPE_2) {
8061         rc = lpfc_pci_function_reset(phba);
8062         if (unlikely(rc)) {
8063             rc = -ENODEV;
8064             goto out_free_mem;
8065         }
8066         phba->temp_sensor_support = 1;
8067     }
8068 
8069     /* Create the bootstrap mailbox command */
8070     rc = lpfc_create_bootstrap_mbox(phba);
8071     if (unlikely(rc))
8072         goto out_free_mem;
8073 
8074     /* Set up the host's endian order with the device. */
8075     rc = lpfc_setup_endian_order(phba);
8076     if (unlikely(rc))
8077         goto out_free_bsmbx;
8078 
8079     /* Set up the hba's configuration parameters. */
8080     rc = lpfc_sli4_read_config(phba);
8081     if (unlikely(rc))
8082         goto out_free_bsmbx;
8083 
8084     if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
8085         /* Right now the link is down, if FA-PWWN is configured the
8086          * firmware will try FLOGI before the driver gets a link up.
8087          * If it fails, the driver should get a MISCONFIGURED async
8088          * event which will clear this flag. The only notification
8089          * the driver gets is on failure; if it succeeds there is no
8090          * notification given. Assume success.
8091          */
8092         phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8093     }
8094 
8095     rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8096     if (unlikely(rc))
8097         goto out_free_bsmbx;
8098 
8099     /* IF Type 0 ports get initialized now. */
8100     if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8101         LPFC_SLI_INTF_IF_TYPE_0) {
8102         rc = lpfc_pci_function_reset(phba);
8103         if (unlikely(rc))
8104             goto out_free_bsmbx;
8105     }
8106 
8107     mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8108                                GFP_KERNEL);
8109     if (!mboxq) {
8110         rc = -ENOMEM;
8111         goto out_free_bsmbx;
8112     }
8113 
8114     /* Check for NVMET being configured */
8115     phba->nvmet_support = 0;
8116     if (lpfc_enable_nvmet_cnt) {
8117 
8118         /* First get WWN of HBA instance */
8119         lpfc_read_nv(phba, mboxq);
8120         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8121         if (rc != MBX_SUCCESS) {
8122             lpfc_printf_log(phba, KERN_ERR,
8123                     LOG_TRACE_EVENT,
8124                     "6016 Mailbox failed , mbxCmd x%x "
8125                     "READ_NV, mbxStatus x%x\n",
8126                     bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8127                     bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8128             mempool_free(mboxq, phba->mbox_mem_pool);
8129             rc = -EIO;
8130             goto out_free_bsmbx;
8131         }
8132         mb = &mboxq->u.mb;
8133         memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8134                sizeof(uint64_t));
8135         wwn = cpu_to_be64(wwn);
8136         phba->sli4_hba.wwnn.u.name = wwn;
8137         memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8138                sizeof(uint64_t));
8139         /* wwn is WWPN of HBA instance */
8140         wwn = cpu_to_be64(wwn);
8141         phba->sli4_hba.wwpn.u.name = wwn;
8142 
8143         /* Check to see if it matches any module parameter */
8144         for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
8145             if (wwn == lpfc_enable_nvmet[i]) {
8146 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8147                 if (lpfc_nvmet_mem_alloc(phba))
8148                     break;
8149 
8150                 phba->nvmet_support = 1; /* a match */
8151 
8152                 lpfc_printf_log(phba, KERN_ERR,
8153                         LOG_TRACE_EVENT,
8154                         "6017 NVME Target %016llx\n",
8155                         wwn);
8156 #else
8157                 lpfc_printf_log(phba, KERN_ERR,
8158                         LOG_TRACE_EVENT,
8159                         "6021 Can't enable NVME Target."
8160                         " NVME_TARGET_FC infrastructure"
8161                         " is not in kernel\n");
8162 #endif
8163                 /* Not supported for NVMET */
8164                 phba->cfg_xri_rebalancing = 0;
8165                 if (phba->irq_chann_mode == NHT_MODE) {
8166                     phba->cfg_irq_chann =
8167                         phba->sli4_hba.num_present_cpu;
8168                     phba->cfg_hdw_queue =
8169                         phba->sli4_hba.num_present_cpu;
8170                     phba->irq_chann_mode = NORMAL_MODE;
8171                 }
8172                 break;
8173             }
8174         }
8175     }
8176 
8177     lpfc_nvme_mod_param_dep(phba);
8178 
8179     /*
8180      * Get sli4 parameters that override parameters from Port capabilities.
8181      * If this call fails, it isn't critical unless the SLI4 parameters come
8182      * back in conflict.
8183      */
8184     rc = lpfc_get_sli4_parameters(phba, mboxq);
8185     if (rc) {
8186         if_type = bf_get(lpfc_sli_intf_if_type,
8187                  &phba->sli4_hba.sli_intf);
8188         if_fam = bf_get(lpfc_sli_intf_sli_family,
8189                 &phba->sli4_hba.sli_intf);
8190         if (phba->sli4_hba.extents_in_use &&
8191             phba->sli4_hba.rpi_hdrs_in_use) {
8192             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8193                     "2999 Unsupported SLI4 Parameters "
8194                     "Extents and RPI headers enabled.\n");
8195             if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8196                 if_fam ==  LPFC_SLI_INTF_FAMILY_BE2) {
8197                 mempool_free(mboxq, phba->mbox_mem_pool);
8198                 rc = -EIO;
8199                 goto out_free_bsmbx;
8200             }
8201         }
8202         if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8203               if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8204             mempool_free(mboxq, phba->mbox_mem_pool);
8205             rc = -EIO;
8206             goto out_free_bsmbx;
8207         }
8208     }
8209 
8210     /*
8211      * 1 for cmd, 1 for rsp, NVME adds an extra one
8212      * for boundary conditions in its max_sgl_segment template.
8213      */
8214     extra = 2;
8215     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8216         extra++;
8217 
8218     /*
8219      * It doesn't matter what family our adapter is in, we are
8220      * limited to 2 Pages, 512 SGEs, for our SGL.
8221      * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
8222      */
8223     max_buf_size = (2 * SLI4_PAGE_SIZE);
8224 
8225     /*
8226      * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
8227      * used to create the sg_dma_buf_pool must be calculated.
8228      */
8229     if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8230         /* Both cfg_enable_bg and cfg_external_dif code paths */
8231 
8232         /*
8233          * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
8234          * the FCP rsp, and a SGE. Since we have no control
8235          * over how many protection segments the SCSI Layer
8236          * will hand us (ie: there could be one for every block
8237          * in the IO), just allocate enough SGEs to accommodate
8238          * our max amount and we need to limit lpfc_sg_seg_cnt
8239          * to minimize the risk of running out.
8240          */
8241         phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8242                 sizeof(struct fcp_rsp) + max_buf_size;
8243 
8244         /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
8245         phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8246 
8247         /*
8248          * If supporting DIF, reduce the seg count for scsi to
8249          * allow room for the DIF sges.
8250          */
8251         if (phba->cfg_enable_bg &&
8252             phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8253             phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8254         else
8255             phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8256 
8257     } else {
8258         /*
8259          * The scsi_buf for a regular I/O holds the FCP cmnd,
8260          * the FCP rsp, a SGE for each, and a SGE for up to
8261          * cfg_sg_seg_cnt data segments.
8262          */
8263         phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8264                 sizeof(struct fcp_rsp) +
8265                 ((phba->cfg_sg_seg_cnt + extra) *
8266                 sizeof(struct sli4_sge));
8267 
8268         /* Total SGEs for scsi_sg_list */
8269         phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8270         phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8271 
8272         /*
8273          * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8274          * need to post 1 page for the SGL.
8275          */
8276     }
8277 
8278     if (phba->cfg_xpsgl && !phba->nvmet_support)
8279         phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8280     else if (phba->cfg_sg_dma_buf_size  <= LPFC_MIN_SG_SLI4_BUF_SZ)
8281         phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8282     else
8283         phba->cfg_sg_dma_buf_size =
8284                 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8285 
8286     phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8287                    sizeof(struct sli4_sge);
8288 
8289     /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8290     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8291         if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8292             lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8293                     "6300 Reducing NVME sg segment "
8294                     "cnt to %d\n",
8295                     LPFC_MAX_NVME_SEG_CNT);
8296             phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8297         } else
8298             phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8299     }
8300 
8301     lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8302             "9087 sg_seg_cnt:%d dmabuf_size:%d "
8303             "total:%d scsi:%d nvme:%d\n",
8304             phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8305             phba->cfg_total_seg_cnt,  phba->cfg_scsi_seg_cnt,
8306             phba->cfg_nvme_seg_cnt);
8307 
8308     if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8309         i = phba->cfg_sg_dma_buf_size;
8310     else
8311         i = SLI4_PAGE_SIZE;
8312 
8313     phba->lpfc_sg_dma_buf_pool =
8314             dma_pool_create("lpfc_sg_dma_buf_pool",
8315                     &phba->pcidev->dev,
8316                     phba->cfg_sg_dma_buf_size,
8317                     i, 0);
8318     if (!phba->lpfc_sg_dma_buf_pool)
8319         goto out_free_bsmbx;
8320 
8321     phba->lpfc_cmd_rsp_buf_pool =
8322             dma_pool_create("lpfc_cmd_rsp_buf_pool",
8323                     &phba->pcidev->dev,
8324                     sizeof(struct fcp_cmnd) +
8325                     sizeof(struct fcp_rsp),
8326                     i, 0);
8327     if (!phba->lpfc_cmd_rsp_buf_pool)
8328         goto out_free_sg_dma_buf;
8329 
8330     mempool_free(mboxq, phba->mbox_mem_pool);
8331 
8332     /* Verify OAS is supported */
8333     lpfc_sli4_oas_verify(phba);
8334 
8335     /* Verify RAS support on adapter */
8336     lpfc_sli4_ras_init(phba);
8337 
8338     /* Verify all the SLI4 queues */
8339     rc = lpfc_sli4_queue_verify(phba);
8340     if (rc)
8341         goto out_free_cmd_rsp_buf;
8342 
8343     /* Create driver internal CQE event pool */
8344     rc = lpfc_sli4_cq_event_pool_create(phba);
8345     if (rc)
8346         goto out_free_cmd_rsp_buf;
8347 
8348     /* Initialize sgl lists per host */
8349     lpfc_init_sgl_list(phba);
8350 
8351     /* Allocate and initialize active sgl array */
8352     rc = lpfc_init_active_sgl_array(phba);
8353     if (rc) {
8354         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8355                 "1430 Failed to initialize sgl list.\n");
8356         goto out_destroy_cq_event_pool;
8357     }
8358     rc = lpfc_sli4_init_rpi_hdrs(phba);
8359     if (rc) {
8360         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8361                 "1432 Failed to initialize rpi headers.\n");
8362         goto out_free_active_sgl;
8363     }
8364 
8365     /* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8366     longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
8367     phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8368                      GFP_KERNEL);
8369     if (!phba->fcf.fcf_rr_bmask) {
8370         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8371                 "2759 Failed allocate memory for FCF round "
8372                 "robin failover bmask\n");
8373         rc = -ENOMEM;
8374         goto out_remove_rpi_hdrs;
8375     }
8376 
8377     phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8378                         sizeof(struct lpfc_hba_eq_hdl),
8379                         GFP_KERNEL);
8380     if (!phba->sli4_hba.hba_eq_hdl) {
8381         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8382                 "2572 Failed allocate memory for "
8383                 "fast-path per-EQ handle array\n");
8384         rc = -ENOMEM;
8385         goto out_free_fcf_rr_bmask;
8386     }
8387 
8388     phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8389                     sizeof(struct lpfc_vector_map_info),
8390                     GFP_KERNEL);
8391     if (!phba->sli4_hba.cpu_map) {
8392         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8393                 "3327 Failed allocate memory for msi-x "
8394                 "interrupt vector mapping\n");
8395         rc = -ENOMEM;
8396         goto out_free_hba_eq_hdl;
8397     }
8398 
8399     phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8400     if (!phba->sli4_hba.eq_info) {
8401         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8402                 "3321 Failed allocation for per_cpu stats\n");
8403         rc = -ENOMEM;
8404         goto out_free_hba_cpu_map;
8405     }
8406 
8407     phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8408                        sizeof(*phba->sli4_hba.idle_stat),
8409                        GFP_KERNEL);
8410     if (!phba->sli4_hba.idle_stat) {
8411         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8412                 "3390 Failed allocation for idle_stat\n");
8413         rc = -ENOMEM;
8414         goto out_free_hba_eq_info;
8415     }
8416 
8417 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8418     phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8419     if (!phba->sli4_hba.c_stat) {
8420         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8421                 "3332 Failed allocating per cpu hdwq stats\n");
8422         rc = -ENOMEM;
8423         goto out_free_hba_idle_stat;
8424     }
8425 #endif
8426 
8427     phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8428     if (!phba->cmf_stat) {
8429         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8430                 "3331 Failed allocating per cpu cgn stats\n");
8431         rc = -ENOMEM;
8432         goto out_free_hba_hdwq_info;
8433     }
8434 
8435     /*
8436      * Enable sr-iov virtual functions if supported and configured
8437      * through the module parameter.
8438      */
8439     if (phba->cfg_sriov_nr_virtfn > 0) {
8440         rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8441                          phba->cfg_sriov_nr_virtfn);
8442         if (rc) {
8443             lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8444                     "3020 Requested number of SR-IOV "
8445                     "virtual functions (%d) is not "
8446                     "supported\n",
8447                     phba->cfg_sriov_nr_virtfn);
8448             phba->cfg_sriov_nr_virtfn = 0;
8449         }
8450     }
8451 
8452     return 0;
8453 
8454 out_free_hba_hdwq_info:
8455 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8456     free_percpu(phba->sli4_hba.c_stat);
8457 out_free_hba_idle_stat:
8458 #endif
8459     kfree(phba->sli4_hba.idle_stat);
8460 out_free_hba_eq_info:
8461     free_percpu(phba->sli4_hba.eq_info);
8462 out_free_hba_cpu_map:
8463     kfree(phba->sli4_hba.cpu_map);
8464 out_free_hba_eq_hdl:
8465     kfree(phba->sli4_hba.hba_eq_hdl);
8466 out_free_fcf_rr_bmask:
8467     kfree(phba->fcf.fcf_rr_bmask);
8468 out_remove_rpi_hdrs:
8469     lpfc_sli4_remove_rpi_hdrs(phba);
8470 out_free_active_sgl:
8471     lpfc_free_active_sgl(phba);
8472 out_destroy_cq_event_pool:
8473     lpfc_sli4_cq_event_pool_destroy(phba);
8474 out_free_cmd_rsp_buf:
8475     dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8476     phba->lpfc_cmd_rsp_buf_pool = NULL;
8477 out_free_sg_dma_buf:
8478     dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8479     phba->lpfc_sg_dma_buf_pool = NULL;
8480 out_free_bsmbx:
8481     lpfc_destroy_bootstrap_mbox(phba);
8482 out_free_mem:
8483     lpfc_mem_free(phba);
8484 out_destroy_workqueue:
8485     destroy_workqueue(phba->wq);
8486     phba->wq = NULL;
8487     return rc;
8488 }
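
/*
 * Editor's note: the error handling above is the usual kernel goto-unwind
 * ladder -- each successfully acquired resource gets a label, and a failure
 * at step N jumps to the label that releases steps N-1..1 in reverse order.
 * For example, a cpu_map allocation failure unwinds hba_eq_hdl, the FCF
 * round-robin bmask, the rpi headers, the active sgl array, the CQ event
 * pool, both DMA pools, the bootstrap mailbox, driver memory and finally
 * the workqueue.
 */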
8489 
8490 /**
8491  * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
8492  * @phba: pointer to lpfc hba data structure.
8493  *
8494  * This routine is invoked to unset the driver internal resources set up
8495  * specific for supporting the SLI-4 HBA device it is attached to.
8496  **/
8497 static void
8498 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8499 {
8500     struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8501 
8502     free_percpu(phba->sli4_hba.eq_info);
8503 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8504     free_percpu(phba->sli4_hba.c_stat);
8505 #endif
8506     free_percpu(phba->cmf_stat);
8507     kfree(phba->sli4_hba.idle_stat);
8508 
8509     /* Free memory allocated for msi-x interrupt vector to CPU mapping */
8510     kfree(phba->sli4_hba.cpu_map);
8511     phba->sli4_hba.num_possible_cpu = 0;
8512     phba->sli4_hba.num_present_cpu = 0;
8513     phba->sli4_hba.curr_disp_cpu = 0;
8514     cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8515 
8516     /* Free memory allocated for fast-path work queue handles */
8517     kfree(phba->sli4_hba.hba_eq_hdl);
8518 
8519     /* Free the allocated rpi headers. */
8520     lpfc_sli4_remove_rpi_hdrs(phba);
8521     lpfc_sli4_remove_rpis(phba);
8522 
8523     /* Free eligible FCF index bmask */
8524     kfree(phba->fcf.fcf_rr_bmask);
8525 
8526     /* Free the ELS sgl list */
8527     lpfc_free_active_sgl(phba);
8528     lpfc_free_els_sgl_list(phba);
8529     lpfc_free_nvmet_sgl_list(phba);
8530 
8531     /* Free the completion queue EQ event pool */
8532     lpfc_sli4_cq_event_release_all(phba);
8533     lpfc_sli4_cq_event_pool_destroy(phba);
8534 
8535     /* Release resource identifiers. */
8536     lpfc_sli4_dealloc_resource_identifiers(phba);
8537 
8538     /* Free the bsmbx region. */
8539     lpfc_destroy_bootstrap_mbox(phba);
8540 
8541     /* Free the SLI Layer memory with SLI4 HBAs */
8542     lpfc_mem_free_all(phba);
8543 
8544     /* Free the current connect table */
8545     list_for_each_entry_safe(conn_entry, next_conn_entry,
8546         &phba->fcf_conn_rec_list, list) {
8547         list_del_init(&conn_entry->list);
8548         kfree(conn_entry);
8549     }
8550 
8551     return;
8552 }
8553 
8554 /**
8555  * lpfc_init_api_table_setup - Set up init api function jump table
8556  * @phba: The hba struct for which this call is being executed.
8557  * @dev_grp: The HBA PCI-Device group number.
8558  *
8559  * This routine sets up the device INIT interface API function jump table
8560  * in @phba struct.
8561  *
8562  * Returns: 0 - success, -ENODEV - failure.
8563  **/
8564 int
8565 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8566 {
8567     phba->lpfc_hba_init_link = lpfc_hba_init_link;
8568     phba->lpfc_hba_down_link = lpfc_hba_down_link;
8569     phba->lpfc_selective_reset = lpfc_selective_reset;
8570     switch (dev_grp) {
8571     case LPFC_PCI_DEV_LP:
8572         phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8573         phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8574         phba->lpfc_stop_port = lpfc_stop_port_s3;
8575         break;
8576     case LPFC_PCI_DEV_OC:
8577         phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8578         phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8579         phba->lpfc_stop_port = lpfc_stop_port_s4;
8580         break;
8581     default:
8582         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8583                 "1431 Invalid HBA PCI-device group: 0x%x\n",
8584                 dev_grp);
8585         return -ENODEV;
8586     }
8587     return 0;
8588 }
8589 
8590 /**
8591  * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
8592  * @phba: pointer to lpfc hba data structure.
8593  *
8594  * This routine is invoked to set up the driver internal resources after the
8595  * device specific resource setup to support the HBA device it attached to.
8596  *
8597  * Return codes
8598  *  0 - successful
8599  *  other values - error
8600  **/
8601 static int
8602 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8603 {
8604     int error;
8605 
8606     /* Startup the kernel thread for this host adapter. */
8607     phba->worker_thread = kthread_run(lpfc_do_work, phba,
8608                       "lpfc_worker_%d", phba->brd_no);
8609     if (IS_ERR(phba->worker_thread)) {
8610         error = PTR_ERR(phba->worker_thread);
8611         return error;
8612     }
8613 
8614     return 0;
8615 }
8616 
8617 /**
8618  * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
8619  * @phba: pointer to lpfc hba data structure.
8620  *
8621  * This routine is invoked to unset the driver internal resources set up after
8622  * the device specific resource setup for supporting the HBA device it is
8623  * attached to.
8624  **/
8625 static void
8626 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8627 {
8628     if (phba->wq) {
8629         destroy_workqueue(phba->wq);
8630         phba->wq = NULL;
8631     }
8632 
8633     /* Stop kernel worker thread */
8634     if (phba->worker_thread)
8635         kthread_stop(phba->worker_thread);
8636 }
8637 
8638 /**
8639  * lpfc_free_iocb_list - Free iocb list.
8640  * @phba: pointer to lpfc hba data structure.
8641  *
8642  * This routine is invoked to free the driver's IOCB list and memory.
8643  **/
8644 void
8645 lpfc_free_iocb_list(struct lpfc_hba *phba)
8646 {
8647     struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8648 
8649     spin_lock_irq(&phba->hbalock);
8650     list_for_each_entry_safe(iocbq_entry, iocbq_next,
8651                  &phba->lpfc_iocb_list, list) {
8652         list_del(&iocbq_entry->list);
8653         kfree(iocbq_entry);
8654         phba->total_iocbq_bufs--;
8655     }
8656     spin_unlock_irq(&phba->hbalock);
8657 
8658     return;
8659 }
8660 
8661 /**
8662  * lpfc_init_iocb_list - Allocate and initialize iocb list.
8663  * @phba: pointer to lpfc hba data structure.
8664  * @iocb_count: number of requested iocbs
8665  *
8666  * This routine is invoked to allocate and initialize the driver's IOCB
8667  * list and set up the IOCB tag array accordingly.
8668  *
8669  * Return codes
8670  *  0 - successful
8671  *  other values - error
8672  **/
8673 int
8674 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8675 {
8676     struct lpfc_iocbq *iocbq_entry = NULL;
8677     uint16_t iotag;
8678     int i;
8679 
8680     /* Initialize and populate the iocb list per host.  */
8681     INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8682     for (i = 0; i < iocb_count; i++) {
8683         iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8684         if (iocbq_entry == NULL) {
8685             printk(KERN_ERR "%s: only allocated %d iocbs of "
8686                 "expected %d count. Unloading driver.\n",
8687                 __func__, i, iocb_count);
8688             goto out_free_iocbq;
8689         }
8690 
8691         iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8692         if (iotag == 0) {
8693             kfree(iocbq_entry);
8694             printk(KERN_ERR "%s: failed to allocate IOTAG. "
8695                 "Unloading driver.\n", __func__);
8696             goto out_free_iocbq;
8697         }
8698         iocbq_entry->sli4_lxritag = NO_XRI;
8699         iocbq_entry->sli4_xritag = NO_XRI;
8700 
8701         spin_lock_irq(&phba->hbalock);
8702         list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8703         phba->total_iocbq_bufs++;
8704         spin_unlock_irq(&phba->hbalock);
8705     }
8706 
8707     return 0;
8708 
8709 out_free_iocbq:
8710     lpfc_free_iocb_list(phba);
8711 
8712     return -ENOMEM;
8713 }
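
/*
 * Editor's note (simplified summary): lpfc_sli_next_iotag() both assigns
 * iocbq_entry->iotag and records the iocbq in the SLI layer's lookup array,
 * which is what later lets a completion be mapped back to its originating
 * command by tag (see lpfc_sli.c for the actual lookup handling).
 */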
8714 
8715 /**
8716  * lpfc_free_sgl_list - Free a given sgl list.
8717  * @phba: pointer to lpfc hba data structure.
8718  * @sglq_list: pointer to the head of sgl list.
8719  *
8720  * This routine is invoked to free a given sgl list and memory.
8721  **/
8722 void
8723 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8724 {
8725     struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8726 
8727     list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8728         list_del(&sglq_entry->list);
8729         lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8730         kfree(sglq_entry);
8731     }
8732 }
8733 
8734 /**
8735  * lpfc_free_els_sgl_list - Free els sgl list.
8736  * @phba: pointer to lpfc hba data structure.
8737  *
8738  * This routine is invoked to free the driver's els sgl list and memory.
8739  **/
8740 static void
8741 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8742 {
8743     LIST_HEAD(sglq_list);
8744 
8745     /* Retrieve all els sgls from driver list */
8746     spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8747     list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8748     spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8749 
8750     /* Now free the sgl list */
8751     lpfc_free_sgl_list(phba, &sglq_list);
8752 }
8753 
8754 /**
8755  * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
8756  * @phba: pointer to lpfc hba data structure.
8757  *
8758  * This routine is invoked to free the driver's nvmet sgl list and memory.
8759  **/
8760 static void
8761 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8762 {
8763     struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8764     LIST_HEAD(sglq_list);
8765 
8766     /* Retrieve all nvmet sgls from driver list */
8767     spin_lock_irq(&phba->hbalock);
8768     spin_lock(&phba->sli4_hba.sgl_list_lock);
8769     list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8770     spin_unlock(&phba->sli4_hba.sgl_list_lock);
8771     spin_unlock_irq(&phba->hbalock);
8772 
8773     /* Now free the sgl list */
8774     list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
8775         list_del(&sglq_entry->list);
8776         lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8777         kfree(sglq_entry);
8778     }
8779 
8780     /* Update the nvmet_xri_cnt to reflect no current sgls.
8781      * The next initialization cycle sets the count and allocates
8782      * the sgls over again.
8783      */
8784     phba->sli4_hba.nvmet_xri_cnt = 0;
8785 }
8786 
8787 /**
8788  * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
8789  * @phba: pointer to lpfc hba data structure.
8790  *
8791  * This routine is invoked to allocate the driver's active sgl memory.
8792  * This array will hold the sglq_entry's for active IOs.
8793  **/
8794 static int
8795 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8796 {
8797     int size;
8798     size = sizeof(struct lpfc_sglq *);
8799     size *= phba->sli4_hba.max_cfg_param.max_xri;
8800 
8801     phba->sli4_hba.lpfc_sglq_active_list =
8802         kzalloc(size, GFP_KERNEL);
8803     if (!phba->sli4_hba.lpfc_sglq_active_list)
8804         return -ENOMEM;
8805     return 0;
8806 }
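
/*
 * The active-sgl array allocated above holds one pointer per XRI so an
 * sglq can be looked up by its XRI tag in O(1) while the I/O is
 * outstanding.  As a rough, assumed example: a port reporting
 * max_xri = 2048 needs 2048 * sizeof(void *) = 16 KB of bookkeeping on a
 * 64-bit kernel.
 */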
8807 
8808 /**
8809  * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
8810  * @phba: pointer to lpfc hba data structure.
8811  *
8812  * This routine is invoked to walk through the array of active sglq entries
8813  * and free all of the resources.
8814  * This is just a placeholder for now.
8815  **/
8816 static void
8817 lpfc_free_active_sgl(struct lpfc_hba *phba)
8818 {
8819     kfree(phba->sli4_hba.lpfc_sglq_active_list);
8820 }
8821 
8822 /**
8823  * lpfc_init_sgl_list - Allocate and initialize sgl list.
8824  * @phba: pointer to lpfc hba data structure.
8825  *
8826  * This routine is invoked to allocate and initialize the driver's sgl
8827  * list and set up the sgl xritag tag array accordingly.
8828  *
8829  **/
8830 static void
8831 lpfc_init_sgl_list(struct lpfc_hba *phba)
8832 {
8833     /* Initialize and populate the sglq list per host/VF. */
8834     INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8835     INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8836     INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8837     INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8838 
8839     /* els xri-sgl book keeping */
8840     phba->sli4_hba.els_xri_cnt = 0;
8841 
8842     /* nvme xri-buffer book keeping */
8843     phba->sli4_hba.io_xri_cnt = 0;
8844 }
8845 
8846 /**
8847  * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
8848  * @phba: pointer to lpfc hba data structure.
8849  *
8850  * This routine is invoked to post rpi header templates to the
8851  * port for those SLI4 ports that do not support extents.  This routine
8852  * posts a PAGE_SIZE memory region to the port to hold up to
8853  * PAGE_SIZE / 64 rpi context headers.  This is an initialization routine
8854  * and should be called only when interrupts are disabled.
8855  *
8856  * Return codes
8857  *  0 - successful
8858  *  -ERROR - otherwise.
8859  **/
8860 int
8861 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8862 {
8863     int rc = 0;
8864     struct lpfc_rpi_hdr *rpi_hdr;
8865 
8866     INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8867     if (!phba->sli4_hba.rpi_hdrs_in_use)
8868         return rc;
8869     if (phba->sli4_hba.extents_in_use)
8870         return -EIO;
8871 
8872     rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8873     if (!rpi_hdr) {
8874         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8875                 "0391 Error during rpi post operation\n");
8876         lpfc_sli4_remove_rpis(phba);
8877         rc = -ENODEV;
8878     }
8879 
8880     return rc;
8881 }
8882 
8883 /**
8884  * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
8885  * @phba: pointer to lpfc hba data structure.
8886  *
8887  * This routine is invoked to allocate a single 4KB memory region to
8888  * support rpis and store it in the phba.  This single region
8889  * provides support for up to 64 rpis.  The region is used globally
8890  * by the device.
8891  *
8892  * Returns:
8893  *   A valid rpi hdr on success.
8894  *   A NULL pointer on any failure.
8895  **/
8896 struct lpfc_rpi_hdr *
8897 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8898 {
8899     uint16_t rpi_limit, curr_rpi_range;
8900     struct lpfc_dmabuf *dmabuf;
8901     struct lpfc_rpi_hdr *rpi_hdr;
8902 
8903     /*
8904      * If the SLI4 port supports extents, posting the rpi header isn't
8905      * required.  Set the expected maximum count and let the actual value
8906      * get set when extents are fully allocated.
8907      */
8908     if (!phba->sli4_hba.rpi_hdrs_in_use)
8909         return NULL;
8910     if (phba->sli4_hba.extents_in_use)
8911         return NULL;
8912 
8913     /* The limit on the logical index is just the max_rpi count. */
8914     rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8915 
8916     spin_lock_irq(&phba->hbalock);
8917     /*
8918      * Establish the starting RPI in this header block.  The starting
8919      * rpi is normalized to a zero base because the physical rpi is
8920      * port based.
8921      */
8922     curr_rpi_range = phba->sli4_hba.next_rpi;
8923     spin_unlock_irq(&phba->hbalock);
8924 
8925     /* Reached full RPI range */
8926     if (curr_rpi_range == rpi_limit)
8927         return NULL;
8928 
8929     /*
8930      * First allocate the protocol header region for the port.  The
8931      * port expects a 4KB DMA-mapped memory region that is 4K aligned.
8932      */
8933     dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8934     if (!dmabuf)
8935         return NULL;
8936 
8937     dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8938                       LPFC_HDR_TEMPLATE_SIZE,
8939                       &dmabuf->phys, GFP_KERNEL);
8940     if (!dmabuf->virt) {
8941         rpi_hdr = NULL;
8942         goto err_free_dmabuf;
8943     }
8944 
8945     if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8946         rpi_hdr = NULL;
8947         goto err_free_coherent;
8948     }
8949 
8950     /* Save the rpi header data for cleanup later. */
8951     rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8952     if (!rpi_hdr)
8953         goto err_free_coherent;
8954 
8955     rpi_hdr->dmabuf = dmabuf;
8956     rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8957     rpi_hdr->page_count = 1;
8958     spin_lock_irq(&phba->hbalock);
8959 
8960     /* The rpi_hdr stores the logical index only. */
8961     rpi_hdr->start_rpi = curr_rpi_range;
8962     rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8963     list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8964 
8965     spin_unlock_irq(&phba->hbalock);
8966     return rpi_hdr;
8967 
8968  err_free_coherent:
8969     dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8970               dmabuf->virt, dmabuf->phys);
8971  err_free_dmabuf:
8972     kfree(dmabuf);
8973     return NULL;
8974 }
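
/*
 * Worked example (assuming LPFC_HDR_TEMPLATE_SIZE is the 4 KB region and
 * LPFC_RPI_HDR_COUNT is 64, as the comments above suggest): each call
 * posts one 4 KB, 4 KB-aligned region covering 64 logical RPIs, i.e.
 *
 *	start_rpi = next_rpi		(0, 64, 128, ...)
 *	next_rpi  = start_rpi + 64
 *
 * and creation stops once the hba's next_rpi reaches max_cfg_param.max_rpi.
 */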
8975 
8976 /**
8977  * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
8978  * @phba: pointer to lpfc hba data structure.
8979  *
8980  * This routine is invoked to remove all memory resources allocated
8981  * to support rpis for SLI4 ports not supporting extents. This routine
8982  * presumes the caller has released all rpis consumed by fabric or port
8983  * logins and is prepared to have the header pages removed.
8984  **/
8985 void
8986 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8987 {
8988     struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8989 
8990     if (!phba->sli4_hba.rpi_hdrs_in_use)
8991         goto exit;
8992 
8993     list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8994                  &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
8995         list_del(&rpi_hdr->list);
8996         dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
8997                   rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
8998         kfree(rpi_hdr->dmabuf);
8999         kfree(rpi_hdr);
9000     }
9001  exit:
9002     /* There are no rpis available to the port now. */
9003     phba->sli4_hba.next_rpi = 0;
9004 }
9005 
9006 /**
9007  * lpfc_hba_alloc - Allocate driver hba data structure for a device.
9008  * @pdev: pointer to pci device data structure.
9009  *
9010  * This routine is invoked to allocate the driver hba data structure for an
9011  * HBA device. If the allocation is successful, the phba reference to the
9012  * PCI device data structure is set.
9013  *
9014  * Return codes
9015  *      pointer to @phba - successful
9016  *      NULL - error
9017  **/
9018 static struct lpfc_hba *
9019 lpfc_hba_alloc(struct pci_dev *pdev)
9020 {
9021     struct lpfc_hba *phba;
9022 
9023     /* Allocate memory for HBA structure */
9024     phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
9025     if (!phba) {
9026         dev_err(&pdev->dev, "failed to allocate hba struct\n");
9027         return NULL;
9028     }
9029 
9030     /* Set reference to PCI device in HBA structure */
9031     phba->pcidev = pdev;
9032 
9033     /* Assign an unused board number */
9034     phba->brd_no = lpfc_get_instance();
9035     if (phba->brd_no < 0) {
9036         kfree(phba);
9037         return NULL;
9038     }
9039     phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9040 
9041     spin_lock_init(&phba->ct_ev_lock);
9042     INIT_LIST_HEAD(&phba->ct_ev_waiters);
9043 
9044     return phba;
9045 }
9046 
9047 /**
9048  * lpfc_hba_free - Free driver hba data structure with a device.
9049  * @phba: pointer to lpfc hba data structure.
9050  *
9051  * This routine is invoked to free the driver hba data structure with an
9052  * HBA device.
9053  **/
9054 static void
9055 lpfc_hba_free(struct lpfc_hba *phba)
9056 {
9057     if (phba->sli_rev == LPFC_SLI_REV4)
9058         kfree(phba->sli4_hba.hdwq);
9059 
9060     /* Release the driver assigned board number */
9061     idr_remove(&lpfc_hba_index, phba->brd_no);
9062 
9063     /* Free memory allocated with sli3 rings */
9064     kfree(phba->sli.sli3_ring);
9065     phba->sli.sli3_ring = NULL;
9066 
9067     kfree(phba);
9068     return;
9069 }
9070 
9071 /**
9072  * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
9073  * @vport: pointer to lpfc vport data structure.
9074  *
9075  * This routine will set up the initial FDMI attribute masks for
9076  * FDMI2 or SmartSAN depending on module parameters. The driver will attempt
9077  * to get these attributes first before falling back; the attribute
9078  * fallback hierarchy is SmartSAN -> FDMI2 -> FDMI1.
9079  **/
9080 void
9081 lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
9082 {
9083     struct lpfc_hba *phba = vport->phba;
9084 
9085     vport->load_flag |= FC_ALLOW_FDMI;
9086     if (phba->cfg_enable_SmartSAN ||
9087         phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9088         /* Setup appropriate attribute masks */
9089         vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9090         if (phba->cfg_enable_SmartSAN)
9091             vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9092         else
9093             vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9094     }
9095 
9096     lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
9097             "6077 Setup FDMI mask: hba x%x port x%x\n",
9098             vport->fdmi_hba_mask, vport->fdmi_port_mask);
9099 }
9100 
9101 /**
9102  * lpfc_create_shost - Create hba physical port with associated scsi host.
9103  * @phba: pointer to lpfc hba data structure.
9104  *
9105  * This routine is invoked to create HBA physical port and associate a SCSI
9106  * host with it.
9107  *
9108  * Return codes
9109  *      0 - successful
9110  *      other values - error
9111  **/
9112 static int
9113 lpfc_create_shost(struct lpfc_hba *phba)
9114 {
9115     struct lpfc_vport *vport;
9116     struct Scsi_Host  *shost;
9117 
9118     /* Initialize HBA FC structure */
9119     phba->fc_edtov = FF_DEF_EDTOV;
9120     phba->fc_ratov = FF_DEF_RATOV;
9121     phba->fc_altov = FF_DEF_ALTOV;
9122     phba->fc_arbtov = FF_DEF_ARBTOV;
9123 
9124     atomic_set(&phba->sdev_cnt, 0);
9125     vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9126     if (!vport)
9127         return -ENODEV;
9128 
9129     shost = lpfc_shost_from_vport(vport);
9130     phba->pport = vport;
9131 
9132     if (phba->nvmet_support) {
9133         /* Only 1 vport (pport) will support NVME target */
9134         phba->targetport = NULL;
9135         phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9136         lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9137                 "6076 NVME Target Found\n");
9138     }
9139 
9140     lpfc_debugfs_initialize(vport);
9141     /* Put reference to SCSI host to driver's device private data */
9142     pci_set_drvdata(phba->pcidev, shost);
9143 
9144     lpfc_setup_fdmi_mask(vport);
9145 
9146     /*
9147      * At this point we are fully registered with PSA. In addition,
9148      * any initial discovery should be completed.
9149      */
9150     return 0;
9151 }
9152 
9153 /**
9154  * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
9155  * @phba: pointer to lpfc hba data structure.
9156  *
9157  * This routine is invoked to destroy HBA physical port and the associated
9158  * SCSI host.
9159  **/
9160 static void
9161 lpfc_destroy_shost(struct lpfc_hba *phba)
9162 {
9163     struct lpfc_vport *vport = phba->pport;
9164 
9165     /* Destroy physical port that associated with the SCSI host */
9166     destroy_port(vport);
9167 
9168     return;
9169 }
9170 
9171 /**
9172  * lpfc_setup_bg - Setup Block guard structures and debug areas.
9173  * @phba: pointer to lpfc hba data structure.
9174  * @shost: the shost to be used to detect Block guard settings.
9175  *
9176  * This routine sets up the local Block guard protocol settings for @shost.
9177  * Only protection settings supported by the driver are registered with it.
9178  **/
9179 static void
9180 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9181 {
9182     uint32_t old_mask;
9183     uint32_t old_guard;
9184 
9185     if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9186         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9187                 "1478 Registering BlockGuard with the "
9188                 "SCSI layer\n");
9189 
9190         old_mask = phba->cfg_prot_mask;
9191         old_guard = phba->cfg_prot_guard;
9192 
9193         /* Only allow supported values */
9194         phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9195             SHOST_DIX_TYPE0_PROTECTION |
9196             SHOST_DIX_TYPE1_PROTECTION);
9197         phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9198                      SHOST_DIX_GUARD_CRC);
9199 
9200         /* DIF Type 1 protection for profiles AST1/C1 is end to end */
9201         if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9202             phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9203 
9204         if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9205             if ((old_mask != phba->cfg_prot_mask) ||
9206                 (old_guard != phba->cfg_prot_guard))
9207                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9208                     "1475 Registering BlockGuard with the "
9209                     "SCSI layer: mask %d  guard %d\n",
9210                     phba->cfg_prot_mask,
9211                     phba->cfg_prot_guard);
9212 
9213             scsi_host_set_prot(shost, phba->cfg_prot_mask);
9214             scsi_host_set_guard(shost, phba->cfg_prot_guard);
9215         } else
9216             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9217                 "1479 Not Registering BlockGuard with the SCSI "
9218                 "layer, Bad protection parameters: %d %d\n",
9219                 old_mask, old_guard);
9220     }
9221 }
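
/*
 * Background on the values above: cfg_prot_mask selects which T10
 * protection capabilities are registered with the SCSI midlayer (DIF bits
 * describe protection information carried with the data, DIX bits describe
 * host-to-HBA data integrity exchange), and cfg_prot_guard selects the
 * guard-tag checksum (CRC or IP).  Unsupported bits are masked off before
 * the scsi_host_set_prot()/scsi_host_set_guard() calls, and a DIX-type-1
 * only mask is widened to include DIF type 1 so protection stays end to
 * end.
 */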
9222 
9223 /**
9224  * lpfc_post_init_setup - Perform necessary device post initialization setup.
9225  * @phba: pointer to lpfc hba data structure.
9226  *
9227  * This routine is invoked to perform all the necessary post initialization
9228  * setup for the device.
9229  **/
9230 static void
9231 lpfc_post_init_setup(struct lpfc_hba *phba)
9232 {
9233     struct Scsi_Host  *shost;
9234     struct lpfc_adapter_event_header adapter_event;
9235 
9236     /* Get the default values for Model Name and Description */
9237     lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9238 
9239     /*
9240      * hba setup may have changed the hba_queue_depth so we need to
9241      * adjust the value of can_queue.
9242      */
9243     shost = pci_get_drvdata(phba->pcidev);
9244     shost->can_queue = phba->cfg_hba_queue_depth - 10;
9245 
9246     lpfc_host_attrib_init(shost);
9247 
9248     if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9249         spin_lock_irq(shost->host_lock);
9250         lpfc_poll_start_timer(phba);
9251         spin_unlock_irq(shost->host_lock);
9252     }
9253 
9254     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9255             "0428 Perform SCSI scan\n");
9256     /* Send board arrival event to upper layer */
9257     adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9258     adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9259     fc_host_post_vendor_event(shost, fc_get_event_number(),
9260                   sizeof(adapter_event),
9261                   (char *) &adapter_event,
9262                   LPFC_NL_VENDOR_ID);
9263     return;
9264 }
9265 
9266 /**
9267  * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
9268  * @phba: pointer to lpfc hba data structure.
9269  *
9270  * This routine is invoked to set up the PCI device memory space for device
9271  * with SLI-3 interface spec.
9272  *
9273  * Return codes
9274  *  0 - successful
9275  *  other values - error
9276  **/
9277 static int
9278 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9279 {
9280     struct pci_dev *pdev = phba->pcidev;
9281     unsigned long bar0map_len, bar2map_len;
9282     int i, hbq_count;
9283     void *ptr;
9284     int error;
9285 
9286     if (!pdev)
9287         return -ENODEV;
9288 
9289     /* Set the device DMA mask size */
9290     error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9291     if (error)
9292         error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9293     if (error)
9294         return error;
9295     error = -ENODEV;
9296 
9297     /* Get the bus address of Bar0 and Bar2 and the number of bytes
9298      * required by each mapping.
9299      */
9300     phba->pci_bar0_map = pci_resource_start(pdev, 0);
9301     bar0map_len = pci_resource_len(pdev, 0);
9302 
9303     phba->pci_bar2_map = pci_resource_start(pdev, 2);
9304     bar2map_len = pci_resource_len(pdev, 2);
9305 
9306     /* Map HBA SLIM to a kernel virtual address. */
9307     phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9308     if (!phba->slim_memmap_p) {
9309         dev_printk(KERN_ERR, &pdev->dev,
9310                "ioremap failed for SLIM memory.\n");
9311         goto out;
9312     }
9313 
9314     /* Map HBA Control Registers to a kernel virtual address. */
9315     phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9316     if (!phba->ctrl_regs_memmap_p) {
9317         dev_printk(KERN_ERR, &pdev->dev,
9318                "ioremap failed for HBA control registers.\n");
9319         goto out_iounmap_slim;
9320     }
9321 
9322     /* Allocate memory for SLI-2 structures */
9323     phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9324                            &phba->slim2p.phys, GFP_KERNEL);
9325     if (!phba->slim2p.virt)
9326         goto out_iounmap;
9327 
9328     phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9329     phba->mbox_ext = (phba->slim2p.virt +
9330         offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9331     phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9332     phba->IOCBs = (phba->slim2p.virt +
9333                offsetof(struct lpfc_sli2_slim, IOCBs));
9334 
9335     phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9336                          lpfc_sli_hbq_size(),
9337                          &phba->hbqslimp.phys,
9338                          GFP_KERNEL);
9339     if (!phba->hbqslimp.virt)
9340         goto out_free_slim;
9341 
9342     hbq_count = lpfc_sli_hbq_count();
9343     ptr = phba->hbqslimp.virt;
9344     for (i = 0; i < hbq_count; ++i) {
9345         phba->hbqs[i].hbq_virt = ptr;
9346         INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9347         ptr += (lpfc_hbq_defs[i]->entry_count *
9348             sizeof(struct lpfc_hbq_entry));
9349     }
9350     phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9351     phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9352 
9353     memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9354 
9355     phba->MBslimaddr = phba->slim_memmap_p;
9356     phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9357     phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9358     phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9359     phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9360 
9361     return 0;
9362 
9363 out_free_slim:
9364     dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9365               phba->slim2p.virt, phba->slim2p.phys);
9366 out_iounmap:
9367     iounmap(phba->ctrl_regs_memmap_p);
9368 out_iounmap_slim:
9369     iounmap(phba->slim_memmap_p);
9370 out:
9371     return error;
9372 }
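
/*
 * Summary of the SLI-3 memory map established above:
 *
 *	BAR0 -> SLIM (mailbox/ring pointers), mapped at slim_memmap_p
 *	BAR2 -> HBA control registers (HA/CA/HS/HC), mapped at
 *		ctrl_regs_memmap_p
 *	slim2p   -> coherent DMA block for the SLI-2 mailbox, PCB and IOCBs
 *	hbqslimp -> coherent DMA block for the host buffer queue entries
 *
 * The error labels unwind these resources in the reverse order of setup.
 */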
9373 
9374 /**
9375  * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
9376  * @phba: pointer to lpfc hba data structure.
9377  *
9378  * This routine is invoked to unset the PCI device memory space for device
9379  * with SLI-3 interface spec.
9380  **/
9381 static void
9382 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9383 {
9384     struct pci_dev *pdev;
9385 
9386     /* Obtain PCI device reference */
9387     if (!phba->pcidev)
9388         return;
9389     else
9390         pdev = phba->pcidev;
9391 
9392     /* Free coherent DMA memory allocated */
9393     dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9394               phba->hbqslimp.virt, phba->hbqslimp.phys);
9395     dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9396               phba->slim2p.virt, phba->slim2p.phys);
9397 
9398     /* I/O memory unmap */
9399     iounmap(phba->ctrl_regs_memmap_p);
9400     iounmap(phba->slim_memmap_p);
9401 
9402     return;
9403 }
9404 
9405 /**
9406  * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
9407  * @phba: pointer to lpfc hba data structure.
9408  *
9409  * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
9410  * done and check status.
9411  *
9412  * Return 0 if successful, otherwise -ENODEV.
9413  **/
9414 int
9415 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9416 {
9417     struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9418     struct lpfc_register reg_data;
9419     int i, port_error = 0;
9420     uint32_t if_type;
9421 
9422     memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
9423     memset(&reg_data, 0, sizeof(reg_data));
9424     if (!phba->sli4_hba.PSMPHRregaddr)
9425         return -ENODEV;
9426 
9427     /* Wait up to 30 seconds for the SLI Port POST done and ready */
9428     for (i = 0; i < 3000; i++) {
9429         if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9430             &portsmphr_reg.word0) ||
9431             (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
9432             /* Port has a fatal POST error, break out */
9433             port_error = -ENODEV;
9434             break;
9435         }
9436         if (LPFC_POST_STAGE_PORT_READY ==
9437             bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9438             break;
9439         msleep(10);
9440     }
9441 
9442     /*
9443      * If there was a port error during POST, then don't proceed with
9444      * other register reads as the data may not be valid.  Just exit.
9445      */
9446     if (port_error) {
9447         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9448             "1408 Port Failed POST - portsmphr=0x%x, "
9449             "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9450             "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9451             portsmphr_reg.word0,
9452             bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9453             bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9454             bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9455             bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9456             bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9457             bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9458             bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9459             bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9460     } else {
9461         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9462                 "2534 Device Info: SLIFamily=0x%x, "
9463                 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9464                 "SLIHint_2=0x%x, FT=0x%x\n",
9465                 bf_get(lpfc_sli_intf_sli_family,
9466                        &phba->sli4_hba.sli_intf),
9467                 bf_get(lpfc_sli_intf_slirev,
9468                        &phba->sli4_hba.sli_intf),
9469                 bf_get(lpfc_sli_intf_if_type,
9470                        &phba->sli4_hba.sli_intf),
9471                 bf_get(lpfc_sli_intf_sli_hint1,
9472                        &phba->sli4_hba.sli_intf),
9473                 bf_get(lpfc_sli_intf_sli_hint2,
9474                        &phba->sli4_hba.sli_intf),
9475                 bf_get(lpfc_sli_intf_func_type,
9476                        &phba->sli4_hba.sli_intf));
9477         /*
9478          * Check for other Port errors during the initialization
9479          * process.  Fail the load if the port did not come up
9480          * correctly.
9481          */
9482         if_type = bf_get(lpfc_sli_intf_if_type,
9483                  &phba->sli4_hba.sli_intf);
9484         switch (if_type) {
9485         case LPFC_SLI_INTF_IF_TYPE_0:
9486             phba->sli4_hba.ue_mask_lo =
9487                   readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9488             phba->sli4_hba.ue_mask_hi =
9489                   readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9490             uerrlo_reg.word0 =
9491                   readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9492             uerrhi_reg.word0 =
9493                 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9494             if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9495                 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9496                 lpfc_printf_log(phba, KERN_ERR,
9497                         LOG_TRACE_EVENT,
9498                         "1422 Unrecoverable Error "
9499                         "Detected during POST "
9500                         "uerr_lo_reg=0x%x, "
9501                         "uerr_hi_reg=0x%x, "
9502                         "ue_mask_lo_reg=0x%x, "
9503                         "ue_mask_hi_reg=0x%x\n",
9504                         uerrlo_reg.word0,
9505                         uerrhi_reg.word0,
9506                         phba->sli4_hba.ue_mask_lo,
9507                         phba->sli4_hba.ue_mask_hi);
9508                 port_error = -ENODEV;
9509             }
9510             break;
9511         case LPFC_SLI_INTF_IF_TYPE_2:
9512         case LPFC_SLI_INTF_IF_TYPE_6:
9513             /* Final checks.  The port status should be clean. */
9514             if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9515                 &reg_data.word0) ||
9516                 (bf_get(lpfc_sliport_status_err, &reg_data) &&
9517                  !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9518                 phba->work_status[0] =
9519                     readl(phba->sli4_hba.u.if_type2.
9520                           ERR1regaddr);
9521                 phba->work_status[1] =
9522                     readl(phba->sli4_hba.u.if_type2.
9523                           ERR2regaddr);
9524                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9525                     "2888 Unrecoverable port error "
9526                     "following POST: port status reg "
9527                     "0x%x, port_smphr reg 0x%x, "
9528                     "error 1=0x%x, error 2=0x%x\n",
9529                     reg_data.word0,
9530                     portsmphr_reg.word0,
9531                     phba->work_status[0],
9532                     phba->work_status[1]);
9533                 port_error = -ENODEV;
9534                 break;
9535             }
9536 
9537             if (lpfc_pldv_detect &&
9538                 bf_get(lpfc_sli_intf_sli_family,
9539                    &phba->sli4_hba.sli_intf) ==
9540                     LPFC_SLI_INTF_FAMILY_G6)
9541                 pci_write_config_byte(phba->pcidev,
9542                               LPFC_SLI_INTF, CFG_PLD);
9543             break;
9544         case LPFC_SLI_INTF_IF_TYPE_1:
9545         default:
9546             break;
9547         }
9548     }
9549     return port_error;
9550 }
9551 
9552 /**
9553  * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
9554  * @phba: pointer to lpfc hba data structure.
9555  * @if_type:  The SLI4 interface type getting configured.
9556  *
9557  * This routine is invoked to set up SLI4 BAR0 PCI config space register
9558  * memory map.
9559  **/
9560 static void
9561 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9562 {
9563     switch (if_type) {
9564     case LPFC_SLI_INTF_IF_TYPE_0:
9565         phba->sli4_hba.u.if_type0.UERRLOregaddr =
9566             phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9567         phba->sli4_hba.u.if_type0.UERRHIregaddr =
9568             phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9569         phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9570             phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9571         phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9572             phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9573         phba->sli4_hba.SLIINTFregaddr =
9574             phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9575         break;
9576     case LPFC_SLI_INTF_IF_TYPE_2:
9577         phba->sli4_hba.u.if_type2.EQDregaddr =
9578             phba->sli4_hba.conf_regs_memmap_p +
9579                         LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9580         phba->sli4_hba.u.if_type2.ERR1regaddr =
9581             phba->sli4_hba.conf_regs_memmap_p +
9582                         LPFC_CTL_PORT_ER1_OFFSET;
9583         phba->sli4_hba.u.if_type2.ERR2regaddr =
9584             phba->sli4_hba.conf_regs_memmap_p +
9585                         LPFC_CTL_PORT_ER2_OFFSET;
9586         phba->sli4_hba.u.if_type2.CTRLregaddr =
9587             phba->sli4_hba.conf_regs_memmap_p +
9588                         LPFC_CTL_PORT_CTL_OFFSET;
9589         phba->sli4_hba.u.if_type2.STATUSregaddr =
9590             phba->sli4_hba.conf_regs_memmap_p +
9591                         LPFC_CTL_PORT_STA_OFFSET;
9592         phba->sli4_hba.SLIINTFregaddr =
9593             phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9594         phba->sli4_hba.PSMPHRregaddr =
9595             phba->sli4_hba.conf_regs_memmap_p +
9596                         LPFC_CTL_PORT_SEM_OFFSET;
9597         phba->sli4_hba.RQDBregaddr =
9598             phba->sli4_hba.conf_regs_memmap_p +
9599                         LPFC_ULP0_RQ_DOORBELL;
9600         phba->sli4_hba.WQDBregaddr =
9601             phba->sli4_hba.conf_regs_memmap_p +
9602                         LPFC_ULP0_WQ_DOORBELL;
9603         phba->sli4_hba.CQDBregaddr =
9604             phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9605         phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9606         phba->sli4_hba.MQDBregaddr =
9607             phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9608         phba->sli4_hba.BMBXregaddr =
9609             phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9610         break;
9611     case LPFC_SLI_INTF_IF_TYPE_6:
9612         phba->sli4_hba.u.if_type2.EQDregaddr =
9613             phba->sli4_hba.conf_regs_memmap_p +
9614                         LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9615         phba->sli4_hba.u.if_type2.ERR1regaddr =
9616             phba->sli4_hba.conf_regs_memmap_p +
9617                         LPFC_CTL_PORT_ER1_OFFSET;
9618         phba->sli4_hba.u.if_type2.ERR2regaddr =
9619             phba->sli4_hba.conf_regs_memmap_p +
9620                         LPFC_CTL_PORT_ER2_OFFSET;
9621         phba->sli4_hba.u.if_type2.CTRLregaddr =
9622             phba->sli4_hba.conf_regs_memmap_p +
9623                         LPFC_CTL_PORT_CTL_OFFSET;
9624         phba->sli4_hba.u.if_type2.STATUSregaddr =
9625             phba->sli4_hba.conf_regs_memmap_p +
9626                         LPFC_CTL_PORT_STA_OFFSET;
9627         phba->sli4_hba.PSMPHRregaddr =
9628             phba->sli4_hba.conf_regs_memmap_p +
9629                         LPFC_CTL_PORT_SEM_OFFSET;
9630         phba->sli4_hba.BMBXregaddr =
9631             phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9632         break;
9633     case LPFC_SLI_INTF_IF_TYPE_1:
9634     default:
9635         dev_printk(KERN_ERR, &phba->pcidev->dev,
9636                "FATAL - unsupported SLI4 interface type - %d\n",
9637                if_type);
9638         break;
9639     }
9640 }
9641 
9642 /**
9643  * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
9644  * @phba: pointer to lpfc hba data structure.
9645  * @if_type: sli if type to operate on.
9646  *
9647  * This routine is invoked to set up SLI4 BAR1 register memory map.
9648  **/
9649 static void
9650 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9651 {
9652     switch (if_type) {
9653     case LPFC_SLI_INTF_IF_TYPE_0:
9654         phba->sli4_hba.PSMPHRregaddr =
9655             phba->sli4_hba.ctrl_regs_memmap_p +
9656             LPFC_SLIPORT_IF0_SMPHR;
9657         phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9658             LPFC_HST_ISR0;
9659         phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9660             LPFC_HST_IMR0;
9661         phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9662             LPFC_HST_ISCR0;
9663         break;
9664     case LPFC_SLI_INTF_IF_TYPE_6:
9665         phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9666             LPFC_IF6_RQ_DOORBELL;
9667         phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9668             LPFC_IF6_WQ_DOORBELL;
9669         phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9670             LPFC_IF6_CQ_DOORBELL;
9671         phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9672             LPFC_IF6_EQ_DOORBELL;
9673         phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9674             LPFC_IF6_MQ_DOORBELL;
9675         break;
9676     case LPFC_SLI_INTF_IF_TYPE_2:
9677     case LPFC_SLI_INTF_IF_TYPE_1:
9678     default:
9679         dev_err(&phba->pcidev->dev,
9680                "FATAL - unsupported SLI4 interface type - %d\n",
9681                if_type);
9682         break;
9683     }
9684 }
9685 
9686 /**
9687  * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
9688  * @phba: pointer to lpfc hba data structure.
9689  * @vf: virtual function number
9690  *
9691  * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
9692  * based on the given virtual function number, @vf.
9693  *
9694  * Return 0 if successful, otherwise -ENODEV.
9695  **/
9696 static int
9697 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9698 {
9699     if (vf > LPFC_VIR_FUNC_MAX)
9700         return -ENODEV;
9701 
9702     phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9703                 vf * LPFC_VFR_PAGE_SIZE +
9704                     LPFC_ULP0_RQ_DOORBELL);
9705     phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9706                 vf * LPFC_VFR_PAGE_SIZE +
9707                     LPFC_ULP0_WQ_DOORBELL);
9708     phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9709                 vf * LPFC_VFR_PAGE_SIZE +
9710                     LPFC_EQCQ_DOORBELL);
9711     phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9712     phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9713                 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9714     phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9715                 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9716     return 0;
9717 }
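
/*
 * Doorbell layout implied by the math above: each virtual function owns
 * one doorbell page, so a given doorbell register lives at
 *
 *	drbl_regs_memmap_p + vf * LPFC_VFR_PAGE_SIZE + <per-queue offset>
 *
 * with the EQ doorbell sharing the combined EQ/CQ doorbell offset.
 */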
9718 
9719 /**
9720  * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
9721  * @phba: pointer to lpfc hba data structure.
9722  *
9723  * This routine is invoked to create the bootstrap mailbox
9724  * region consistent with the SLI-4 interface spec.  This
9725  * routine allocates all memory necessary to communicate
9726  * mailbox commands to the port and sets up all alignment
9727  * needs.  No locks are expected to be held when calling
9728  * this routine.
9729  *
9730  * Return codes
9731  *  0 - successful
9732  *  -ENOMEM - could not allocate memory.
9733  **/
9734 static int
9735 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9736 {
9737     uint32_t bmbx_size;
9738     struct lpfc_dmabuf *dmabuf;
9739     struct dma_address *dma_address;
9740     uint32_t pa_addr;
9741     uint64_t phys_addr;
9742 
9743     dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9744     if (!dmabuf)
9745         return -ENOMEM;
9746 
9747     /*
9748      * The bootstrap mailbox region is comprised of 2 parts
9749      * plus an alignment restriction of 16 bytes.
9750      */
9751     bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9752     dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9753                       &dmabuf->phys, GFP_KERNEL);
9754     if (!dmabuf->virt) {
9755         kfree(dmabuf);
9756         return -ENOMEM;
9757     }
9758 
9759     /*
9760      * Initialize the bootstrap mailbox pointers now so that the register
9761      * operations are simple later.  The mailbox dma address is required
9762      * to be 16-byte aligned.  Also align the virtual memory as each
9763  * mailbox is copied into the bmbx mailbox region before issuing the
9764      * command to the port.
9765      */
9766     phba->sli4_hba.bmbx.dmabuf = dmabuf;
9767     phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9768 
9769     phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9770                           LPFC_ALIGN_16_BYTE);
9771     phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9772                           LPFC_ALIGN_16_BYTE);
9773 
9774     /*
9775      * Set the high and low physical addresses now.  The SLI4 alignment
9776      * requirement is 16 bytes and the mailbox is posted to the port
9777      * as two 30-bit addresses.  The other data is a bit marking whether
9778      * the 30-bit address is the high or low address.
9779      * Upcast bmbx aphys to 64bits so shift instruction compiles
9780      * clean on 32 bit machines.
9781      */
9782     dma_address = &phba->sli4_hba.bmbx.dma_address;
9783     phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9784     pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9785     dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9786                        LPFC_BMBX_BIT1_ADDR_HI);
9787 
9788     pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9789     dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9790                        LPFC_BMBX_BIT1_ADDR_LO);
9791     return 0;
9792 }
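
/*
 * Worked example of the split above (value chosen for illustration): with
 * a 16-byte aligned aphys of 0x0000000123456780,
 *
 *	aphys >> 4  = 0x12345678	(bits 4..33,  the low 30-bit piece)
 *	aphys >> 34 = 0x0		(bits 34..63, the high 30-bit piece)
 *
 * Each piece is shifted left by two and tagged with its ADDR_HI/ADDR_LO
 * marker bit, so the two BMBX register writes together convey the full
 * 64-bit, 16-byte aligned bootstrap mailbox address.
 */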
9793 
9794 /**
9795  * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
9796  * @phba: pointer to lpfc hba data structure.
9797  *
9798  * This routine is invoked to teardown the bootstrap mailbox
9799  * region and release all host resources. This routine requires
9800  * the caller to ensure all mailbox commands have been recovered, no
9801  * additional mailbox commands are sent, and interrupts are disabled
9802  * before calling this routine.
9803  *
9804  **/
9805 static void
9806 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9807 {
9808     dma_free_coherent(&phba->pcidev->dev,
9809               phba->sli4_hba.bmbx.bmbx_size,
9810               phba->sli4_hba.bmbx.dmabuf->virt,
9811               phba->sli4_hba.bmbx.dmabuf->phys);
9812 
9813     kfree(phba->sli4_hba.bmbx.dmabuf);
9814     memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9815 }
9816 
9817 static const char * const lpfc_topo_to_str[] = {
9818     "Loop then P2P",
9819     "Loopback",
9820     "P2P Only",
9821     "Unsupported",
9822     "Loop Only",
9823     "Unsupported",
9824     "P2P then Loop",
9825 };
9826 
9827 #define LINK_FLAGS_DEF  0x0
9828 #define LINK_FLAGS_P2P  0x1
9829 #define LINK_FLAGS_LOOP 0x2
9830 /**
9831  * lpfc_map_topology - Map the topology read from READ_CONFIG
9832  * @phba: pointer to lpfc hba data structure.
9833  * @rd_config: pointer to read config data
9834  *
9835  * This routine is invoked to map the topology values as read
9836  * from the read config mailbox command. If the persistent
9837  * topology feature is supported, the firmware will provide the
9838  * saved topology information to be used in INIT_LINK
9839  **/
9840 static void
9841 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9842 {
9843     u8 ptv, tf, pt;
9844 
9845     ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9846     tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9847     pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9848 
9849     lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9850             "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x",
9851              ptv, tf, pt);
9852     if (!ptv) {
9853         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9854                 "2019 FW does not support persistent topology "
9855                 "Using driver parameter defined value [%s]",
9856                 lpfc_topo_to_str[phba->cfg_topology]);
9857         return;
9858     }
9859     /* FW supports persistent topology - override module parameter value */
9860     phba->hba_flag |= HBA_PERSISTENT_TOPO;
9861 
9862     /* if ASIC_GEN_NUM >= 0xC */
9863     if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9864             LPFC_SLI_INTF_IF_TYPE_6) ||
9865         (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9866             LPFC_SLI_INTF_FAMILY_G6)) {
9867         if (!tf) {
9868             phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9869                     ? FLAGS_TOPOLOGY_MODE_LOOP
9870                     : FLAGS_TOPOLOGY_MODE_PT_PT);
9871         } else {
9872             phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9873         }
9874     } else { /* G5 */
9875         if (tf) {
9876             /* If topology failover set - pt is '0' or '1' */
9877             phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9878                           FLAGS_TOPOLOGY_MODE_LOOP_PT);
9879         } else {
9880             phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9881                     ? FLAGS_TOPOLOGY_MODE_PT_PT
9882                     : FLAGS_TOPOLOGY_MODE_LOOP);
9883         }
9884     }
9885     if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9886         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9887                 "2020 Using persistent topology value [%s]",
9888                 lpfc_topo_to_str[phba->cfg_topology]);
9889     } else {
9890         lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9891                 "2021 Invalid topology values from FW "
9892                 "Using driver parameter defined value [%s]",
9893                 lpfc_topo_to_str[phba->cfg_topology]);
9894     }
9895 }
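
/*
 * Summary of the mapping above when the firmware reports a persistent
 * topology (ptv set):
 *
 *	G6/if_type 6:  tf = 0, pt = loop   -> loop only
 *	               tf = 0, otherwise   -> point-to-point only
 *	               tf = 1              -> persistent value ignored
 *	G5:            tf = 1, pt = 1      -> p2p, then loop
 *	               tf = 1, pt = 0      -> loop, then p2p
 *	               tf = 0, pt = p2p    -> point-to-point only
 *	               tf = 0, otherwise   -> loop only
 *
 * With ptv clear the module parameter topology is left unchanged.
 */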
9896 
9897 /**
9898  * lpfc_sli4_read_config - Get the config parameters.
9899  * @phba: pointer to lpfc hba data structure.
9900  *
9901  * This routine is invoked to read the configuration parameters from the HBA.
9902  * The configuration parameters are used to set the base and maximum values
9903  * for RPIs, XRIs, VPIs, VFIs and FCFIs. These values also affect the resource
9904  * allocation for the port.
9905  *
9906  * Return codes
9907  *  0 - successful
9908  *  -ENOMEM - No available memory
9909  *  -EIO - The mailbox failed to complete successfully.
9910  **/
9911 int
9912 lpfc_sli4_read_config(struct lpfc_hba *phba)
9913 {
9914     LPFC_MBOXQ_t *pmb;
9915     struct lpfc_mbx_read_config *rd_config;
9916     union  lpfc_sli4_cfg_shdr *shdr;
9917     uint32_t shdr_status, shdr_add_status;
9918     struct lpfc_mbx_get_func_cfg *get_func_cfg;
9919     struct lpfc_rsrc_desc_fcfcoe *desc;
9920     char *pdesc_0;
9921     uint16_t forced_link_speed;
9922     uint32_t if_type, qmin, fawwpn;
9923     int length, i, rc = 0, rc2;
9924 
9925     pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9926     if (!pmb) {
9927         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9928                 "2011 Unable to allocate memory for issuing "
9929                 "SLI_CONFIG_SPECIAL mailbox command\n");
9930         return -ENOMEM;
9931     }
9932 
9933     lpfc_read_config(phba, pmb);
9934 
9935     rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9936     if (rc != MBX_SUCCESS) {
9937         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9938                 "2012 Mailbox failed , mbxCmd x%x "
9939                 "READ_CONFIG, mbxStatus x%x\n",
9940                 bf_get(lpfc_mqe_command, &pmb->u.mqe),
9941                 bf_get(lpfc_mqe_status, &pmb->u.mqe));
9942         rc = -EIO;
9943     } else {
9944         rd_config = &pmb->u.mqe.un.rd_config;
9945         if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9946             phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9947             phba->sli4_hba.lnk_info.lnk_tp =
9948                 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9949             phba->sli4_hba.lnk_info.lnk_no =
9950                 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9951             lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9952                     "3081 lnk_type:%d, lnk_numb:%d\n",
9953                     phba->sli4_hba.lnk_info.lnk_tp,
9954                     phba->sli4_hba.lnk_info.lnk_no);
9955         } else
9956             lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9957                     "3082 Mailbox (x%x) returned ldv:x0\n",
9958                     bf_get(lpfc_mqe_command, &pmb->u.mqe));
9959         if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9960             phba->bbcredit_support = 1;
9961             phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9962         }
9963 
9964         fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
9965 
9966         if (fawwpn) {
9967             lpfc_printf_log(phba, KERN_INFO,
9968                     LOG_INIT | LOG_DISCOVERY,
9969                     "2702 READ_CONFIG: FA-PWWN is "
9970                     "configured on\n");
9971             phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
9972         } else {
9973             /* Clear FW configured flag, preserve driver flag */
9974             phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
9975         }
9976 
9977         phba->sli4_hba.conf_trunk =
9978             bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9979         phba->sli4_hba.extents_in_use =
9980             bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9981 
9982         phba->sli4_hba.max_cfg_param.max_xri =
9983             bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9984         /* Reduce resource usage in kdump environment */
9985         if (is_kdump_kernel() &&
9986             phba->sli4_hba.max_cfg_param.max_xri > 512)
9987             phba->sli4_hba.max_cfg_param.max_xri = 512;
9988         phba->sli4_hba.max_cfg_param.xri_base =
9989             bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9990         phba->sli4_hba.max_cfg_param.max_vpi =
9991             bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9992         /* Limit the max we support */
9993         if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9994             phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9995         phba->sli4_hba.max_cfg_param.vpi_base =
9996             bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
9997         phba->sli4_hba.max_cfg_param.max_rpi =
9998             bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
9999         phba->sli4_hba.max_cfg_param.rpi_base =
10000             bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
10001         phba->sli4_hba.max_cfg_param.max_vfi =
10002             bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
10003         phba->sli4_hba.max_cfg_param.vfi_base =
10004             bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
10005         phba->sli4_hba.max_cfg_param.max_fcfi =
10006             bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
10007         phba->sli4_hba.max_cfg_param.max_eq =
10008             bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
10009         phba->sli4_hba.max_cfg_param.max_rq =
10010             bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
10011         phba->sli4_hba.max_cfg_param.max_wq =
10012             bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
10013         phba->sli4_hba.max_cfg_param.max_cq =
10014             bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
10015         phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
10016         phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
10017         phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
10018         phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
10019         phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
10020                 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
10021         phba->max_vports = phba->max_vpi;
10022 
10023         /* Next decide on FPIN or Signal E2E CGN support
10024          * For congestion alarms and warnings the valid combinations are:
10025          * 1. FPIN alarms / FPIN warnings
10026          * 2. Signal alarms / Signal warnings
10027          * 3. FPIN alarms / Signal warnings
10028          * 4. Signal alarms / FPIN warnings
10029          *
10030          * Initialize the adapter frequency to 100 mSecs
10031          */
10032         phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10033         phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10034         phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10035 
10036         if (lpfc_use_cgn_signal) {
10037             if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
10038                 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10039                 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10040             }
10041             if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
10042                 /* MUST support both alarm and warning
10043                  * because EDC does not support alarm alone.
10044                  */
10045                 if (phba->cgn_reg_signal !=
10046                     EDC_CG_SIG_WARN_ONLY) {
10047                     /* Must support both or none */
10048                     phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10049                     phba->cgn_reg_signal =
10050                         EDC_CG_SIG_NOTSUPPORTED;
10051                 } else {
10052                     phba->cgn_reg_signal =
10053                         EDC_CG_SIG_WARN_ALARM;
10054                     phba->cgn_reg_fpin =
10055                         LPFC_CGN_FPIN_NONE;
10056                 }
10057             }
10058         }
10059 
10060         /* Set the congestion initial signal and fpin values. */
10061         phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10062         phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10063 
10064         lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
10065                 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
10066                 phba->cgn_reg_signal, phba->cgn_reg_fpin);
10067 
10068         lpfc_map_topology(phba, rd_config);
10069         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10070                 "2003 cfg params Extents? %d "
10071                 "XRI(B:%d M:%d), "
10072                 "VPI(B:%d M:%d) "
10073                 "VFI(B:%d M:%d) "
10074                 "RPI(B:%d M:%d) "
10075                 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
10076                 phba->sli4_hba.extents_in_use,
10077                 phba->sli4_hba.max_cfg_param.xri_base,
10078                 phba->sli4_hba.max_cfg_param.max_xri,
10079                 phba->sli4_hba.max_cfg_param.vpi_base,
10080                 phba->sli4_hba.max_cfg_param.max_vpi,
10081                 phba->sli4_hba.max_cfg_param.vfi_base,
10082                 phba->sli4_hba.max_cfg_param.max_vfi,
10083                 phba->sli4_hba.max_cfg_param.rpi_base,
10084                 phba->sli4_hba.max_cfg_param.max_rpi,
10085                 phba->sli4_hba.max_cfg_param.max_fcfi,
10086                 phba->sli4_hba.max_cfg_param.max_eq,
10087                 phba->sli4_hba.max_cfg_param.max_cq,
10088                 phba->sli4_hba.max_cfg_param.max_wq,
10089                 phba->sli4_hba.max_cfg_param.max_rq,
10090                 phba->lmt);
10091 
10092         /*
10093          * Calculate queue resources based on how
10094          * many WQ/CQ/EQs are available.
10095          */
10096         qmin = phba->sli4_hba.max_cfg_param.max_wq;
10097         if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10098             qmin = phba->sli4_hba.max_cfg_param.max_cq;
10099         if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10100             qmin = phba->sli4_hba.max_cfg_param.max_eq;
10101         /*
10102          * What's left after this can go toward NVME / FCP.
10103          * The minus 4 accounts for ELS, NVME LS, MBOX
10104          * plus one extra. When configured for
10105          * NVMET, FCP io channel WQs are not created.
10106          */
10107         qmin -= 4;
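
        /*
         * Worked example (numbers assumed, not from a real port): if the
         * firmware reported max_wq = 128, max_cq = 160 and max_eq = 64,
         * then qmin = min(128, 160, 64) - 4 = 60, and both cfg_irq_chann
         * and cfg_hdw_queue are clamped to 60 below when they exceed it.
         */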
10108 
10109         /* Check to see if there is enough for NVME */
10110         if ((phba->cfg_irq_chann > qmin) ||
10111             (phba->cfg_hdw_queue > qmin)) {
10112             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10113                     "2005 Reducing Queues - "
10114                     "FW resource limitation: "
10115                     "WQ %d CQ %d EQ %d: min %d: "
10116                     "IRQ %d HDWQ %d\n",
10117                     phba->sli4_hba.max_cfg_param.max_wq,
10118                     phba->sli4_hba.max_cfg_param.max_cq,
10119                     phba->sli4_hba.max_cfg_param.max_eq,
10120                     qmin, phba->cfg_irq_chann,
10121                     phba->cfg_hdw_queue);
10122 
10123             if (phba->cfg_irq_chann > qmin)
10124                 phba->cfg_irq_chann = qmin;
10125             if (phba->cfg_hdw_queue > qmin)
10126                 phba->cfg_hdw_queue = qmin;
10127         }
10128     }
10129 
10130     if (rc)
10131         goto read_cfg_out;
10132 
10133     /* Update link speed if forced link speed is supported */
10134     if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10135     if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10136         forced_link_speed =
10137             bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
10138         if (forced_link_speed) {
10139             phba->hba_flag |= HBA_FORCED_LINK_SPEED;
10140 
10141             switch (forced_link_speed) {
10142             case LINK_SPEED_1G:
10143                 phba->cfg_link_speed =
10144                     LPFC_USER_LINK_SPEED_1G;
10145                 break;
10146             case LINK_SPEED_2G:
10147                 phba->cfg_link_speed =
10148                     LPFC_USER_LINK_SPEED_2G;
10149                 break;
10150             case LINK_SPEED_4G:
10151                 phba->cfg_link_speed =
10152                     LPFC_USER_LINK_SPEED_4G;
10153                 break;
10154             case LINK_SPEED_8G:
10155                 phba->cfg_link_speed =
10156                     LPFC_USER_LINK_SPEED_8G;
10157                 break;
10158             case LINK_SPEED_10G:
10159                 phba->cfg_link_speed =
10160                     LPFC_USER_LINK_SPEED_10G;
10161                 break;
10162             case LINK_SPEED_16G:
10163                 phba->cfg_link_speed =
10164                     LPFC_USER_LINK_SPEED_16G;
10165                 break;
10166             case LINK_SPEED_32G:
10167                 phba->cfg_link_speed =
10168                     LPFC_USER_LINK_SPEED_32G;
10169                 break;
10170             case LINK_SPEED_64G:
10171                 phba->cfg_link_speed =
10172                     LPFC_USER_LINK_SPEED_64G;
10173                 break;
10174             case 0xffff:
10175                 phba->cfg_link_speed =
10176                     LPFC_USER_LINK_SPEED_AUTO;
10177                 break;
10178             default:
10179                 lpfc_printf_log(phba, KERN_ERR,
10180                         LOG_TRACE_EVENT,
10181                         "0047 Unrecognized link "
10182                         "speed: %d\n",
10183                         forced_link_speed);
10184                 phba->cfg_link_speed =
10185                     LPFC_USER_LINK_SPEED_AUTO;
10186             }
10187         }
10188     }
10189 
10190     /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
10191     length = phba->sli4_hba.max_cfg_param.max_xri -
10192             lpfc_sli4_get_els_iocb_cnt(phba);
10193     if (phba->cfg_hba_queue_depth > length) {
10194         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10195                 "3361 HBA queue depth changed from %d to %d\n",
10196                 phba->cfg_hba_queue_depth, length);
10197         phba->cfg_hba_queue_depth = length;
10198     }
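    /*
     * Illustrative arithmetic (assumed values, not from a real config):
     * with max_xri = 2048 and lpfc_sli4_get_els_iocb_cnt() returning 256,
     * a configured lpfc_hba_queue_depth larger than 1792 is clamped to
     * 1792 so outstanding I/Os can never exhaust the XRI pool.
     */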
10199 
10200     if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10201         LPFC_SLI_INTF_IF_TYPE_2)
10202         goto read_cfg_out;
10203 
10204     /* get the pf# and vf# for SLI4 if_type 2 port */
10205     length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10206           sizeof(struct lpfc_sli4_cfg_mhdr));
10207     lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10208              LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
10209              length, LPFC_SLI4_MBX_EMBED);
10210 
10211     rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10212     shdr = (union lpfc_sli4_cfg_shdr *)
10213                 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10214     shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10215     shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10216     if (rc2 || shdr_status || shdr_add_status) {
10217         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10218                 "3026 Mailbox failed, mbxCmd x%x "
10219                 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10220                 bf_get(lpfc_mqe_command, &pmb->u.mqe),
10221                 bf_get(lpfc_mqe_status, &pmb->u.mqe));
10222         goto read_cfg_out;
10223     }
10224 
10225     /* search for fc_fcoe resource descriptor */
10226     get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10227 
10228     pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10229     desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10230     length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10231     if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10232         length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10233     else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10234         goto read_cfg_out;
10235 
10236     for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10237         desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10238         if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10239             bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10240             phba->sli4_hba.iov.pf_number =
10241                 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10242             phba->sli4_hba.iov.vf_number =
10243                 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10244             break;
10245         }
10246     }
10247 
10248     if (i < LPFC_RSRC_DESC_MAX_NUM)
10249         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10250                 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10251                 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10252                 phba->sli4_hba.iov.vf_number);
10253     else
10254         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10255                 "3028 GET_FUNCTION_CONFIG: failed to find "
10256                 "Resource Descriptor:x%x\n",
10257                 LPFC_RSRC_DESC_TYPE_FCFCOE);
10258 
10259 read_cfg_out:
10260     mempool_free(pmb, phba->mbox_mem_pool);
10261     return rc;
10262 }
10263 
10264 /**
10265  * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
10266  * @phba: pointer to lpfc hba data structure.
10267  *
10268  * This routine is invoked to setup the port-side endian order when
10269  * the port if_type is 0.  This routine has no function for other
10270  * if_types.
10271  *
10272  * Return codes
10273  *      0 - successful
10274  *      -ENOMEM - No available memory
10275  *      -EIO - The mailbox failed to complete successfully.
10276  **/
10277 static int
10278 lpfc_setup_endian_order(struct lpfc_hba *phba)
10279 {
10280     LPFC_MBOXQ_t *mboxq;
10281     uint32_t if_type, rc = 0;
10282     uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10283                       HOST_ENDIAN_HIGH_WORD1};
10284 
10285     if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10286     switch (if_type) {
10287     case LPFC_SLI_INTF_IF_TYPE_0:
10288         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10289                                GFP_KERNEL);
10290         if (!mboxq) {
10291             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10292                     "0492 Unable to allocate memory for "
10293                     "issuing SLI_CONFIG_SPECIAL mailbox "
10294                     "command\n");
10295             return -ENOMEM;
10296         }
10297 
10298         /*
10299          * The SLI4_CONFIG_SPECIAL mailbox command requires the first
10300          * two words to contain special data values and no other data.
10301          */
10302         memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10303         memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10304         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10305         if (rc != MBX_SUCCESS) {
10306             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10307                     "0493 SLI_CONFIG_SPECIAL mailbox "
10308                     "failed with status x%x\n",
10309                     rc);
10310             rc = -EIO;
10311         }
10312         mempool_free(mboxq, phba->mbox_mem_pool);
10313         break;
10314     case LPFC_SLI_INTF_IF_TYPE_6:
10315     case LPFC_SLI_INTF_IF_TYPE_2:
10316     case LPFC_SLI_INTF_IF_TYPE_1:
10317     default:
10318         break;
10319     }
10320     return rc;
10321 }
10322 
10323 /**
10324  * lpfc_sli4_queue_verify - Verify and update EQ counts
10325  * @phba: pointer to lpfc hba data structure.
10326  *
10327  * This routine is invoked to check the user settable queue counts for EQs.
10328  * After this routine is called the counts will be set to valid values that
10329  * adhere to the constraints of the system's interrupt vectors and the port's
10330  * queue resources.
10331  *
10332  * Return codes
10333  *      0 - successful
10334  *      -ENOMEM - No available memory
10335  **/
10336 static int
10337 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10338 {
10339     /*
10340      * Sanity check for configured queue parameters against the run-time
10341      * device parameters
10342      */
10343 
10344     if (phba->nvmet_support) {
10345         if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10346             phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10347         if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10348             phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10349     }
10350 
10351     lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10352             "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10353             phba->cfg_hdw_queue, phba->cfg_irq_chann,
10354             phba->cfg_nvmet_mrq);
10355 
10356     /* Get EQ depth from module parameter, fake the default for now */
10357     phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10358     phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10359 
10360     /* Get CQ depth from module parameter, fake the default for now */
10361     phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10362     phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10363     return 0;
10364 }
10365 
10366 static int
10367 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10368 {
10369     struct lpfc_queue *qdesc;
10370     u32 wqesize;
10371     int cpu;
10372 
10373     cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10374     /* Create Fast Path IO CQs */
10375     if (phba->enab_exp_wqcq_pages)
10376         /* Increase the CQ size when WQEs contain an embedded cdb */
10377         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10378                           phba->sli4_hba.cq_esize,
10379                           LPFC_CQE_EXP_COUNT, cpu);
10380 
10381     else
10382         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10383                           phba->sli4_hba.cq_esize,
10384                           phba->sli4_hba.cq_ecount, cpu);
10385     if (!qdesc) {
10386         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10387                 "0499 Failed allocate fast-path IO CQ (%d)\n",
10388                 idx);
10389         return 1;
10390     }
10391     qdesc->qe_valid = 1;
10392     qdesc->hdwq = idx;
10393     qdesc->chann = cpu;
10394     phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10395 
10396     /* Create Fast Path IO WQs */
10397     if (phba->enab_exp_wqcq_pages) {
10398         /* Increase the WQ size when WQEs contain an embedded cdb */
10399         wqesize = (phba->fcp_embed_io) ?
10400             LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10401         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10402                           wqesize,
10403                           LPFC_WQE_EXP_COUNT, cpu);
10404     } else
10405         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10406                           phba->sli4_hba.wq_esize,
10407                           phba->sli4_hba.wq_ecount, cpu);
10408 
10409     if (!qdesc) {
10410         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10411                 "0503 Failed allocate fast-path IO WQ (%d)\n",
10412                 idx);
10413         return 1;
10414     }
10415     qdesc->hdwq = idx;
10416     qdesc->chann = cpu;
10417     phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10418     list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10419     return 0;
10420 }
10421 
10422 /**
10423  * lpfc_sli4_queue_create - Create all the SLI4 queues
10424  * @phba: pointer to lpfc hba data structure.
10425  *
10426  * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
10427  * operation. For each SLI4 queue type, the parameters such as queue entry
10428  * count (queue depth) shall be taken from the module parameter. For now,
10429  * we just use some constant number as a placeholder.
10430  *
10431  * Return codes
10432  *      0 - successful
10433  *      -ENOMEM - No available memory
10434  *      -EIO - The mailbox failed to complete successfully.
10435  **/
10436 int
10437 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10438 {
10439     struct lpfc_queue *qdesc;
10440     int idx, cpu, eqcpu;
10441     struct lpfc_sli4_hdw_queue *qp;
10442     struct lpfc_vector_map_info *cpup;
10443     struct lpfc_vector_map_info *eqcpup;
10444     struct lpfc_eq_intr_info *eqi;
10445 
10446     /*
10447      * Create HBA Record arrays.
10448      * Both NVME and FCP will share the same vectors / EQs
10449      */
10450     phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10451     phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10452     phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10453     phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10454     phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10455     phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10456     phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10457     phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10458     phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10459     phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10460 
10461     if (!phba->sli4_hba.hdwq) {
10462         phba->sli4_hba.hdwq = kcalloc(
10463             phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10464             GFP_KERNEL);
10465         if (!phba->sli4_hba.hdwq) {
10466             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10467                     "6427 Failed allocate memory for "
10468                     "fast-path Hardware Queue array\n");
10469             goto out_error;
10470         }
10471         /* Prepare hardware queues to take IO buffers */
10472         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10473             qp = &phba->sli4_hba.hdwq[idx];
10474             spin_lock_init(&qp->io_buf_list_get_lock);
10475             spin_lock_init(&qp->io_buf_list_put_lock);
10476             INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10477             INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10478             qp->get_io_bufs = 0;
10479             qp->put_io_bufs = 0;
10480             qp->total_io_bufs = 0;
10481             spin_lock_init(&qp->abts_io_buf_list_lock);
10482             INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10483             qp->abts_scsi_io_bufs = 0;
10484             qp->abts_nvme_io_bufs = 0;
10485             INIT_LIST_HEAD(&qp->sgl_list);
10486             INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10487             spin_lock_init(&qp->hdwq_lock);
10488         }
10489     }
10490 
10491     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10492         if (phba->nvmet_support) {
10493             phba->sli4_hba.nvmet_cqset = kcalloc(
10494                     phba->cfg_nvmet_mrq,
10495                     sizeof(struct lpfc_queue *),
10496                     GFP_KERNEL);
10497             if (!phba->sli4_hba.nvmet_cqset) {
10498                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10499                     "3121 Fail allocate memory for "
10500                     "fast-path CQ set array\n");
10501                 goto out_error;
10502             }
10503             phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10504                     phba->cfg_nvmet_mrq,
10505                     sizeof(struct lpfc_queue *),
10506                     GFP_KERNEL);
10507             if (!phba->sli4_hba.nvmet_mrq_hdr) {
10508                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10509                     "3122 Fail allocate memory for "
10510                     "fast-path RQ set hdr array\n");
10511                 goto out_error;
10512             }
10513             phba->sli4_hba.nvmet_mrq_data = kcalloc(
10514                     phba->cfg_nvmet_mrq,
10515                     sizeof(struct lpfc_queue *),
10516                     GFP_KERNEL);
10517             if (!phba->sli4_hba.nvmet_mrq_data) {
10518                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10519                     "3124 Fail allocate memory for "
10520                     "fast-path RQ set data array\n");
10521                 goto out_error;
10522             }
10523         }
10524     }
10525 
10526     INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10527 
10528     /* Create HBA Event Queues (EQs) */
10529     for_each_present_cpu(cpu) {
10530         /* We only want to create 1 EQ per vector, even though
10531          * multiple CPUs might be using that vector, so only
10532          * select the CPUs that are flagged LPFC_CPU_FIRST_IRQ.
10533          */
10534         cpup = &phba->sli4_hba.cpu_map[cpu];
10535         if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10536             continue;
10537 
10538         /* Get a ptr to the Hardware Queue associated with this CPU */
10539         qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10540 
10541         /* Allocate an EQ */
10542         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10543                           phba->sli4_hba.eq_esize,
10544                           phba->sli4_hba.eq_ecount, cpu);
10545         if (!qdesc) {
10546             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10547                     "0497 Failed allocate EQ (%d)\n",
10548                     cpup->hdwq);
10549             goto out_error;
10550         }
10551         qdesc->qe_valid = 1;
10552         qdesc->hdwq = cpup->hdwq;
10553         qdesc->chann = cpu; /* First CPU this EQ is affinitized to */
10554         qdesc->last_cpu = qdesc->chann;
10555 
10556         /* Save the allocated EQ in the Hardware Queue */
10557         qp->hba_eq = qdesc;
10558 
10559         eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10560         list_add(&qdesc->cpu_list, &eqi->list);
10561     }
10562 
10563     /* Now we need to populate the other Hardware Queues that share
10564      * an IRQ vector with the associated EQ ptr.
10565      */
10566     for_each_present_cpu(cpu) {
10567         cpup = &phba->sli4_hba.cpu_map[cpu];
10568 
10569         /* Check for EQ already allocated in previous loop */
10570         if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10571             continue;
10572 
10573         /* Check for multiple CPUs per hdwq */
10574         qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10575         if (qp->hba_eq)
10576             continue;
10577 
10578         /* We need to share an EQ for this hdwq */
10579         eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10580         eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10581         qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10582     }
10583 
10584     /* Allocate IO Path SLI4 CQ/WQs */
10585     for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10586         if (lpfc_alloc_io_wq_cq(phba, idx))
10587             goto out_error;
10588     }
10589 
10590     if (phba->nvmet_support) {
10591         for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10592             cpu = lpfc_find_cpu_handle(phba, idx,
10593                            LPFC_FIND_BY_HDWQ);
10594             qdesc = lpfc_sli4_queue_alloc(phba,
10595                               LPFC_DEFAULT_PAGE_SIZE,
10596                               phba->sli4_hba.cq_esize,
10597                               phba->sli4_hba.cq_ecount,
10598                               cpu);
10599             if (!qdesc) {
10600                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10601                         "3142 Failed allocate NVME "
10602                         "CQ Set (%d)\n", idx);
10603                 goto out_error;
10604             }
10605             qdesc->qe_valid = 1;
10606             qdesc->hdwq = idx;
10607             qdesc->chann = cpu;
10608             phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10609         }
10610     }
10611 
10612     /*
10613      * Create Slow Path Completion Queues (CQs)
10614      */
10615 
10616     cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10617     /* Create slow-path Mailbox Command Complete Queue */
10618     qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10619                       phba->sli4_hba.cq_esize,
10620                       phba->sli4_hba.cq_ecount, cpu);
10621     if (!qdesc) {
10622         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10623                 "0500 Failed allocate slow-path mailbox CQ\n");
10624         goto out_error;
10625     }
10626     qdesc->qe_valid = 1;
10627     phba->sli4_hba.mbx_cq = qdesc;
10628 
10629     /* Create slow-path ELS Complete Queue */
10630     qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10631                       phba->sli4_hba.cq_esize,
10632                       phba->sli4_hba.cq_ecount, cpu);
10633     if (!qdesc) {
10634         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10635                 "0501 Failed allocate slow-path ELS CQ\n");
10636         goto out_error;
10637     }
10638     qdesc->qe_valid = 1;
10639     qdesc->chann = cpu;
10640     phba->sli4_hba.els_cq = qdesc;
10641 
10642 
10643     /*
10644      * Create Slow Path Work Queues (WQs)
10645      */
10646 
10647     /* Create Mailbox Command Queue */
10648 
10649     qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10650                       phba->sli4_hba.mq_esize,
10651                       phba->sli4_hba.mq_ecount, cpu);
10652     if (!qdesc) {
10653         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10654                 "0505 Failed allocate slow-path MQ\n");
10655         goto out_error;
10656     }
10657     qdesc->chann = cpu;
10658     phba->sli4_hba.mbx_wq = qdesc;
10659 
10660     /*
10661      * Create ELS Work Queues
10662      */
10663 
10664     /* Create slow-path ELS Work Queue */
10665     qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10666                       phba->sli4_hba.wq_esize,
10667                       phba->sli4_hba.wq_ecount, cpu);
10668     if (!qdesc) {
10669         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10670                 "0504 Failed allocate slow-path ELS WQ\n");
10671         goto out_error;
10672     }
10673     qdesc->chann = cpu;
10674     phba->sli4_hba.els_wq = qdesc;
10675     list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10676 
10677     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10678         /* Create NVME LS Complete Queue */
10679         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10680                           phba->sli4_hba.cq_esize,
10681                           phba->sli4_hba.cq_ecount, cpu);
10682         if (!qdesc) {
10683             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10684                     "6079 Failed allocate NVME LS CQ\n");
10685             goto out_error;
10686         }
10687         qdesc->chann = cpu;
10688         qdesc->qe_valid = 1;
10689         phba->sli4_hba.nvmels_cq = qdesc;
10690 
10691         /* Create NVME LS Work Queue */
10692         qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10693                           phba->sli4_hba.wq_esize,
10694                           phba->sli4_hba.wq_ecount, cpu);
10695         if (!qdesc) {
10696             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10697                     "6080 Failed allocate NVME LS WQ\n");
10698             goto out_error;
10699         }
10700         qdesc->chann = cpu;
10701         phba->sli4_hba.nvmels_wq = qdesc;
10702         list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10703     }
10704 
10705     /*
10706      * Create Receive Queue (RQ)
10707      */
10708 
10709     /* Create Receive Queue for header */
10710     qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10711                       phba->sli4_hba.rq_esize,
10712                       phba->sli4_hba.rq_ecount, cpu);
10713     if (!qdesc) {
10714         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10715                 "0506 Failed allocate receive HRQ\n");
10716         goto out_error;
10717     }
10718     phba->sli4_hba.hdr_rq = qdesc;
10719 
10720     /* Create Receive Queue for data */
10721     qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10722                       phba->sli4_hba.rq_esize,
10723                       phba->sli4_hba.rq_ecount, cpu);
10724     if (!qdesc) {
10725         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10726                 "0507 Failed allocate receive DRQ\n");
10727         goto out_error;
10728     }
10729     phba->sli4_hba.dat_rq = qdesc;
10730 
10731     if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10732         phba->nvmet_support) {
10733         for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10734             cpu = lpfc_find_cpu_handle(phba, idx,
10735                            LPFC_FIND_BY_HDWQ);
10736             /* Create NVMET Receive Queue for header */
10737             qdesc = lpfc_sli4_queue_alloc(phba,
10738                               LPFC_DEFAULT_PAGE_SIZE,
10739                               phba->sli4_hba.rq_esize,
10740                               LPFC_NVMET_RQE_DEF_COUNT,
10741                               cpu);
10742             if (!qdesc) {
10743                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10744                         "3146 Failed allocate "
10745                         "receive HRQ\n");
10746                 goto out_error;
10747             }
10748             qdesc->hdwq = idx;
10749             phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10750 
10751             /* Only needed for header of RQ pair */
10752             qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10753                            GFP_KERNEL,
10754                            cpu_to_node(cpu));
10755             if (qdesc->rqbp == NULL) {
10756                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10757                         "6131 Failed allocate "
10758                         "Header RQBP\n");
10759                 goto out_error;
10760             }
10761 
10762             /* Put list in known state in case driver load fails. */
10763             INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10764 
10765             /* Create NVMET Receive Queue for data */
10766             qdesc = lpfc_sli4_queue_alloc(phba,
10767                               LPFC_DEFAULT_PAGE_SIZE,
10768                               phba->sli4_hba.rq_esize,
10769                               LPFC_NVMET_RQE_DEF_COUNT,
10770                               cpu);
10771             if (!qdesc) {
10772                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10773                         "3156 Failed allocate "
10774                         "receive DRQ\n");
10775                 goto out_error;
10776             }
10777             qdesc->hdwq = idx;
10778             phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10779         }
10780     }
10781 
10782     /* Clear NVME stats */
10783     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10784         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10785             memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10786                    sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10787         }
10788     }
10789 
10790     /* Clear SCSI stats */
10791     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10792         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10793             memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10794                    sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10795         }
10796     }
10797 
10798     return 0;
10799 
10800 out_error:
10801     lpfc_sli4_queue_destroy(phba);
10802     return -ENOMEM;
10803 }
10804 
10805 static inline void
10806 __lpfc_sli4_release_queue(struct lpfc_queue **qp)
10807 {
10808     if (*qp != NULL) {
10809         lpfc_sli4_queue_free(*qp);
10810         *qp = NULL;
10811     }
10812 }
10813 
10814 static inline void
10815 lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10816 {
10817     int idx;
10818 
10819     if (*qs == NULL)
10820         return;
10821 
10822     for (idx = 0; idx < max; idx++)
10823         __lpfc_sli4_release_queue(&(*qs)[idx]);
10824 
10825     kfree(*qs);
10826     *qs = NULL;
10827 }
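/*
 * Note on the interface: lpfc_sli4_release_queues() takes a pointer to the
 * queue-array pointer so it can free every element and also NULL the
 * caller's pointer in a single call, e.g. as lpfc_sli4_queue_destroy()
 * does below:
 *
 *     lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
 *                              phba->cfg_nvmet_mrq);
 */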
10828 
10829 static inline void
10830 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10831 {
10832     struct lpfc_sli4_hdw_queue *hdwq;
10833     struct lpfc_queue *eq;
10834     uint32_t idx;
10835 
10836     hdwq = phba->sli4_hba.hdwq;
10837 
10838     /* Loop thru all Hardware Queues */
10839     for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10840         /* Free the CQ/WQ corresponding to the Hardware Queue */
10841         lpfc_sli4_queue_free(hdwq[idx].io_cq);
10842         lpfc_sli4_queue_free(hdwq[idx].io_wq);
10843         hdwq[idx].hba_eq = NULL;
10844         hdwq[idx].io_cq = NULL;
10845         hdwq[idx].io_wq = NULL;
10846         if (phba->cfg_xpsgl && !phba->nvmet_support)
10847             lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10848         lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10849     }
10850     /* Loop thru all IRQ vectors */
10851     for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10852         /* Free the EQ corresponding to the IRQ vector */
10853         eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10854         lpfc_sli4_queue_free(eq);
10855         phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10856     }
10857 }
10858 
10859 /**
10860  * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
10861  * @phba: pointer to lpfc hba data structure.
10862  *
10863  * This routine is invoked to release all the SLI4 queues with the FCoE HBA
10864  * operation.
10865  *
10866  * Return codes
10867  *      0 - successful
10868  *      -ENOMEM - No available memory
10869  *      -EIO - The mailbox failed to complete successfully.
10870  **/
10871 void
10872 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10873 {
10874     /*
10875      * Set FREE_INIT before beginning to free the queues.
10876      * Wait until all users of the queues acknowledge that the
10877      * queues may be released by clearing FREE_WAIT.
10878      */
10879     spin_lock_irq(&phba->hbalock);
10880     phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10881     while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10882         spin_unlock_irq(&phba->hbalock);
10883         msleep(20);
10884         spin_lock_irq(&phba->hbalock);
10885     }
10886     spin_unlock_irq(&phba->hbalock);
10887 
10888     lpfc_sli4_cleanup_poll_list(phba);
10889 
10890     /* Release HBA eqs */
10891     if (phba->sli4_hba.hdwq)
10892         lpfc_sli4_release_hdwq(phba);
10893 
10894     if (phba->nvmet_support) {
10895         lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10896                      phba->cfg_nvmet_mrq);
10897 
10898         lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10899                      phba->cfg_nvmet_mrq);
10900         lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10901                      phba->cfg_nvmet_mrq);
10902     }
10903 
10904     /* Release mailbox command work queue */
10905     __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10906 
10907     /* Release ELS work queue */
10908     __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10909 
10910     /* Release NVME LS work queue */
10911     __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10912 
10913     /* Release unsolicited receive queue */
10914     __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10915     __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10916 
10917     /* Release ELS complete queue */
10918     __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10919 
10920     /* Release NVME LS complete queue */
10921     __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10922 
10923     /* Release mailbox command complete queue */
10924     __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10925 
10926     /* Everything on this list has been freed */
10927     INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10928 
10929     /* Done with freeing the queues */
10930     spin_lock_irq(&phba->hbalock);
10931     phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10932     spin_unlock_irq(&phba->hbalock);
10933 }
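/*
 * Sketch of the expected user-side handshake (an assumption drawn from the
 * FREE_INIT/FREE_WAIT flags handled above, not copied from a specific
 * caller): code that must touch the queues while teardown could run would
 * do something like:
 *
 *     spin_lock_irq(&phba->hbalock);
 *     if (phba->sli.sli_flag & LPFC_QUEUE_FREE_INIT) {
 *         spin_unlock_irq(&phba->hbalock);
 *         return;                              (teardown in progress)
 *     }
 *     phba->sli.sli_flag |= LPFC_QUEUE_FREE_WAIT;
 *     spin_unlock_irq(&phba->hbalock);
 *     ... safely reference the queues ...
 *     spin_lock_irq(&phba->hbalock);
 *     phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
 *     spin_unlock_irq(&phba->hbalock);
 */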
10934 
10935 int
10936 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10937 {
10938     struct lpfc_rqb *rqbp;
10939     struct lpfc_dmabuf *h_buf;
10940     struct rqb_dmabuf *rqb_buffer;
10941 
10942     rqbp = rq->rqbp;
10943     while (!list_empty(&rqbp->rqb_buffer_list)) {
10944         list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10945                  struct lpfc_dmabuf, list);
10946 
10947         rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10948         (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10949         rqbp->buffer_count--;
10950     }
10951     return 1;
10952 }
10953 
10954 static int
10955 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10956     struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10957     int qidx, uint32_t qtype)
10958 {
10959     struct lpfc_sli_ring *pring;
10960     int rc;
10961 
10962     if (!eq || !cq || !wq) {
10963         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10964             "6085 Fast-path %s (%d) not allocated\n",
10965             ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10966         return -ENOMEM;
10967     }
10968 
10969     /* create the CQ first */
10970     rc = lpfc_cq_create(phba, cq, eq,
10971             (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10972     if (rc) {
10973         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10974                 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
10975                 qidx, (uint32_t)rc);
10976         return rc;
10977     }
10978 
10979     if (qtype != LPFC_MBOX) {
10980         /* Setup cq_map for fast lookup */
10981         if (cq_map)
10982             *cq_map = cq->queue_id;
10983 
10984         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10985             "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10986             qidx, cq->queue_id, qidx, eq->queue_id);
10987 
10988         /* create the wq */
10989         rc = lpfc_wq_create(phba, wq, cq, qtype);
10990         if (rc) {
10991             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10992                 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
10993                 qidx, (uint32_t)rc);
10994             /* no need to tear down cq - caller will do so */
10995             return rc;
10996         }
10997 
10998         /* Bind this CQ/WQ to the NVME ring */
10999         pring = wq->pring;
11000         pring->sli.sli4.wqp = (void *)wq;
11001         cq->pring = pring;
11002 
11003         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11004             "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
11005             qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
11006     } else {
11007         rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
11008         if (rc) {
11009             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11010                     "0539 Failed setup of slow-path MQ: "
11011                     "rc = 0x%x\n", rc);
11012             /* no need to tear down cq - caller will do so */
11013             return rc;
11014         }
11015 
11016         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11017             "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
11018             phba->sli4_hba.mbx_wq->queue_id,
11019             phba->sli4_hba.mbx_cq->queue_id);
11020     }
11021 
11022     return 0;
11023 }
11024 
11025 /**
11026  * lpfc_setup_cq_lookup - Setup the CQ lookup table
11027  * @phba: pointer to lpfc hba data structure.
11028  *
11029  * This routine will populate the cq_lookup table with all
11030  * available CQ queue_ids.
11031  **/
11032 static void
11033 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
11034 {
11035     struct lpfc_queue *eq, *childq;
11036     int qidx;
11037 
11038     memset(phba->sli4_hba.cq_lookup, 0,
11039            (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
11040     /* Loop thru all IRQ vectors */
11041     for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11042         /* Get the EQ corresponding to the IRQ vector */
11043         eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11044         if (!eq)
11045             continue;
11046         /* Loop through all CQs associated with that EQ */
11047         list_for_each_entry(childq, &eq->child_list, list) {
11048             if (childq->queue_id > phba->sli4_hba.cq_max)
11049                 continue;
11050             if (childq->subtype == LPFC_IO)
11051                 phba->sli4_hba.cq_lookup[childq->queue_id] =
11052                     childq;
11053         }
11054     }
11055 }
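/*
 * Usage sketch (assumed fast-path consumer, not code from this file): once
 * the table is populated, a completion handler can map a CQE's cq_id
 * straight to its CQ instead of walking eq->child_list:
 *
 *     if (cqid <= phba->sli4_hba.cq_max)
 *         cq = phba->sli4_hba.cq_lookup[cqid];
 *     if (!cq)
 *         ... fall back to searching the EQ's child list ...
 */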
11056 
11057 /**
11058  * lpfc_sli4_queue_setup - Set up all the SLI4 queues
11059  * @phba: pointer to lpfc hba data structure.
11060  *
11061  * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
11062  * operation.
11063  *
11064  * Return codes
11065  *      0 - successful
11066  *      -ENOMEM - No available memory
11067  *      -EIO - The mailbox failed to complete successfully.
11068  **/
11069 int
11070 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
11071 {
11072     uint32_t shdr_status, shdr_add_status;
11073     union lpfc_sli4_cfg_shdr *shdr;
11074     struct lpfc_vector_map_info *cpup;
11075     struct lpfc_sli4_hdw_queue *qp;
11076     LPFC_MBOXQ_t *mboxq;
11077     int qidx, cpu;
11078     uint32_t length, usdelay;
11079     int rc = -ENOMEM;
11080 
11081     /* Check for dual-ULP support */
11082     mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11083     if (!mboxq) {
11084         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11085                 "3249 Unable to allocate memory for "
11086                 "QUERY_FW_CFG mailbox command\n");
11087         return -ENOMEM;
11088     }
11089     length = (sizeof(struct lpfc_mbx_query_fw_config) -
11090           sizeof(struct lpfc_sli4_cfg_mhdr));
11091     lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11092              LPFC_MBOX_OPCODE_QUERY_FW_CFG,
11093              length, LPFC_SLI4_MBX_EMBED);
11094 
11095     rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11096 
11097     shdr = (union lpfc_sli4_cfg_shdr *)
11098             &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11099     shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11100     shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11101     if (shdr_status || shdr_add_status || rc) {
11102         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11103                 "3250 QUERY_FW_CFG mailbox failed with status "
11104                 "x%x add_status x%x, mbx status x%x\n",
11105                 shdr_status, shdr_add_status, rc);
11106         mempool_free(mboxq, phba->mbox_mem_pool);
11107         rc = -ENXIO;
11108         goto out_error;
11109     }
11110 
11111     phba->sli4_hba.fw_func_mode =
11112             mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
11113     phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
11114     phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
11115     phba->sli4_hba.physical_port =
11116             mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
11117     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11118             "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
11119             "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
11120             phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
11121 
11122     mempool_free(mboxq, phba->mbox_mem_pool);
11123 
11124     /*
11125      * Set up HBA Event Queues (EQs)
11126      */
11127     qp = phba->sli4_hba.hdwq;
11128 
11129     /* Set up HBA event queue */
11130     if (!qp) {
11131         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11132                 "3147 Fast-path EQs not allocated\n");
11133         rc = -ENOMEM;
11134         goto out_error;
11135     }
11136 
11137     /* Loop thru all IRQ vectors */
11138     for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11139         /* Create HBA Event Queues (EQs) in order */
11140         for_each_present_cpu(cpu) {
11141             cpup = &phba->sli4_hba.cpu_map[cpu];
11142 
11143          /* Look for the CPU that's using that vector with
11144              * LPFC_CPU_FIRST_IRQ set.
11145              */
11146             if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11147                 continue;
11148             if (qidx != cpup->eq)
11149                 continue;
11150 
11151             /* Create an EQ for that vector */
11152             rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11153                         phba->cfg_fcp_imax);
11154             if (rc) {
11155                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11156                         "0523 Failed setup of fast-path"
11157                         " EQ (%d), rc = 0x%x\n",
11158                         cpup->eq, (uint32_t)rc);
11159                 goto out_destroy;
11160             }
11161 
11162             /* Save the EQ for that vector in the hba_eq_hdl */
11163             phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11164                 qp[cpup->hdwq].hba_eq;
11165 
11166             lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11167                     "2584 HBA EQ setup: queue[%d]-id=%d\n",
11168                     cpup->eq,
11169                     qp[cpup->hdwq].hba_eq->queue_id);
11170         }
11171     }
11172 
11173     /* Loop thru all Hardware Queues */
11174     for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11175         cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
11176         cpup = &phba->sli4_hba.cpu_map[cpu];
11177 
11178         /* Create the CQ/WQ corresponding to the Hardware Queue */
11179         rc = lpfc_create_wq_cq(phba,
11180                        phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11181                        qp[qidx].io_cq,
11182                        qp[qidx].io_wq,
11183                        &phba->sli4_hba.hdwq[qidx].io_cq_map,
11184                        qidx,
11185                        LPFC_IO);
11186         if (rc) {
11187             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11188                     "0535 Failed to setup fastpath "
11189                     "IO WQ/CQ (%d), rc = 0x%x\n",
11190                     qidx, (uint32_t)rc);
11191             goto out_destroy;
11192         }
11193     }
11194 
11195     /*
11196      * Set up Slow Path Complete Queues (CQs)
11197      */
11198 
11199     /* Set up slow-path MBOX CQ/MQ */
11200 
11201     if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11202         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11203                 "0528 %s not allocated\n",
11204                 phba->sli4_hba.mbx_cq ?
11205                 "Mailbox WQ" : "Mailbox CQ");
11206         rc = -ENOMEM;
11207         goto out_destroy;
11208     }
11209 
11210     rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11211                    phba->sli4_hba.mbx_cq,
11212                    phba->sli4_hba.mbx_wq,
11213                    NULL, 0, LPFC_MBOX);
11214     if (rc) {
11215         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11216             "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11217             (uint32_t)rc);
11218         goto out_destroy;
11219     }
11220     if (phba->nvmet_support) {
11221         if (!phba->sli4_hba.nvmet_cqset) {
11222             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11223                     "3165 Fast-path NVME CQ Set "
11224                     "array not allocated\n");
11225             rc = -ENOMEM;
11226             goto out_destroy;
11227         }
11228         if (phba->cfg_nvmet_mrq > 1) {
11229             rc = lpfc_cq_create_set(phba,
11230                     phba->sli4_hba.nvmet_cqset,
11231                     qp,
11232                     LPFC_WCQ, LPFC_NVMET);
11233             if (rc) {
11234                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11235                         "3164 Failed setup of NVME CQ "
11236                         "Set, rc = 0x%x\n",
11237                         (uint32_t)rc);
11238                 goto out_destroy;
11239             }
11240         } else {
11241             /* Set up NVMET Receive Complete Queue */
11242             rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11243                         qp[0].hba_eq,
11244                         LPFC_WCQ, LPFC_NVMET);
11245             if (rc) {
11246                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11247                         "6089 Failed setup NVMET CQ: "
11248                         "rc = 0x%x\n", (uint32_t)rc);
11249                 goto out_destroy;
11250             }
11251             phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11252 
11253             lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11254                     "6090 NVMET CQ setup: cq-id=%d, "
11255                     "parent eq-id=%d\n",
11256                     phba->sli4_hba.nvmet_cqset[0]->queue_id,
11257                     qp[0].hba_eq->queue_id);
11258         }
11259     }
11260 
11261     /* Set up slow-path ELS WQ/CQ */
11262     if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11263         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11264                 "0530 ELS %s not allocated\n",
11265                 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11266         rc = -ENOMEM;
11267         goto out_destroy;
11268     }
11269     rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11270                    phba->sli4_hba.els_cq,
11271                    phba->sli4_hba.els_wq,
11272                    NULL, 0, LPFC_ELS);
11273     if (rc) {
11274         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11275                 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11276                 (uint32_t)rc);
11277         goto out_destroy;
11278     }
11279     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11280             "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11281             phba->sli4_hba.els_wq->queue_id,
11282             phba->sli4_hba.els_cq->queue_id);
11283 
11284     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11285         /* Set up NVME LS Complete Queue */
11286         if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11287             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11288                     "6091 LS %s not allocated\n",
11289                     phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11290             rc = -ENOMEM;
11291             goto out_destroy;
11292         }
11293         rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11294                        phba->sli4_hba.nvmels_cq,
11295                        phba->sli4_hba.nvmels_wq,
11296                        NULL, 0, LPFC_NVME_LS);
11297         if (rc) {
11298             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11299                     "0526 Failed setup of NVME LS WQ/CQ: "
11300                     "rc = 0x%x\n", (uint32_t)rc);
11301             goto out_destroy;
11302         }
11303 
11304         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11305                 "6096 NVME LS WQ setup: wq-id=%d, "
11306                 "parent cq-id=%d\n",
11307                 phba->sli4_hba.nvmels_wq->queue_id,
11308                 phba->sli4_hba.nvmels_cq->queue_id);
11309     }
11310 
11311     /*
11312      * Create NVMET Receive Queue (RQ)
11313      */
11314     if (phba->nvmet_support) {
11315         if ((!phba->sli4_hba.nvmet_cqset) ||
11316             (!phba->sli4_hba.nvmet_mrq_hdr) ||
11317             (!phba->sli4_hba.nvmet_mrq_data)) {
11318             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11319                     "6130 MRQ CQ Queues not "
11320                     "allocated\n");
11321             rc = -ENOMEM;
11322             goto out_destroy;
11323         }
11324         if (phba->cfg_nvmet_mrq > 1) {
11325             rc = lpfc_mrq_create(phba,
11326                          phba->sli4_hba.nvmet_mrq_hdr,
11327                          phba->sli4_hba.nvmet_mrq_data,
11328                          phba->sli4_hba.nvmet_cqset,
11329                          LPFC_NVMET);
11330             if (rc) {
11331                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11332                         "6098 Failed setup of NVMET "
11333                         "MRQ: rc = 0x%x\n",
11334                         (uint32_t)rc);
11335                 goto out_destroy;
11336             }
11337 
11338         } else {
11339             rc = lpfc_rq_create(phba,
11340                         phba->sli4_hba.nvmet_mrq_hdr[0],
11341                         phba->sli4_hba.nvmet_mrq_data[0],
11342                         phba->sli4_hba.nvmet_cqset[0],
11343                         LPFC_NVMET);
11344             if (rc) {
11345                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11346                         "6057 Failed setup of NVMET "
11347                         "Receive Queue: rc = 0x%x\n",
11348                         (uint32_t)rc);
11349                 goto out_destroy;
11350             }
11351 
11352             lpfc_printf_log(
11353                 phba, KERN_INFO, LOG_INIT,
11354                 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11355                 "dat-rq-id=%d parent cq-id=%d\n",
11356                 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11357                 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11358                 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11359 
11360         }
11361     }
11362 
11363     if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11364         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11365                 "0540 Receive Queue not allocated\n");
11366         rc = -ENOMEM;
11367         goto out_destroy;
11368     }
11369 
11370     rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11371                 phba->sli4_hba.els_cq, LPFC_USOL);
11372     if (rc) {
11373         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11374                 "0541 Failed setup of Receive Queue: "
11375                 "rc = 0x%x\n", (uint32_t)rc);
11376         goto out_destroy;
11377     }
11378 
11379     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11380             "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11381             "parent cq-id=%d\n",
11382             phba->sli4_hba.hdr_rq->queue_id,
11383             phba->sli4_hba.dat_rq->queue_id,
11384             phba->sli4_hba.els_cq->queue_id);
11385 
11386     if (phba->cfg_fcp_imax)
11387         usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11388     else
11389         usdelay = 0;
11390 
11391     for (qidx = 0; qidx < phba->cfg_irq_chann;
11392          qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11393         lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11394                      usdelay);
11395 
11396     if (phba->sli4_hba.cq_max) {
11397         kfree(phba->sli4_hba.cq_lookup);
11398         phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11399             sizeof(struct lpfc_queue *), GFP_KERNEL);
11400         if (!phba->sli4_hba.cq_lookup) {
11401             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11402                     "0549 Failed setup of CQ Lookup table: "
11403                     "size 0x%x\n", phba->sli4_hba.cq_max);
11404             rc = -ENOMEM;
11405             goto out_destroy;
11406         }
11407         lpfc_setup_cq_lookup(phba);
11408     }
11409     return 0;
11410 
11411 out_destroy:
11412     lpfc_sli4_queue_unset(phba);
11413 out_error:
11414     return rc;
11415 }
11416 
11417 /**
11418  * lpfc_sli4_queue_unset - Unset all the SLI4 queues
11419  * @phba: pointer to lpfc hba data structure.
11420  *
11421  * This routine is invoked to unset all the SLI4 queues with the FCoE HBA
11422  * operation.
11423  *
11424  * Return codes
11425  *      0 - successful
11426  *      -ENOMEM - No available memory
11427  *      -EIO - The mailbox failed to complete successfully.
11428  **/
11429 void
11430 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11431 {
11432     struct lpfc_sli4_hdw_queue *qp;
11433     struct lpfc_queue *eq;
11434     int qidx;
11435 
11436     /* Unset mailbox command work queue */
11437     if (phba->sli4_hba.mbx_wq)
11438         lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11439 
11440     /* Unset NVME LS work queue */
11441     if (phba->sli4_hba.nvmels_wq)
11442         lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11443 
11444     /* Unset ELS work queue */
11445     if (phba->sli4_hba.els_wq)
11446         lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11447 
11448     /* Unset unsolicited receive queue */
11449     if (phba->sli4_hba.hdr_rq)
11450         lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11451                 phba->sli4_hba.dat_rq);
11452 
11453     /* Unset mailbox command complete queue */
11454     if (phba->sli4_hba.mbx_cq)
11455         lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11456 
11457     /* Unset ELS complete queue */
11458     if (phba->sli4_hba.els_cq)
11459         lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11460 
11461     /* Unset NVME LS complete queue */
11462     if (phba->sli4_hba.nvmels_cq)
11463         lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11464 
11465     if (phba->nvmet_support) {
11466         /* Unset NVMET MRQ queue */
11467         if (phba->sli4_hba.nvmet_mrq_hdr) {
11468             for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11469                 lpfc_rq_destroy(
11470                     phba,
11471                     phba->sli4_hba.nvmet_mrq_hdr[qidx],
11472                     phba->sli4_hba.nvmet_mrq_data[qidx]);
11473         }
11474 
11475         /* Unset NVMET CQ Set complete queue */
11476         if (phba->sli4_hba.nvmet_cqset) {
11477             for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11478                 lpfc_cq_destroy(
11479                     phba, phba->sli4_hba.nvmet_cqset[qidx]);
11480         }
11481     }
11482 
11483     /* Unset fast-path SLI4 queues */
11484     if (phba->sli4_hba.hdwq) {
11485         /* Loop thru all Hardware Queues */
11486         for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11487             /* Destroy the CQ/WQ corresponding to Hardware Queue */
11488             qp = &phba->sli4_hba.hdwq[qidx];
11489             lpfc_wq_destroy(phba, qp->io_wq);
11490             lpfc_cq_destroy(phba, qp->io_cq);
11491         }
11492         /* Loop thru all IRQ vectors */
11493         for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11494             /* Destroy the EQ corresponding to the IRQ vector */
11495             eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11496             lpfc_eq_destroy(phba, eq);
11497         }
11498     }
11499 
11500     kfree(phba->sli4_hba.cq_lookup);
11501     phba->sli4_hba.cq_lookup = NULL;
11502     phba->sli4_hba.cq_max = 0;
11503 }
11504 
11505 /**
11506  * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
11507  * @phba: pointer to lpfc hba data structure.
11508  *
11509  * This routine is invoked to allocate and set up a pool of completion queue
11510  * events. The body of the completion queue event is a completion queue entry
11511  * (CQE). For now, this pool is used for the interrupt service routine to queue
11512  * the following HBA completion queue events for the worker thread to process:
11513  *   - Mailbox asynchronous events
11514  *   - Receive queue completion unsolicited events
11515  * Later, this can be used for all the slow-path events.
11516  *
11517  * Return codes
11518  *      0 - successful
11519  *      -ENOMEM - No available memory
11520  **/
11521 static int
11522 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11523 {
11524     struct lpfc_cq_event *cq_event;
11525     int i;
11526 
11527     for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11528         cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11529         if (!cq_event)
11530             goto out_pool_create_fail;
11531         list_add_tail(&cq_event->list,
11532                   &phba->sli4_hba.sp_cqe_event_pool);
11533     }
11534     return 0;
11535 
11536 out_pool_create_fail:
11537     lpfc_sli4_cq_event_pool_destroy(phba);
11538     return -ENOMEM;
11539 }
11540 
11541 /**
11542  * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
11543  * @phba: pointer to lpfc hba data structure.
11544  *
11545  * This routine is invoked to free the pool of completion queue events at
11546  * driver unload time. Note that it is the responsibility of the driver
11547  * cleanup routine to free all the outstanding completion-queue events
11548  * allocated from this pool back into the pool before invoking this routine
11549  * to destroy the pool.
11550  **/
11551 static void
11552 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11553 {
11554     struct lpfc_cq_event *cq_event, *next_cq_event;
11555 
11556     list_for_each_entry_safe(cq_event, next_cq_event,
11557                  &phba->sli4_hba.sp_cqe_event_pool, list) {
11558         list_del(&cq_event->list);
11559         kfree(cq_event);
11560     }
11561 }
11562 
11563 /**
11564  * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11565  * @phba: pointer to lpfc hba data structure.
11566  *
11567  * This routine is the lock free version of the API invoked to allocate a
11568  * completion-queue event from the free pool.
11569  *
11570  * Return: Pointer to the newly allocated completion-queue event if successful
11571  *         NULL otherwise.
11572  **/
11573 struct lpfc_cq_event *
11574 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11575 {
11576     struct lpfc_cq_event *cq_event = NULL;
11577 
11578     list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11579              struct lpfc_cq_event, list);
11580     return cq_event;
11581 }
11582 
11583 /**
11584  * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
11585  * @phba: pointer to lpfc hba data structure.
11586  *
11587  * This routine is the lock version of the API invoked to allocate a
11588  * completion-queue event from the free pool.
11589  *
11590  * Return: Pointer to the newly allocated completion-queue event if successful
11591  *         NULL otherwise.
11592  **/
11593 struct lpfc_cq_event *
11594 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11595 {
11596     struct lpfc_cq_event *cq_event;
11597     unsigned long iflags;
11598 
11599     spin_lock_irqsave(&phba->hbalock, iflags);
11600     cq_event = __lpfc_sli4_cq_event_alloc(phba);
11601     spin_unlock_irqrestore(&phba->hbalock, iflags);
11602     return cq_event;
11603 }
11604 
11605 /**
11606  * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11607  * @phba: pointer to lpfc hba data structure.
11608  * @cq_event: pointer to the completion queue event to be freed.
11609  *
11610  * This routine is the lock free version of the API invoked to release a
11611  * completion-queue event back into the free pool.
11612  **/
11613 void
11614 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11615                  struct lpfc_cq_event *cq_event)
11616 {
11617     list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11618 }
11619 
11620 /**
11621  * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
11622  * @phba: pointer to lpfc hba data structure.
11623  * @cq_event: pointer to the completion queue event to be freed.
11624  *
11625  * This routine is the lock version of the API invoked to release a
11626  * completion-queue event back into the free pool.
11627  **/
11628 void
11629 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11630                struct lpfc_cq_event *cq_event)
11631 {
11632     unsigned long iflags;
11633     spin_lock_irqsave(&phba->hbalock, iflags);
11634     __lpfc_sli4_cq_event_release(phba, cq_event);
11635     spin_unlock_irqrestore(&phba->hbalock, iflags);
11636 }
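
/*
 * Illustrative sketch (not part of the driver source): a typical
 * producer/consumer use of the pool managed by the routines above.  The
 * interrupt path grabs a pre-allocated event, fills it in and hands it to
 * the worker thread; the worker returns the event to the pool once it has
 * processed it.  The helper names queue_event_to_worker() and
 * process_cq_event() are hypothetical placeholders for this sketch only.
 *
 *	// in hard-IRQ context
 *	struct lpfc_cq_event *evt = lpfc_sli4_cq_event_alloc(phba);
 *	if (evt) {
 *		// ... copy the received CQE payload into evt ...
 *		queue_event_to_worker(phba, evt);	// hypothetical hand-off
 *	}
 *
 *	// later, in the worker thread
 *	process_cq_event(phba, evt);			// hypothetical processing
 *	lpfc_sli4_cq_event_release(phba, evt);		// return to the free pool
 */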
11637 
11638 /**
11639  * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
11640  * @phba: pointer to lpfc hba data structure.
11641  *
11642  * This routine releases all the pending completion-queue events back
11643  * into the free pool for device reset.
11644  **/
11645 static void
11646 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11647 {
11648     LIST_HEAD(cq_event_list);
11649     struct lpfc_cq_event *cq_event;
11650     unsigned long iflags;
11651 
11652     /* Retrieve all the pending WCQEs from pending WCQE lists */
11653 
11654     /* Pending ELS XRI abort events */
11655     spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11656     list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11657              &cq_event_list);
11658     spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11659 
11660     /* Pending async events */
11661     spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11662     list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11663              &cq_event_list);
11664     spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11665 
11666     while (!list_empty(&cq_event_list)) {
11667         list_remove_head(&cq_event_list, cq_event,
11668                  struct lpfc_cq_event, list);
11669         lpfc_sli4_cq_event_release(phba, cq_event);
11670     }
11671 }
11672 
11673 /**
11674  * lpfc_pci_function_reset - Reset pci function.
11675  * @phba: pointer to lpfc hba data structure.
11676  *
11677  * This routine is invoked to request a PCI function reset. It will destroy
11678  * all resources assigned to the PCI function that originates this request.
11679  *
11680  * Return codes
11681  *      0 - successful
11682  *      -ENOMEM - No available memory
11683  *      -EIO - The mailbox failed to complete successfully.
11684  **/
11685 int
11686 lpfc_pci_function_reset(struct lpfc_hba *phba)
11687 {
11688     LPFC_MBOXQ_t *mboxq;
11689     uint32_t rc = 0, if_type;
11690     uint32_t shdr_status, shdr_add_status;
11691     uint32_t rdy_chk;
11692     uint32_t port_reset = 0;
11693     union lpfc_sli4_cfg_shdr *shdr;
11694     struct lpfc_register reg_data;
11695     uint16_t devid;
11696 
11697     if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11698     switch (if_type) {
11699     case LPFC_SLI_INTF_IF_TYPE_0:
11700         mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11701                                GFP_KERNEL);
11702         if (!mboxq) {
11703             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11704                     "0494 Unable to allocate memory for "
11705                     "issuing SLI_FUNCTION_RESET mailbox "
11706                     "command\n");
11707             return -ENOMEM;
11708         }
11709 
11710         /* Setup PCI function reset mailbox-ioctl command */
11711         lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11712                  LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11713                  LPFC_SLI4_MBX_EMBED);
11714         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11715         shdr = (union lpfc_sli4_cfg_shdr *)
11716             &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11717         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11718         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11719                      &shdr->response);
11720         mempool_free(mboxq, phba->mbox_mem_pool);
11721         if (shdr_status || shdr_add_status || rc) {
11722             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11723                     "0495 SLI_FUNCTION_RESET mailbox "
11724                     "failed with status x%x add_status x%x,"
11725                     " mbx status x%x\n",
11726                     shdr_status, shdr_add_status, rc);
11727             rc = -ENXIO;
11728         }
11729         break;
11730     case LPFC_SLI_INTF_IF_TYPE_2:
11731     case LPFC_SLI_INTF_IF_TYPE_6:
11732 wait:
11733         /*
11734          * Poll the Port Status Register and wait for RDY for
11735          * up to 30 seconds. If the port doesn't respond, treat
11736          * it as an error.
11737          */
11738         for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
11739             if (lpfc_readl(phba->sli4_hba.u.if_type2.
11740                 STATUSregaddr, &reg_data.word0)) {
11741                 rc = -ENODEV;
11742                 goto out;
11743             }
11744             if (bf_get(lpfc_sliport_status_rdy, &reg_data))
11745                 break;
11746             msleep(20);
11747         }
11748 
11749         if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
11750             phba->work_status[0] = readl(
11751                 phba->sli4_hba.u.if_type2.ERR1regaddr);
11752             phba->work_status[1] = readl(
11753                 phba->sli4_hba.u.if_type2.ERR2regaddr);
11754             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11755                     "2890 Port not ready, port status reg "
11756                     "0x%x error 1=0x%x, error 2=0x%x\n",
11757                     reg_data.word0,
11758                     phba->work_status[0],
11759                     phba->work_status[1]);
11760             rc = -ENODEV;
11761             goto out;
11762         }
11763 
11764         if (bf_get(lpfc_sliport_status_pldv, &reg_data))
11765             lpfc_pldv_detect = true;
11766 
11767         if (!port_reset) {
11768             /*
11769              * Reset the port now
11770              */
11771             reg_data.word0 = 0;
11772             bf_set(lpfc_sliport_ctrl_end, &reg_data,
11773                    LPFC_SLIPORT_LITTLE_ENDIAN);
11774             bf_set(lpfc_sliport_ctrl_ip, &reg_data,
11775                    LPFC_SLIPORT_INIT_PORT);
11776             writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11777                    CTRLregaddr);
11778             /* flush */
11779             pci_read_config_word(phba->pcidev,
11780                          PCI_DEVICE_ID, &devid);
11781 
11782             port_reset = 1;
11783             msleep(20);
11784             goto wait;
11785         } else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
11786             rc = -ENODEV;
11787             goto out;
11788         }
11789         break;
11790 
11791     case LPFC_SLI_INTF_IF_TYPE_1:
11792     default:
11793         break;
11794     }
11795 
11796 out:
11797     /* Catch the not-ready port failure after a port reset. */
11798     if (rc) {
11799         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11800                 "3317 HBA not functional: IP Reset Failed "
11801                 "try: echo fw_reset > board_mode\n");
11802         rc = -ENODEV;
11803     }
11804 
11805     return rc;
11806 }
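
/*
 * Note on the readiness poll above (a worked check, not new behavior): the
 * loop makes at most 1500 passes with an msleep(20) between register reads,
 * i.e. roughly 1500 * 20 ms = 30 s, which is the "up to 30 seconds" window
 * mentioned in the comment before the port is declared not functional.
 */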
11807 
11808 /**
11809  * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
11810  * @phba: pointer to lpfc hba data structure.
11811  *
11812  * This routine is invoked to set up the PCI device memory space for device
11813  * with SLI-4 interface spec.
11814  *
11815  * Return codes
11816  *  0 - successful
11817  *  other values - error
11818  **/
11819 static int
11820 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11821 {
11822     struct pci_dev *pdev = phba->pcidev;
11823     unsigned long bar0map_len, bar1map_len, bar2map_len;
11824     int error;
11825     uint32_t if_type;
11826 
11827     if (!pdev)
11828         return -ENODEV;
11829 
11830     /* Set the device DMA mask size */
11831     error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11832     if (error)
11833         error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11834     if (error)
11835         return error;
11836 
11837     /*
11838      * The BARs and register set definitions and offset locations are
11839      * dependent on the if_type.
11840      */
11841     if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11842                   &phba->sli4_hba.sli_intf.word0)) {
11843         return -ENODEV;
11844     }
11845 
11846     /* There is no SLI3 failback for SLI4 devices. */
11847     if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11848         LPFC_SLI_INTF_VALID) {
11849         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11850                 "2894 SLI_INTF reg contents invalid "
11851                 "sli_intf reg 0x%x\n",
11852                 phba->sli4_hba.sli_intf.word0);
11853         return -ENODEV;
11854     }
11855 
11856     if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11857     /*
11858      * Get the bus address of SLI4 device Bar regions and the
11859      * number of bytes required by each mapping. The mapping of the
11860      * particular PCI BARs regions is dependent on the type of
11861      * SLI4 device.
11862      */
11863     if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11864         phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11865         bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11866 
11867         /*
11868          * Map SLI4 PCI Config Space Register base to a kernel virtual
11869          * addr
11870          */
11871         phba->sli4_hba.conf_regs_memmap_p =
11872             ioremap(phba->pci_bar0_map, bar0map_len);
11873         if (!phba->sli4_hba.conf_regs_memmap_p) {
11874             dev_printk(KERN_ERR, &pdev->dev,
11875                    "ioremap failed for SLI4 PCI config "
11876                    "registers.\n");
11877             return -ENODEV;
11878         }
11879         phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11880         /* Set up BAR0 PCI config space register memory map */
11881         lpfc_sli4_bar0_register_memmap(phba, if_type);
11882     } else {
11883         phba->pci_bar0_map = pci_resource_start(pdev, 1);
11884         bar0map_len = pci_resource_len(pdev, 1);
11885         if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
11886             dev_printk(KERN_ERR, &pdev->dev,
11887                "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
11888             return -ENODEV;
11889         }
11890         phba->sli4_hba.conf_regs_memmap_p =
11891                 ioremap(phba->pci_bar0_map, bar0map_len);
11892         if (!phba->sli4_hba.conf_regs_memmap_p) {
11893             dev_printk(KERN_ERR, &pdev->dev,
11894                 "ioremap failed for SLI4 PCI config "
11895                 "registers.\n");
11896             return -ENODEV;
11897         }
11898         lpfc_sli4_bar0_register_memmap(phba, if_type);
11899     }
11900 
11901     if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11902         if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11903             /*
11904              * Map SLI4 if type 0 HBA Control Register base to a
11905              * kernel virtual address and setup the registers.
11906              */
11907             phba->pci_bar1_map = pci_resource_start(pdev,
11908                                 PCI_64BIT_BAR2);
11909             bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11910             phba->sli4_hba.ctrl_regs_memmap_p =
11911                     ioremap(phba->pci_bar1_map,
11912                         bar1map_len);
11913             if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11914                 dev_err(&pdev->dev,
11915                        "ioremap failed for SLI4 HBA "
11916                         "control registers.\n");
11917                 error = -ENOMEM;
11918                 goto out_iounmap_conf;
11919             }
11920             phba->pci_bar2_memmap_p =
11921                      phba->sli4_hba.ctrl_regs_memmap_p;
11922             lpfc_sli4_bar1_register_memmap(phba, if_type);
11923         } else {
11924             error = -ENOMEM;
11925             goto out_iounmap_conf;
11926         }
11927     }
11928 
11929     if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11930         (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11931         /*
11932          * Map SLI4 if type 6 HBA Doorbell Register base to a kernel
11933          * virtual address and setup the registers.
11934          */
11935         phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11936         bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11937         phba->sli4_hba.drbl_regs_memmap_p =
11938                 ioremap(phba->pci_bar1_map, bar1map_len);
11939         if (!phba->sli4_hba.drbl_regs_memmap_p) {
11940             dev_err(&pdev->dev,
11941                "ioremap failed for SLI4 HBA doorbell registers.\n");
11942             error = -ENOMEM;
11943             goto out_iounmap_conf;
11944         }
11945         phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11946         lpfc_sli4_bar1_register_memmap(phba, if_type);
11947     }
11948 
11949     if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11950         if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11951             /*
11952              * Map SLI4 if type 0 HBA Doorbell Register base to
11953              * a kernel virtual address and setup the registers.
11954              */
11955             phba->pci_bar2_map = pci_resource_start(pdev,
11956                                 PCI_64BIT_BAR4);
11957             bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11958             phba->sli4_hba.drbl_regs_memmap_p =
11959                     ioremap(phba->pci_bar2_map,
11960                         bar2map_len);
11961             if (!phba->sli4_hba.drbl_regs_memmap_p) {
11962                 dev_err(&pdev->dev,
11963                        "ioremap failed for SLI4 HBA"
11964                        " doorbell registers.\n");
11965                 error = -ENOMEM;
11966                 goto out_iounmap_ctrl;
11967             }
11968             phba->pci_bar4_memmap_p =
11969                     phba->sli4_hba.drbl_regs_memmap_p;
11970             error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11971             if (error)
11972                 goto out_iounmap_all;
11973         } else {
11974             error = -ENOMEM;
11975             goto out_iounmap_all;
11976         }
11977     }
11978 
11979     if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11980         pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11981         /*
11982          * Map SLI4 if type 6 HBA DPP Register base to a kernel
11983          * virtual address and setup the registers.
11984          */
11985         phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11986         bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11987         phba->sli4_hba.dpp_regs_memmap_p =
11988                 ioremap(phba->pci_bar2_map, bar2map_len);
11989         if (!phba->sli4_hba.dpp_regs_memmap_p) {
11990             dev_err(&pdev->dev,
11991                "ioremap failed for SLI4 HBA dpp registers.\n");
11992             error = -ENOMEM;
11993             goto out_iounmap_ctrl;
11994         }
11995         phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11996     }
11997 
11998     /* Set up the EQ/CQ register handling functions now */
11999     switch (if_type) {
12000     case LPFC_SLI_INTF_IF_TYPE_0:
12001     case LPFC_SLI_INTF_IF_TYPE_2:
12002         phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
12003         phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
12004         phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
12005         break;
12006     case LPFC_SLI_INTF_IF_TYPE_6:
12007         phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
12008         phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
12009         phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
12010         break;
12011     default:
12012         break;
12013     }
12014 
12015     return 0;
12016 
12017 out_iounmap_all:
12018     iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12019 out_iounmap_ctrl:
12020     iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12021 out_iounmap_conf:
12022     iounmap(phba->sli4_hba.conf_regs_memmap_p);
12023 
12024     return error;
12025 }
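
/*
 * Illustrative sketch (an assumption, not driver code) of the BAR-mapping
 * pattern repeated above for BAR0/BAR2/BAR4: read the bus address and
 * length, ioremap the region, and unwind already-mapped regions on failure.
 * "bar" and the label name are placeholders.
 *
 *	resource_size_t start = pci_resource_start(pdev, bar);
 *	resource_size_t len   = pci_resource_len(pdev, bar);
 *	void __iomem *regs;
 *
 *	if (!start)
 *		goto out_unmap_previous;	// BAR not implemented on this port
 *	regs = ioremap(start, len);
 *	if (!regs)
 *		goto out_unmap_previous;	// iounmap() earlier BARs, return -ENOMEM
 */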
12026 
12027 /**
12028  * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
12029  * @phba: pointer to lpfc hba data structure.
12030  *
12031  * This routine is invoked to unset the PCI device memory space for device
12032  * with SLI-4 interface spec.
12033  **/
12034 static void
12035 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
12036 {
12037     uint32_t if_type;
12038     if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12039 
12040     switch (if_type) {
12041     case LPFC_SLI_INTF_IF_TYPE_0:
12042         iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12043         iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12044         iounmap(phba->sli4_hba.conf_regs_memmap_p);
12045         break;
12046     case LPFC_SLI_INTF_IF_TYPE_2:
12047         iounmap(phba->sli4_hba.conf_regs_memmap_p);
12048         break;
12049     case LPFC_SLI_INTF_IF_TYPE_6:
12050         iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12051         iounmap(phba->sli4_hba.conf_regs_memmap_p);
12052         if (phba->sli4_hba.dpp_regs_memmap_p)
12053             iounmap(phba->sli4_hba.dpp_regs_memmap_p);
12054         break;
12055     case LPFC_SLI_INTF_IF_TYPE_1:
12056     default:
12057         dev_printk(KERN_ERR, &phba->pcidev->dev,
12058                "FATAL - unsupported SLI4 interface type - %d\n",
12059                if_type);
12060         break;
12061     }
12062 }
12063 
12064 /**
12065  * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
12066  * @phba: pointer to lpfc hba data structure.
12067  *
12068  * This routine is invoked to enable the MSI-X interrupt vectors to device
12069  * with SLI-3 interface specs.
12070  *
12071  * Return codes
12072  *   0 - successful
12073  *   other values - error
12074  **/
12075 static int
12076 lpfc_sli_enable_msix(struct lpfc_hba *phba)
12077 {
12078     int rc;
12079     LPFC_MBOXQ_t *pmb;
12080 
12081     /* Set up MSI-X multi-message vectors */
12082     rc = pci_alloc_irq_vectors(phba->pcidev,
12083             LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
12084     if (rc < 0) {
12085         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12086                 "0420 PCI enable MSI-X failed (%d)\n", rc);
12087         goto vec_fail_out;
12088     }
12089 
12090     /*
12091      * Assign MSI-X vectors to interrupt handlers
12092      */
12093 
12094     /* vector-0 is associated with the slow-path handler */
12095     rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12096              &lpfc_sli_sp_intr_handler, 0,
12097              LPFC_SP_DRIVER_HANDLER_NAME, phba);
12098     if (rc) {
12099         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12100                 "0421 MSI-X slow-path request_irq failed "
12101                 "(%d)\n", rc);
12102         goto msi_fail_out;
12103     }
12104 
12105     /* vector-1 is associated with the fast-path handler */
12106     rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12107              &lpfc_sli_fp_intr_handler, 0,
12108              LPFC_FP_DRIVER_HANDLER_NAME, phba);
12109 
12110     if (rc) {
12111         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12112                 "0429 MSI-X fast-path request_irq failed "
12113                 "(%d)\n", rc);
12114         goto irq_fail_out;
12115     }
12116 
12117     /*
12118      * Configure HBA MSI-X attention conditions to messages
12119      */
12120     pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12121 
12122     if (!pmb) {
12123         rc = -ENOMEM;
12124         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12125                 "0474 Unable to allocate memory for issuing "
12126                 "MBOX_CONFIG_MSI command\n");
12127         goto mem_fail_out;
12128     }
12129     rc = lpfc_config_msi(phba, pmb);
12130     if (rc)
12131         goto mbx_fail_out;
12132     rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12133     if (rc != MBX_SUCCESS) {
12134         lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12135                 "0351 Config MSI mailbox command failed, "
12136                 "mbxCmd x%x, mbxStatus x%x\n",
12137                 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12138         goto mbx_fail_out;
12139     }
12140 
12141     /* Free memory allocated for mailbox command */
12142     mempool_free(pmb, phba->mbox_mem_pool);
12143     return rc;
12144 
12145 mbx_fail_out:
12146     /* Free memory allocated for mailbox command */
12147     mempool_free(pmb, phba->mbox_mem_pool);
12148 
12149 mem_fail_out:
12150     /* free the irq already requested */
12151     free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12152 
12153 irq_fail_out:
12154     /* free the irq already requested */
12155     free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12156 
12157 msi_fail_out:
12158     /* Unconfigure MSI-X capability structure */
12159     pci_free_irq_vectors(phba->pcidev);
12160 
12161 vec_fail_out:
12162     return rc;
12163 }
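
/*
 * A short schematic (a restatement for clarity, not additional code) of the
 * error unwinding above: the labels are laid out in reverse order of
 * acquisition, so a failure at any step falls through and releases only what
 * was already acquired.
 *
 *	pci_alloc_irq_vectors() fails -> vec_fail_out: nothing to undo
 *	request_irq(vector 0)   fails -> msi_fail_out: free the vectors
 *	request_irq(vector 1)   fails -> irq_fail_out: free irq 0, then vectors
 *	mailbox allocation      fails -> mem_fail_out: free irq 1, irq 0, vectors
 *	mailbox config/issue    fails -> mbx_fail_out: free the mailbox, then
 *	                                 fall through the labels above
 */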
12164 
12165 /**
12166  * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
12167  * @phba: pointer to lpfc hba data structure.
12168  *
12169  * This routine is invoked to enable the MSI interrupt mode to device with
12170  * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
12171  * enable the MSI vector. The device driver is responsible for calling
12172  * request_irq() to register the MSI vector with an interrupt handler, which
12173  * is done in this function.
12174  *
12175  * Return codes
12176  *  0 - successful
12177  *  other values - error
12178  */
12179 static int
12180 lpfc_sli_enable_msi(struct lpfc_hba *phba)
12181 {
12182     int rc;
12183 
12184     rc = pci_enable_msi(phba->pcidev);
12185     if (!rc)
12186         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12187                 "0012 PCI enable MSI mode success.\n");
12188     else {
12189         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12190                 "0471 PCI enable MSI mode failed (%d)\n", rc);
12191         return rc;
12192     }
12193 
12194     rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12195              0, LPFC_DRIVER_NAME, phba);
12196     if (rc) {
12197         pci_disable_msi(phba->pcidev);
12198         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12199                 "0478 MSI request_irq failed (%d)\n", rc);
12200     }
12201     return rc;
12202 }
12203 
12204 /**
12205  * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
12206  * @phba: pointer to lpfc hba data structure.
12207  * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
12208  *
12209  * This routine is invoked to enable device interrupt and associate driver's
12210  * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
12211  * spec. Depends on the interrupt mode configured to the driver, the driver
12212  * will try to fallback from the configured interrupt mode to an interrupt
12213  * mode which is supported by the platform, kernel, and device in the order
12214  * of:
12215  * MSI-X -> MSI -> IRQ.
12216  *
12217  * Return codes
12218  *   LPFC_INTR_ERROR - no interrupt mode could be enabled
12219  *   0, 1, 2 - the interrupt mode actually enabled (INTx, MSI, MSI-X)
12220  **/
12221 static uint32_t
12222 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12223 {
12224     uint32_t intr_mode = LPFC_INTR_ERROR;
12225     int retval;
12226 
12227     /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
12228     retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12229     if (retval)
12230         return intr_mode;
12231     phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12232 
12233     if (cfg_mode == 2) {
12234         /* Now, try to enable MSI-X interrupt mode */
12235         retval = lpfc_sli_enable_msix(phba);
12236         if (!retval) {
12237             /* Indicate initialization to MSI-X mode */
12238             phba->intr_type = MSIX;
12239             intr_mode = 2;
12240         }
12241     }
12242 
12243     /* Fallback to MSI if MSI-X initialization failed */
12244     if (cfg_mode >= 1 && phba->intr_type == NONE) {
12245         retval = lpfc_sli_enable_msi(phba);
12246         if (!retval) {
12247             /* Indicate initialization to MSI mode */
12248             phba->intr_type = MSI;
12249             intr_mode = 1;
12250         }
12251     }
12252 
12253     /* Fallback to INTx if both MSI-X/MSI initialization failed */
12254     if (phba->intr_type == NONE) {
12255         retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12256                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12257         if (!retval) {
12258             /* Indicate initialization to INTx mode */
12259             phba->intr_type = INTx;
12260             intr_mode = 0;
12261         }
12262     }
12263     return intr_mode;
12264 }
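
/*
 * Illustrative sketch (an assumption about the caller, not lifted from this
 * file): how a probe path might consume the value returned above, where
 * cfg_mode stands for the user-configured interrupt preference
 * (2 = MSI-X, 1 = MSI, 0 = INTx).
 *
 *	uint32_t intr_mode;
 *
 *	intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
 *	if (intr_mode == LPFC_INTR_ERROR) {
 *		// neither MSI-X, MSI nor INTx could be enabled; abort bring-up
 *		return -ENODEV;
 *	}
 *	// otherwise intr_mode records which mode was actually enabled
 */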
12265 
12266 /**
12267  * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
12268  * @phba: pointer to lpfc hba data structure.
12269  *
12270  * This routine is invoked to disable device interrupt and disassociate the
12271  * driver's interrupt handler(s) from interrupt vector(s) to device with
12272  * SLI-3 interface spec. Depending on the interrupt mode, the driver will
12273  * release the interrupt vector(s) for the message signaled interrupt.
12274  **/
12275 static void
12276 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12277 {
12278     int nr_irqs, i;
12279 
12280     if (phba->intr_type == MSIX)
12281         nr_irqs = LPFC_MSIX_VECTORS;
12282     else
12283         nr_irqs = 1;
12284 
12285     for (i = 0; i < nr_irqs; i++)
12286         free_irq(pci_irq_vector(phba->pcidev, i), phba);
12287     pci_free_irq_vectors(phba->pcidev);
12288 
12289     /* Reset interrupt management states */
12290     phba->intr_type = NONE;
12291     phba->sli.slistat.sli_intr = 0;
12292 }
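
/*
 * Teardown ordering note (a restatement, not new behavior): every IRQ that
 * was requested against pci_irq_vector() must be released with free_irq()
 * before pci_free_irq_vectors() is called, which is exactly the order the
 * loop above enforces for both the MSI-X and single-vector cases.
 */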
12293 
12294 /**
12295  * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
12296  * @phba: pointer to lpfc hba data structure.
12297  * @id: EQ vector index or Hardware Queue index
12298  * @match: LPFC_FIND_BY_EQ = match by EQ
12299  *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
12300  * Return the CPU that matches the selection criteria
12301  */
12302 static uint16_t
12303 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12304 {
12305     struct lpfc_vector_map_info *cpup;
12306     int cpu;
12307 
12308     /* Loop through all CPUs */
12309     for_each_present_cpu(cpu) {
12310         cpup = &phba->sli4_hba.cpu_map[cpu];
12311 
12312         /* If we are matching by EQ, there may be multiple CPUs
12313          * using the same vector, so select the one with
12314          * LPFC_CPU_FIRST_IRQ set.
12315          */
12316         if ((match == LPFC_FIND_BY_EQ) &&
12317             (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12318             (cpup->eq == id))
12319             return cpu;
12320 
12321         /* If matching by HDWQ, select the first CPU that matches */
12322         if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12323             return cpu;
12324     }
12325     return 0;
12326 }
12327 
12328 #ifdef CONFIG_X86
12329 /**
12330  * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
12331  * @phba: pointer to lpfc hba data structure.
12332  * @cpu: CPU map index
12333  * @phys_id: CPU package physical id
12334  * @core_id: CPU core id
12335  */
12336 static int
12337 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12338         uint16_t phys_id, uint16_t core_id)
12339 {
12340     struct lpfc_vector_map_info *cpup;
12341     int idx;
12342 
12343     for_each_present_cpu(idx) {
12344         cpup = &phba->sli4_hba.cpu_map[idx];
12345         /* Does the cpup match the one we are looking for */
12346         if ((cpup->phys_id == phys_id) &&
12347             (cpup->core_id == core_id) &&
12348             (cpu != idx))
12349             return 1;
12350     }
12351     return 0;
12352 }
12353 #endif
12354 
12355 /*
12356  * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
12357  * @phba: pointer to lpfc hba data structure.
12358  * @eqidx: index for eq and irq vector
12359  * @flag: flags to set for vector_map structure
12360  * @cpu: cpu used to index vector_map structure
12361  *
12362  * The routine assigns eq info into vector_map structure
12363  */
12364 static inline void
12365 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12366             unsigned int cpu)
12367 {
12368     struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12369     struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12370 
12371     cpup->eq = eqidx;
12372     cpup->flag |= flag;
12373 
12374     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12375             "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12376             cpu, eqhdl->irq, cpup->eq, cpup->flag);
12377 }
12378 
12379 /**
12380  * lpfc_cpu_map_array_init - Initialize cpu_map structure
12381  * @phba: pointer to lpfc hba data structure.
12382  *
12383  * The routine initializes the cpu_map array structure
12384  */
12385 static void
12386 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12387 {
12388     struct lpfc_vector_map_info *cpup;
12389     struct lpfc_eq_intr_info *eqi;
12390     int cpu;
12391 
12392     for_each_possible_cpu(cpu) {
12393         cpup = &phba->sli4_hba.cpu_map[cpu];
12394         cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12395         cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12396         cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12397         cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12398         cpup->flag = 0;
12399         eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12400         INIT_LIST_HEAD(&eqi->list);
12401         eqi->icnt = 0;
12402     }
12403 }
12404 
12405 /**
12406  * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
12407  * @phba: pointer to lpfc hba data structure.
12408  *
12409  * The routine initializes the hba_eq_hdl array structure
12410  */
12411 static void
12412 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12413 {
12414     struct lpfc_hba_eq_hdl *eqhdl;
12415     int i;
12416 
12417     for (i = 0; i < phba->cfg_irq_chann; i++) {
12418         eqhdl = lpfc_get_eq_hdl(i);
12419         eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
12420         eqhdl->phba = phba;
12421     }
12422 }
12423 
12424 /**
12425  * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
12426  * @phba: pointer to lpfc hba data structure.
12427  * @vectors: number of msix vectors allocated.
12428  *
12429  * The routine will figure out the CPU affinity assignment for every
12430  * MSI-X vector allocated for the HBA.
12431  * In addition, the CPU to IO channel mapping will be calculated
12432  * and the phba->sli4_hba.cpu_map array will reflect this.
12433  */
12434 static void
12435 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12436 {
12437     int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12438     int max_phys_id, min_phys_id;
12439     int max_core_id, min_core_id;
12440     struct lpfc_vector_map_info *cpup;
12441     struct lpfc_vector_map_info *new_cpup;
12442 #ifdef CONFIG_X86
12443     struct cpuinfo_x86 *cpuinfo;
12444 #endif
12445 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12446     struct lpfc_hdwq_stat *c_stat;
12447 #endif
12448 
12449     max_phys_id = 0;
12450     min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12451     max_core_id = 0;
12452     min_core_id = LPFC_VECTOR_MAP_EMPTY;
12453 
12454     /* Update CPU map with physical id and core id of each CPU */
12455     for_each_present_cpu(cpu) {
12456         cpup = &phba->sli4_hba.cpu_map[cpu];
12457 #ifdef CONFIG_X86
12458         cpuinfo = &cpu_data(cpu);
12459         cpup->phys_id = cpuinfo->phys_proc_id;
12460         cpup->core_id = cpuinfo->cpu_core_id;
12461         if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12462             cpup->flag |= LPFC_CPU_MAP_HYPER;
12463 #else
12464         /* No distinction between CPUs for other platforms */
12465         cpup->phys_id = 0;
12466         cpup->core_id = cpu;
12467 #endif
12468 
12469         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12470                 "3328 CPU %d physid %d coreid %d flag x%x\n",
12471                 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12472 
12473         if (cpup->phys_id > max_phys_id)
12474             max_phys_id = cpup->phys_id;
12475         if (cpup->phys_id < min_phys_id)
12476             min_phys_id = cpup->phys_id;
12477 
12478         if (cpup->core_id > max_core_id)
12479             max_core_id = cpup->core_id;
12480         if (cpup->core_id < min_core_id)
12481             min_core_id = cpup->core_id;
12482     }
12483 
12484     /* After looking at each irq vector assigned to this pcidev, it's
12485      * possible to see that not ALL CPUs have been accounted for.
12486      * Next we will set any unassigned (unaffinitized) cpu map
12487      * entries to an IRQ on the same phys_id.
12488      */
12489     first_cpu = cpumask_first(cpu_present_mask);
12490     start_cpu = first_cpu;
12491 
12492     for_each_present_cpu(cpu) {
12493         cpup = &phba->sli4_hba.cpu_map[cpu];
12494 
12495         /* Is this CPU entry unassigned */
12496         if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12497             /* Mark CPU as IRQ not assigned by the kernel */
12498             cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12499 
12500             /* If so, find a new_cpup that's on the SAME
12501              * phys_id as cpup. start_cpu will start where we
12502              * left off so all unassigned entries don't get assigned
12503              * the IRQ of the first entry.
12504              */
12505             new_cpu = start_cpu;
12506             for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12507                 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12508                 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12509                     (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12510                     (new_cpup->phys_id == cpup->phys_id))
12511                     goto found_same;
12512                 new_cpu = cpumask_next(
12513                     new_cpu, cpu_present_mask);
12514                 if (new_cpu == nr_cpumask_bits)
12515                     new_cpu = first_cpu;
12516             }
12517             /* At this point, we leave the CPU as unassigned */
12518             continue;
12519 found_same:
12520             /* We found a matching phys_id, so copy the IRQ info */
12521             cpup->eq = new_cpup->eq;
12522 
12523             /* Bump start_cpu to the next slot to minimize the
12524              * chance of having multiple unassigned CPU entries
12525              * selecting the same IRQ.
12526              */
12527             start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12528             if (start_cpu == nr_cpumask_bits)
12529                 start_cpu = first_cpu;
12530 
12531             lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12532                     "3337 Set Affinity: CPU %d "
12533                     "eq %d from peer cpu %d same "
12534                     "phys_id (%d)\n",
12535                     cpu, cpup->eq, new_cpu,
12536                     cpup->phys_id);
12537         }
12538     }
12539 
12540     /* Set any unassigned cpu map entries to an IRQ on any phys_id */
12541     start_cpu = first_cpu;
12542 
12543     for_each_present_cpu(cpu) {
12544         cpup = &phba->sli4_hba.cpu_map[cpu];
12545 
12546         /* Is this entry unassigned */
12547         if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12548             /* Mark it as IRQ not assigned by the kernel */
12549             cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12550 
12551             /* If so, find a new_cpup that's on ANY phys_id
12552              * as the cpup. start_cpu will start where we
12553              * left off so all unassigned entries don't get
12554              * assigned the IRQ of the first entry.
12555              */
12556             new_cpu = start_cpu;
12557             for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12558                 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12559                 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12560                     (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12561                     goto found_any;
12562                 new_cpu = cpumask_next(
12563                     new_cpu, cpu_present_mask);
12564                 if (new_cpu == nr_cpumask_bits)
12565                     new_cpu = first_cpu;
12566             }
12567             /* We should never leave an entry unassigned */
12568             lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12569                     "3339 Set Affinity: CPU %d "
12570                     "eq %d UNASSIGNED\n",
12571                     cpup->hdwq, cpup->eq);
12572             continue;
12573 found_any:
12574             /* We found an available entry, copy the IRQ info */
12575             cpup->eq = new_cpup->eq;
12576 
12577             /* Bump start_cpu to the next slot to minimize the
12578              * chance of having multiple unassigned CPU entries
12579              * selecting the same IRQ.
12580              */
12581             start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12582             if (start_cpu == nr_cpumask_bits)
12583                 start_cpu = first_cpu;
12584 
12585             lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12586                     "3338 Set Affinity: CPU %d "
12587                     "eq %d from peer cpu %d (%d/%d)\n",
12588                     cpu, cpup->eq, new_cpu,
12589                     new_cpup->phys_id, new_cpup->core_id);
12590         }
12591     }
12592 
12593     /* Assign hdwq indices that are unique across all cpus in the map
12594      * that are also FIRST_CPUs.
12595      */
12596     idx = 0;
12597     for_each_present_cpu(cpu) {
12598         cpup = &phba->sli4_hba.cpu_map[cpu];
12599 
12600         /* Only FIRST IRQs get a hdwq index assignment. */
12601         if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12602             continue;
12603 
12604         /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */
12605         cpup->hdwq = idx;
12606         idx++;
12607         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12608                 "3333 Set Affinity: CPU %d (phys %d core %d): "
12609                 "hdwq %d eq %d flg x%x\n",
12610                 cpu, cpup->phys_id, cpup->core_id,
12611                 cpup->hdwq, cpup->eq, cpup->flag);
12612     }
12613     /* Associate a hdwq with each cpu_map entry
12614      * This will be 1 to 1 - hdwq to cpu, unless there are fewer
12615      * hardware queues than CPUs. In that case we will just round-robin
12616      * the available hardware queues as they get assigned to CPUs.
12617      * The next_idx is the idx from the FIRST_CPU loop above to account
12618      * for irq_chann < hdwq.  The idx is used for round-robin assignments
12619      * and needs to start at 0.
12620      */
12621     next_idx = idx;
12622     start_cpu = 0;
12623     idx = 0;
12624     for_each_present_cpu(cpu) {
12625         cpup = &phba->sli4_hba.cpu_map[cpu];
12626 
12627         /* FIRST cpus are already mapped. */
12628         if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12629             continue;
12630 
12631         /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq
12632          * of the unassigned cpus to the next idx so that all
12633          * hdw queues are fully utilized.
12634          */
12635         if (next_idx < phba->cfg_hdw_queue) {
12636             cpup->hdwq = next_idx;
12637             next_idx++;
12638             continue;
12639         }
12640 
12641         /* Not a First CPU and all hdw_queues are used.  Reuse a
12642          * Hardware Queue for another CPU, so be smart about it
12643          * and pick one that has its IRQ/EQ mapped to the same phys_id
12644          * (CPU package) and core_id.
12645          */
12646         new_cpu = start_cpu;
12647         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12648             new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12649             if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12650                 new_cpup->phys_id == cpup->phys_id &&
12651                 new_cpup->core_id == cpup->core_id) {
12652                 goto found_hdwq;
12653             }
12654             new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12655             if (new_cpu == nr_cpumask_bits)
12656                 new_cpu = first_cpu;
12657         }
12658 
12659         /* If we can't match both phys_id and core_id,
12660          * settle for just a phys_id match.
12661          */
12662         new_cpu = start_cpu;
12663         for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12664             new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12665             if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12666                 new_cpup->phys_id == cpup->phys_id)
12667                 goto found_hdwq;
12668 
12669             new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12670             if (new_cpu == nr_cpumask_bits)
12671                 new_cpu = first_cpu;
12672         }
12673 
12674         /* Otherwise just round robin on cfg_hdw_queue */
12675         cpup->hdwq = idx % phba->cfg_hdw_queue;
12676         idx++;
12677         goto logit;
12678  found_hdwq:
12679         /* We found an available entry, copy the IRQ info */
12680         start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12681         if (start_cpu == nr_cpumask_bits)
12682             start_cpu = first_cpu;
12683         cpup->hdwq = new_cpup->hdwq;
12684  logit:
12685         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12686                 "3335 Set Affinity: CPU %d (phys %d core %d): "
12687                 "hdwq %d eq %d flg x%x\n",
12688                 cpu, cpup->phys_id, cpup->core_id,
12689                 cpup->hdwq, cpup->eq, cpup->flag);
12690     }
12691 
12692     /*
12693      * Initialize the cpu_map slots for not-present cpus in case
12694      * a cpu is hot-added. Perform a simple hdwq round robin assignment.
12695      */
12696     idx = 0;
12697     for_each_possible_cpu(cpu) {
12698         cpup = &phba->sli4_hba.cpu_map[cpu];
12699 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12700         c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12701         c_stat->hdwq_no = cpup->hdwq;
12702 #endif
12703         if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12704             continue;
12705 
12706         cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12707 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12708         c_stat->hdwq_no = cpup->hdwq;
12709 #endif
12710         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12711                 "3340 Set Affinity: not present "
12712                 "CPU %d hdwq %d\n",
12713                 cpu, cpup->hdwq);
12714     }
12715 
12716     /* The cpu_map array will be used later during initialization
12717      * when EQ / CQ / WQs are allocated and configured.
12718      */
12719     return;
12720 }
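
/*
 * Worked example of the assignment above (an assumed configuration, for
 * illustration only): 8 present CPUs on a single package, cfg_irq_chann = 2
 * and cfg_hdw_queue = 4.  The two LPFC_CPU_FIRST_IRQ cpus (one per EQ) get
 * hdwq 0 and 1 in the first loop.  In the second loop the next two
 * non-FIRST cpus consume next_idx values 2 and 3, so all hardware queues
 * are in use; the remaining cpus then inherit the hdwq of an already
 * assigned cpu on the same phys_id/core_id, or fall back to a round-robin
 * of idx % cfg_hdw_queue.
 */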
12721 
12722 /**
12723  * lpfc_cpuhp_get_eq
12724  *
12725  * @phba:   pointer to lpfc hba data structure.
12726  * @cpu:    cpu going offline
12727  * @eqlist: eq list to append to
12728  */
12729 static int
12730 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12731           struct list_head *eqlist)
12732 {
12733     const struct cpumask *maskp;
12734     struct lpfc_queue *eq;
12735     struct cpumask *tmp;
12736     u16 idx;
12737 
12738     tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12739     if (!tmp)
12740         return -ENOMEM;
12741 
12742     for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12743         maskp = pci_irq_get_affinity(phba->pcidev, idx);
12744         if (!maskp)
12745             continue;
12746         /*
12747          * if the irq is not affinitized to the cpu going
12748          * offline then we don't need to poll the eq
12749          * attached to it.
12750          */
12751         if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12752             continue;
12753         /* Get the cpus that are online and are
12754          * affinitized to this irq vector.  If the count
12755          * is more than 1 then cpuhp is not going to
12756          * shut down this vector.  Since this cpu has not
12757          * gone offline yet, we need >1.
12758          */
12759         cpumask_and(tmp, maskp, cpu_online_mask);
12760         if (cpumask_weight(tmp) > 1)
12761             continue;
12762 
12763         /* Now that we have an irq to shut down, get the eq
12764          * mapped to this irq.  Note: multiple hdwq's in
12765          * the software can share an eq, but eventually
12766          * only one eq will be mapped to this vector.
12767          */
12768         eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12769         list_add(&eq->_poll_list, eqlist);
12770     }
12771     kfree(tmp);
12772     return 0;
12773 }
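
/*
 * Worked example of the weight test above (assumed masks, for illustration):
 * if a vector's affinity mask is {2,3} and CPU 3 is the one going offline
 * while CPU 2 is still online, then (mask & cpu_online_mask) still has
 * weight 2 (the offlining CPU is counted as online at this point), so the
 * vector keeps firing on CPU 2 and its EQ does not need software polling.
 * If the mask were {3} alone, the weight would be 1 and the EQ would be
 * added to eqlist for polling.
 */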
12774 
12775 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12776 {
12777     if (phba->sli_rev != LPFC_SLI_REV4)
12778         return;
12779 
12780     cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12781                         &phba->cpuhp);
12782     /*
12783      * unregistering the instance doesn't stop the polling
12784      * timer. Wait for the poll timer to retire.
12785      */
12786     synchronize_rcu();
12787     del_timer_sync(&phba->cpuhp_poll_timer);
12788 }
12789 
12790 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12791 {
12792     if (phba->pport->fc_flag & FC_OFFLINE_MODE)
12793         return;
12794 
12795     __lpfc_cpuhp_remove(phba);
12796 }
12797 
12798 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12799 {
12800     if (phba->sli_rev != LPFC_SLI_REV4)
12801         return;
12802 
12803     rcu_read_lock();
12804 
12805     if (!list_empty(&phba->poll_list))
12806         mod_timer(&phba->cpuhp_poll_timer,
12807               jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12808 
12809     rcu_read_unlock();
12810 
12811     cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12812                      &phba->cpuhp);
12813 }
12814 
12815 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12816 {
12817     if (phba->pport->load_flag & FC_UNLOADING) {
12818         *retval = -EAGAIN;
12819         return true;
12820     }
12821 
12822     if (phba->sli_rev != LPFC_SLI_REV4) {
12823         *retval = 0;
12824         return true;
12825     }
12826 
12827     /* proceed with the hotplug */
12828     return false;
12829 }
12830 
12831 /**
12832  * lpfc_irq_set_aff - set IRQ affinity
12833  * @eqhdl: EQ handle
12834  * @cpu: cpu to set affinity
12835  *
12836  **/
12837 static inline void
12838 lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12839 {
12840     cpumask_clear(&eqhdl->aff_mask);
12841     cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12842     irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12843     irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
12844 }
12845 
12846 /**
12847  * lpfc_irq_clear_aff - clear IRQ affinity
12848  * @eqhdl: EQ handle
12849  *
12850  **/
12851 static inline void
12852 lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12853 {
12854     cpumask_clear(&eqhdl->aff_mask);
12855     irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12856 }
12857 
12858 /**
12859  * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event
12860  * @phba: pointer to HBA context object.
12861  * @cpu: cpu going offline/online
12862  * @offline: true, cpu is going offline. false, cpu is coming online.
12863  *
12864  * If cpu is going offline, we'll make a best effort to find the next
12865  * online cpu on the phba's original_mask and migrate all offlining IRQ
12866  * affinities.
12867  *
12868  * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu.
12869  *
12870  * Note: Call only if NUMA or NHT mode is enabled, otherwise rely on
12871  *   PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
12872  *
12873  **/
12874 static void
12875 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12876 {
12877     struct lpfc_vector_map_info *cpup;
12878     struct cpumask *aff_mask;
12879     unsigned int cpu_select, cpu_next, idx;
12880     const struct cpumask *orig_mask;
12881 
12882     if (phba->irq_chann_mode == NORMAL_MODE)
12883         return;
12884 
12885     orig_mask = &phba->sli4_hba.irq_aff_mask;
12886 
12887     if (!cpumask_test_cpu(cpu, orig_mask))
12888         return;
12889 
12890     cpup = &phba->sli4_hba.cpu_map[cpu];
12891 
12892     if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12893         return;
12894 
12895     if (offline) {
12896         /* Find next online CPU on original mask */
12897         cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12898         cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12899 
12900         /* Found a valid CPU */
12901         if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12902             /* Go through each eqhdl and ensure offlining
12903              * cpu aff_mask is migrated
12904              */
12905             for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12906                 aff_mask = lpfc_get_aff_mask(idx);
12907 
12908                 /* Migrate affinity */
12909                 if (cpumask_test_cpu(cpu, aff_mask))
12910                     lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12911                              cpu_select);
12912             }
12913         } else {
12914             /* Rely on irqbalance if no online CPUs left on NUMA */
12915             for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12916                 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12917         }
12918     } else {
12919         /* Migrate affinity back to this CPU */
12920         lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12921     }
12922 }
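
/*
 * Worked example (assumed topology, for illustration): with irq_chann_mode
 * set to a NUMA mode and irq_aff_mask = {0-7}, suppose CPU 2 is the
 * LPFC_CPU_FIRST_IRQ cpu for one of the EQs.  When CPU 2 goes offline, the
 * next online cpu on the mask (say CPU 3) is selected and every eqhdl whose
 * aff_mask contains CPU 2 is re-pinned to CPU 3.  When CPU 2 comes back
 * online, only the vector recorded in cpu_map[2].eq is pinned back to CPU 2.
 */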
12923 
12924 static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12925 {
12926     struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12927     struct lpfc_queue *eq, *next;
12928     LIST_HEAD(eqlist);
12929     int retval;
12930 
12931     if (!phba) {
12932         WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12933         return 0;
12934     }
12935 
12936     if (__lpfc_cpuhp_checks(phba, &retval))
12937         return retval;
12938 
12939     lpfc_irq_rebalance(phba, cpu, true);
12940 
12941     retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12942     if (retval)
12943         return retval;
12944 
12945     /* start polling on these eq's */
12946     list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12947         list_del_init(&eq->_poll_list);
12948         lpfc_sli4_start_polling(eq);
12949     }
12950 
12951     return 0;
12952 }
12953 
12954 static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12955 {
12956     struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12957     struct lpfc_queue *eq, *next;
12958     unsigned int n;
12959     int retval;
12960 
12961     if (!phba) {
12962         WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12963         return 0;
12964     }
12965 
12966     if (__lpfc_cpuhp_checks(phba, &retval))
12967         return retval;
12968 
12969     lpfc_irq_rebalance(phba, cpu, false);
12970 
12971     list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12972         n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12973         if (n == cpu)
12974             lpfc_sli4_stop_polling(eq);
12975     }
12976 
12977     return 0;
12978 }
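
/*
 * Illustrative sketch (an assumption, not taken from this section): the two
 * callbacks above are normally registered once through the multi-instance
 * cpuhp API, after which each HBA adds its own hlist_node instance (see
 * lpfc_cpuhp_add() above).  The state name string below is a placeholder.
 *
 *	int rc;
 *
 *	rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "scsi/lpfc:online",
 *				     lpfc_cpu_online, lpfc_cpu_offline);
 *	if (rc < 0)
 *		return rc;		// no dynamic cpuhp slot available
 *	lpfc_cpuhp_state = rc;		// state id later used for add/remove
 */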
12979 
12980 /**
12981  * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
12982  * @phba: pointer to lpfc hba data structure.
12983  *
12984  * This routine is invoked to enable the MSI-X interrupt vectors to device
12985  * with SLI-4 interface spec.  It also allocates MSI-X vectors and maps them
12986  * to cpus on the system.
12987  *
12988  * When cfg_irq_numa is enabled, the adapter will only allocate vectors for
12989  * the number of cpus on the same numa node as this adapter.  The vectors are
12990  * allocated without requesting OS affinity mapping.  A vector will be
12991  * allocated and assigned to each online and offline cpu.  If the cpu is
12992  * online, then affinity will be set to that cpu.  If the cpu is offline, then
12993  * affinity will be set to the nearest peer cpu within the numa node that is
12994  * online.  If there are no online cpus within the numa node, affinity is not
12995  * assigned and the OS may do as it pleases. Note: cpu vector affinity mapping
12996  * is consistent with the way cpu online/offline is handled when cfg_irq_numa is
12997  * configured.
12998  *
12999  * If numa mode is not enabled and there is more than 1 vector allocated, then
13000  * the driver relies on the managed irq interface where the OS assigns
13001  * vector-to-cpu affinity.  The driver will then use that affinity mapping to set up its
13002  * cpu mapping table.
13003  *
13004  * Return codes
13005  * 0 - successful
13006  * other values - error
13007  **/
13008 static int
13009 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
13010 {
13011     int vectors, rc, index;
13012     char *name;
13013     const struct cpumask *aff_mask = NULL;
13014     unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
13015     struct lpfc_vector_map_info *cpup;
13016     struct lpfc_hba_eq_hdl *eqhdl;
13017     const struct cpumask *maskp;
13018     unsigned int flags = PCI_IRQ_MSIX;
13019 
13020     /* Set up MSI-X multi-message vectors */
13021     vectors = phba->cfg_irq_chann;
13022 
13023     if (phba->irq_chann_mode != NORMAL_MODE)
13024         aff_mask = &phba->sli4_hba.irq_aff_mask;
13025 
13026     if (aff_mask) {
13027         cpu_cnt = cpumask_weight(aff_mask);
13028         vectors = min(phba->cfg_irq_chann, cpu_cnt);
13029 
13030         /* cpu: iterates over aff_mask including offline or online
13031          * cpu_select: iterates over online aff_mask to set affinity
13032          */
13033         cpu = cpumask_first(aff_mask);
13034         cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13035     } else {
13036         flags |= PCI_IRQ_AFFINITY;
13037     }
13038 
13039     rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13040     if (rc < 0) {
13041         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13042                 "0484 PCI enable MSI-X failed (%d)\n", rc);
13043         goto vec_fail_out;
13044     }
13045     vectors = rc;
13046 
13047     /* Assign MSI-X vectors to interrupt handlers */
13048     for (index = 0; index < vectors; index++) {
13049         eqhdl = lpfc_get_eq_hdl(index);
13050         name = eqhdl->handler_name;
13051         memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
13052         snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
13053              LPFC_DRIVER_HANDLER_NAME"%d", index);
13054 
13055         eqhdl->idx = index;
13056         rc = request_irq(pci_irq_vector(phba->pcidev, index),
13057              &lpfc_sli4_hba_intr_handler, 0,
13058              name, eqhdl);
13059         if (rc) {
13060             lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13061                     "0486 MSI-X fast-path (%d) "
13062                     "request_irq failed (%d)\n", index, rc);
13063             goto cfg_fail_out;
13064         }
13065 
13066         eqhdl->irq = pci_irq_vector(phba->pcidev, index);
13067 
13068         if (aff_mask) {
13069             /* If found a neighboring online cpu, set affinity */
13070             if (cpu_select < nr_cpu_ids)
13071                 lpfc_irq_set_aff(eqhdl, cpu_select);
13072 
13073             /* Assign EQ to cpu_map */
13074             lpfc_assign_eq_map_info(phba, index,
13075                         LPFC_CPU_FIRST_IRQ,
13076                         cpu);
13077 
13078             /* Iterate to next offline or online cpu in aff_mask */
13079             cpu = cpumask_next(cpu, aff_mask);
13080 
13081             /* Find next online cpu in aff_mask to set affinity */
13082             cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13083         } else if (vectors == 1) {
13084             cpu = cpumask_first(cpu_present_mask);
13085             lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
13086                         cpu);
13087         } else {
13088             maskp = pci_irq_get_affinity(phba->pcidev, index);
13089 
13090             /* Loop through all CPUs associated with vector index */
13091             for_each_cpu_and(cpu, maskp, cpu_present_mask) {
13092                 cpup = &phba->sli4_hba.cpu_map[cpu];
13093 
13094                 /* If this is the first CPU that's assigned to
13095                  * this vector, set LPFC_CPU_FIRST_IRQ.
13096                  *
13097                  * With certain platforms it's possible that irq
13098                  * vectors are affinitized to all the cpus.
13099                  * This can result in each cpu_map.eq to be set
13100                  * to the last vector, resulting in overwrite
13101                  * of all the previous cpu_map.eq.  Ensure that
13102                  * each vector receives a place in cpu_map.
13103                  * Later call to lpfc_cpu_affinity_check will
13104                  * ensure we are nicely balanced out.
13105                  */
13106                 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
13107                     continue;
13108                 lpfc_assign_eq_map_info(phba, index,
13109                             LPFC_CPU_FIRST_IRQ,
13110                             cpu);
13111                 break;
13112             }
13113         }
13114     }
13115 
13116     if (vectors != phba->cfg_irq_chann) {
13117         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13118                 "3238 Reducing IO channels to match number of "
13119                 "MSI-X vectors, requested %d got %d\n",
13120                 phba->cfg_irq_chann, vectors);
13121         if (phba->cfg_irq_chann > vectors)
13122             phba->cfg_irq_chann = vectors;
13123     }
13124 
13125     return rc;
13126 
13127 cfg_fail_out:
13128     /* free the irq already requested */
13129     for (--index; index >= 0; index--) {
13130         eqhdl = lpfc_get_eq_hdl(index);
13131         lpfc_irq_clear_aff(eqhdl);
13132         free_irq(eqhdl->irq, eqhdl);
13133     }
13134 
13135     /* Unconfigure MSI-X capability structure */
13136     pci_free_irq_vectors(phba->pcidev);
13137 
13138 vec_fail_out:
13139     return rc;
13140 }
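/* Illustrative sketch (not part of the driver): stripped of the lpfc
 * affinity bookkeeping above, MSI-X setup reduces to the generic
 * "allocate a vector range, then wire one handler per vector" pattern.
 * pdev, want, handler, name and data below are placeholders.
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_MSIX);
 *	if (nvec < 0)
 *		return nvec;
 *	for (i = 0; i < nvec; i++) {
 *		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
 *				 name, data);
 *		if (rc)
 *			goto unwind;
 *	}
 *
 * where the unwind label would free_irq() the vectors already taken and
 * then call pci_free_irq_vectors(), as the cfg_fail_out path above does.
 */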
13141 
13142 /**
13143  * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
13144  * @phba: pointer to lpfc hba data structure.
13145  *
13146  * This routine is invoked to enable the MSI interrupt mode to device with
13147  * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is
13148  * called to enable the MSI vector. The device driver is responsible for
13149  * calling request_irq() to register the MSI vector with an interrupt
13150  * handler, which is done in this function.
13151  *
13152  * Return codes
13153  *  0 - successful
13154  *  other values - error
13155  **/
13156 static int
13157 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13158 {
13159     int rc, index;
13160     unsigned int cpu;
13161     struct lpfc_hba_eq_hdl *eqhdl;
13162 
13163     rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13164                    PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
13165     if (rc > 0)
13166         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13167                 "0487 PCI enable MSI mode success.\n");
13168     else {
13169         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13170                 "0488 PCI enable MSI mode failed (%d)\n", rc);
13171         return rc ? rc : -1;
13172     }
13173 
13174     rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13175              0, LPFC_DRIVER_NAME, phba);
13176     if (rc) {
13177         pci_free_irq_vectors(phba->pcidev);
13178         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13179                 "0490 MSI request_irq failed (%d)\n", rc);
13180         return rc;
13181     }
13182 
13183     eqhdl = lpfc_get_eq_hdl(0);
13184     eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13185 
13186     cpu = cpumask_first(cpu_present_mask);
13187     lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
13188 
13189     for (index = 0; index < phba->cfg_irq_chann; index++) {
13190         eqhdl = lpfc_get_eq_hdl(index);
13191         eqhdl->idx = index;
13192     }
13193 
13194     return 0;
13195 }
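/* Illustrative sketch (not part of the driver): single-vector MSI setup, as
 * performed above, follows the same two-step pattern with a vector count of
 * one; pdev, handler, name and data are placeholders.
 *
 *	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
 *	if (rc < 0)
 *		return rc;
 *	rc = request_irq(pci_irq_vector(pdev, 0), handler, 0, name, data);
 */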
13196 
13197 /**
13198  * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
13199  * @phba: pointer to lpfc hba data structure.
13200  * @cfg_mode: Interrupt configuration mode (INTx, MSI or MSI-X).
13201  *
13202  * This routine is invoked to enable device interrupt and associate driver's
13203  * interrupt handler(s) to interrupt vector(s) to device with SLI-4
13204  * interface spec. Depending on the interrupt mode configured in the driver,
13205  * the driver will try to fall back from the configured interrupt mode to an
13206  * interrupt mode which is supported by the platform, kernel, and device in
13207  * the order of:
13208  * MSI-X -> MSI -> IRQ.
13209  *
13210  * Return codes
13211  *  0 - successful
13212  *  other values - error
13213  **/
13214 static uint32_t
13215 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13216 {
13217     uint32_t intr_mode = LPFC_INTR_ERROR;
13218     int retval, idx;
13219 
13220     if (cfg_mode == 2) {
13221         /* Preparation before conf_msi mbox cmd */
13222         retval = 0;
13223         if (!retval) {
13224             /* Now, try to enable MSI-X interrupt mode */
13225             retval = lpfc_sli4_enable_msix(phba);
13226             if (!retval) {
13227                 /* Indicate initialization to MSI-X mode */
13228                 phba->intr_type = MSIX;
13229                 intr_mode = 2;
13230             }
13231         }
13232     }
13233 
13234     /* Fallback to MSI if MSI-X initialization failed */
13235     if (cfg_mode >= 1 && phba->intr_type == NONE) {
13236         retval = lpfc_sli4_enable_msi(phba);
13237         if (!retval) {
13238             /* Indicate initialization to MSI mode */
13239             phba->intr_type = MSI;
13240             intr_mode = 1;
13241         }
13242     }
13243 
13244     /* Fallback to INTx if both MSI-X/MSI initialization failed */
13245     if (phba->intr_type == NONE) {
13246         retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13247                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13248         if (!retval) {
13249             struct lpfc_hba_eq_hdl *eqhdl;
13250             unsigned int cpu;
13251 
13252             /* Indicate initialization to INTx mode */
13253             phba->intr_type = INTx;
13254             intr_mode = 0;
13255 
13256             eqhdl = lpfc_get_eq_hdl(0);
13257             eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13258 
13259             cpu = cpumask_first(cpu_present_mask);
13260             lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13261                         cpu);
13262             for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13263                 eqhdl = lpfc_get_eq_hdl(idx);
13264                 eqhdl->idx = idx;
13265             }
13266         }
13267     }
13268     return intr_mode;
13269 }
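/* Illustrative note (an assumption about the caller, not driver code): a
 * probe/online path consuming this routine is expected to treat the return
 * value as the active interrupt mode, mirroring the SLI-3 probe path shown
 * further below:
 *
 *	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		goto out_fail;
 *	phba->intr_mode = intr_mode;
 *
 * where intr_mode 2 means MSI-X, 1 means MSI, and 0 means INTx.
 */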
13270 
13271 /**
13272  * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
13273  * @phba: pointer to lpfc hba data structure.
13274  *
13275  * This routine is invoked to disable device interrupt and disassociate
13276  * the driver's interrupt handler(s) from interrupt vector(s) to device
13277  * with SLI-4 interface spec. Depending on the interrupt mode, the driver
13278  * will release the interrupt vector(s) for the message signaled interrupt.
13279  **/
13280 static void
13281 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13282 {
13283     /* Disable the currently initialized interrupt mode */
13284     if (phba->intr_type == MSIX) {
13285         int index;
13286         struct lpfc_hba_eq_hdl *eqhdl;
13287 
13288         /* Free up MSI-X multi-message vectors */
13289         for (index = 0; index < phba->cfg_irq_chann; index++) {
13290             eqhdl = lpfc_get_eq_hdl(index);
13291             lpfc_irq_clear_aff(eqhdl);
13292             free_irq(eqhdl->irq, eqhdl);
13293         }
13294     } else {
13295         free_irq(phba->pcidev->irq, phba);
13296     }
13297 
13298     pci_free_irq_vectors(phba->pcidev);
13299 
13300     /* Reset interrupt management states */
13301     phba->intr_type = NONE;
13302     phba->sli.slistat.sli_intr = 0;
13303 }
13304 
13305 /**
13306  * lpfc_unset_hba - Unset SLI3 hba device initialization
13307  * @phba: pointer to lpfc hba data structure.
13308  *
13309  * This routine is invoked to unset the HBA device initialization steps to
13310  * a device with SLI-3 interface spec.
13311  **/
13312 static void
13313 lpfc_unset_hba(struct lpfc_hba *phba)
13314 {
13315     struct lpfc_vport *vport = phba->pport;
13316     struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
13317 
13318     spin_lock_irq(shost->host_lock);
13319     vport->load_flag |= FC_UNLOADING;
13320     spin_unlock_irq(shost->host_lock);
13321 
13322     kfree(phba->vpi_bmask);
13323     kfree(phba->vpi_ids);
13324 
13325     lpfc_stop_hba_timers(phba);
13326 
13327     phba->pport->work_port_events = 0;
13328 
13329     lpfc_sli_hba_down(phba);
13330 
13331     lpfc_sli_brdrestart(phba);
13332 
13333     lpfc_sli_disable_intr(phba);
13334 
13335     return;
13336 }
13337 
13338 /**
13339  * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
13340  * @phba: Pointer to HBA context object.
13341  *
13342  * This function is called in the SLI4 code path to wait for completion
13343  * of device's XRIs exchange busy. It will check the XRI exchange busy
13344  * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
13345  * that, it will check the XRI exchange busy on outstanding FCP and ELS
13346  * I/Os every 30 seconds, log an error message, and wait forever. Only when
13347  * all XRI exchange busy has completed does the driver unload proceed with
13348  * invoking the function reset ioctl mailbox command to the CNA and the
13349  * rest of the driver unload resource release.
13350  **/
13351 static void
13352 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13353 {
13354     struct lpfc_sli4_hdw_queue *qp;
13355     int idx, ccnt;
13356     int wait_time = 0;
13357     int io_xri_cmpl = 1;
13358     int nvmet_xri_cmpl = 1;
13359     int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13360 
13361     /* Driver just aborted IOs during the hba_unset process.  Pause
13362      * here to give the HBA time to complete the IO and get entries
13363      * into the abts lists.
13364      */
13365     msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13366 
13367     /* Wait for NVME pending IO to flush back to transport. */
13368     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13369         lpfc_nvme_wait_for_io_drain(phba);
13370 
13371     ccnt = 0;
13372     for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13373         qp = &phba->sli4_hba.hdwq[idx];
13374         io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13375         if (!io_xri_cmpl) /* if list is NOT empty */
13376             ccnt++;
13377     }
13378     if (ccnt)
13379         io_xri_cmpl = 0;
13380 
13381     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13382         nvmet_xri_cmpl =
13383             list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13384     }
13385 
13386     while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13387         if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13388             if (!nvmet_xri_cmpl)
13389                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13390                         "6424 NVMET XRI exchange busy "
13391                         "wait time: %d seconds.\n",
13392                         wait_time/1000);
13393             if (!io_xri_cmpl)
13394                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13395                         "6100 IO XRI exchange busy "
13396                         "wait time: %d seconds.\n",
13397                         wait_time/1000);
13398             if (!els_xri_cmpl)
13399                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13400                         "2878 ELS XRI exchange busy "
13401                         "wait time: %d seconds.\n",
13402                         wait_time/1000);
13403             msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13404             wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13405         } else {
13406             msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13407             wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13408         }
13409 
13410         ccnt = 0;
13411         for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13412             qp = &phba->sli4_hba.hdwq[idx];
13413             io_xri_cmpl = list_empty(
13414                 &qp->lpfc_abts_io_buf_list);
13415             if (!io_xri_cmpl) /* if list is NOT empty */
13416                 ccnt++;
13417         }
13418         if (ccnt)
13419             io_xri_cmpl = 0;
13420 
13421         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13422             nvmet_xri_cmpl = list_empty(
13423                 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13424         }
13425         els_xri_cmpl =
13426             list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13427 
13428     }
13429 }
13430 
13431 /**
13432  * lpfc_sli4_hba_unset - Unset the fcoe hba
13433  * @phba: Pointer to HBA context object.
13434  *
13435  * This function is called in the SLI4 code path to reset the HBA's FCoE
13436  * function. The caller is not required to hold any lock. This routine
13437  * issues PCI function reset mailbox command to reset the FCoE function.
13438  * At the end of the function, it calls lpfc_hba_down_post function to
13439  * free any pending commands.
13440  **/
13441 static void
13442 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13443 {
13444     int wait_cnt = 0;
13445     LPFC_MBOXQ_t *mboxq;
13446     struct pci_dev *pdev = phba->pcidev;
13447 
13448     lpfc_stop_hba_timers(phba);
13449     hrtimer_cancel(&phba->cmf_timer);
13450 
13451     if (phba->pport)
13452         phba->sli4_hba.intr_enable = 0;
13453 
13454     /*
13455      * Gracefully wait out any potentially outstanding asynchronous
13456      * mailbox command.
13457      */
13458 
13459     /* First, block any pending async mailbox command from posted */
13460     spin_lock_irq(&phba->hbalock);
13461     phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13462     spin_unlock_irq(&phba->hbalock);
13463     /* Now, try to wait it out if we can */
13464     while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13465         msleep(10);
13466         if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13467             break;
13468     }
13469     /* Forcefully release the outstanding mailbox command if timed out */
13470     if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13471         spin_lock_irq(&phba->hbalock);
13472         mboxq = phba->sli.mbox_active;
13473         mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13474         __lpfc_mbox_cmpl_put(phba, mboxq);
13475         phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13476         phba->sli.mbox_active = NULL;
13477         spin_unlock_irq(&phba->hbalock);
13478     }
13479 
13480     /* Abort all iocbs associated with the hba */
13481     lpfc_sli_hba_iocb_abort(phba);
13482 
13483     if (!pci_channel_offline(phba->pcidev))
13484         /* Wait for completion of device XRI exchange busy */
13485         lpfc_sli4_xri_exchange_busy_wait(phba);
13486 
13487     /* per-phba callback de-registration for hotplug event */
13488     if (phba->pport)
13489         lpfc_cpuhp_remove(phba);
13490 
13491     /* Disable PCI subsystem interrupt */
13492     lpfc_sli4_disable_intr(phba);
13493 
13494     /* Disable SR-IOV if enabled */
13495     if (phba->cfg_sriov_nr_virtfn)
13496         pci_disable_sriov(pdev);
13497 
13498     /* Stopping the kthread will trigger work_done one more time */
13499     kthread_stop(phba->worker_thread);
13500 
13501     /* Disable FW logging to host memory */
13502     lpfc_ras_stop_fwlog(phba);
13503 
13504     /* Reset SLI4 HBA FCoE function */
13505     lpfc_pci_function_reset(phba);
13506 
13507     /* release all queue allocated resources. */
13508     lpfc_sli4_queue_destroy(phba);
13509 
13510     /* Free RAS DMA memory */
13511     if (phba->ras_fwlog.ras_enabled)
13512         lpfc_sli4_ras_dma_free(phba);
13513 
13514     /* Stop the SLI4 device port */
13515     if (phba->pport)
13516         phba->pport->work_port_events = 0;
13517 }
13518 
13519 static uint32_t
13520 lpfc_cgn_crc32(uint32_t crc, u8 byte)
13521 {
13522     uint32_t msb = 0;
13523     uint32_t bit;
13524 
13525     for (bit = 0; bit < 8; bit++) {
13526         msb = (crc >> 31) & 1;
13527         crc <<= 1;
13528 
13529         if (msb ^ (byte & 1)) {
13530             crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13531             crc |= 1;
13532         }
13533         byte >>= 1;
13534     }
13535     return crc;
13536 }
13537 
13538 static uint32_t
13539 lpfc_cgn_reverse_bits(uint32_t wd)
13540 {
13541     uint32_t result = 0;
13542     uint32_t i;
13543 
13544     for (i = 0; i < 32; i++) {
13545         result <<= 1;
13546         result |= (1 & (wd >> i));
13547     }
13548     return result;
13549 }
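/* Worked example (illustrative): the loop above reverses the bit order of a
 * 32-bit word, so lpfc_cgn_reverse_bits(0x00000001) yields 0x80000000 and
 * lpfc_cgn_reverse_bits(0x80000000) yields 0x00000001.
 */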
13550 
13551 /*
13552  * The routine corresponds to the algorithm the HBA firmware
13553  * uses to validate the data integrity.
13554  */
13555 uint32_t
13556 lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13557 {
13558     uint32_t  i;
13559     uint32_t result;
13560     uint8_t  *data = (uint8_t *)ptr;
13561 
13562     for (i = 0; i < byteLen; ++i)
13563         crc = lpfc_cgn_crc32(crc, data[i]);
13564 
13565     result = ~lpfc_cgn_reverse_bits(crc);
13566     return result;
13567 }
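/* Usage sketch (mirrors how this routine is used in
 * lpfc_init_congestion_buf() and lpfc_init_congestion_stat() below): the CRC
 * is computed over the whole congestion info block and stored back in
 * little-endian form.
 *
 *	crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
 *	cp->cgn_info_crc = cpu_to_le32(crc);
 */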
13568 
13569 void
13570 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13571 {
13572     struct lpfc_cgn_info *cp;
13573     struct timespec64 cmpl_time;
13574     struct tm broken;
13575     uint16_t size;
13576     uint32_t crc;
13577 
13578     lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13579             "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13580 
13581     if (!phba->cgn_i)
13582         return;
13583     cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13584 
13585     atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13586     atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13587     atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13588     atomic_set(&phba->cgn_sync_warn_cnt, 0);
13589 
13590     atomic_set(&phba->cgn_driver_evt_cnt, 0);
13591     atomic_set(&phba->cgn_latency_evt_cnt, 0);
13592     atomic64_set(&phba->cgn_latency_evt, 0);
13593     phba->cgn_evt_minute = 0;
13594     phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
13595 
13596     memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
13597     cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13598     cp->cgn_info_version = LPFC_CGN_INFO_V3;
13599 
13600     /* cgn parameters */
13601     cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13602     cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13603     cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13604     cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13605 
13606     ktime_get_real_ts64(&cmpl_time);
13607     time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13608 
13609     cp->cgn_info_month = broken.tm_mon + 1;
13610     cp->cgn_info_day = broken.tm_mday;
13611     cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */
13612     cp->cgn_info_hour = broken.tm_hour;
13613     cp->cgn_info_minute = broken.tm_min;
13614     cp->cgn_info_second = broken.tm_sec;
13615 
13616     lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13617             "2643 CGNInfo Init: Start Time "
13618             "%d/%d/%d %d:%d:%d\n",
13619             cp->cgn_info_day, cp->cgn_info_month,
13620             cp->cgn_info_year, cp->cgn_info_hour,
13621             cp->cgn_info_minute, cp->cgn_info_second);
13622 
13623     /* Fill in default LUN qdepth */
13624     if (phba->pport) {
13625         size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13626         cp->cgn_lunq = cpu_to_le16(size);
13627     }
13628 
13629     /* last used Index initialized to 0xff already */
13630 
13631     cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13632     cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13633     crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13634     cp->cgn_info_crc = cpu_to_le32(crc);
13635 
13636     phba->cgn_evt_timestamp = jiffies +
13637         msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13638 }
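/* Note (illustrative): struct tm's tm_year counts years since 1900, so the
 * "tm_year - 100" conversion above stores the year relative to 2000; for a
 * start time in 2022, broken.tm_year is 122 and cgn_info_year is 22.
 */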
13639 
13640 void
13641 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13642 {
13643     struct lpfc_cgn_info *cp;
13644     struct timespec64 cmpl_time;
13645     struct tm broken;
13646     uint32_t crc;
13647 
13648     lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13649             "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13650 
13651     if (!phba->cgn_i)
13652         return;
13653 
13654     cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13655     memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
13656 
13657     ktime_get_real_ts64(&cmpl_time);
13658     time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13659 
13660     cp->cgn_stat_month = broken.tm_mon + 1;
13661     cp->cgn_stat_day = broken.tm_mday;
13662     cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */
13663     cp->cgn_stat_hour = broken.tm_hour;
13664     cp->cgn_stat_minute = broken.tm_min;
13665 
13666     lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13667             "2647 CGNstat Init: Start Time "
13668             "%d/%d/%d %d:%d\n",
13669             cp->cgn_stat_day, cp->cgn_stat_month,
13670             cp->cgn_stat_year, cp->cgn_stat_hour,
13671             cp->cgn_stat_minute);
13672 
13673     crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13674     cp->cgn_info_crc = cpu_to_le32(crc);
13675 }
13676 
13677 /**
13678  * __lpfc_reg_congestion_buf - register congestion info buffer with HBA
13679  * @phba: Pointer to hba context object.
13680  * @reg: flag to determine register or unregister.
13681  */
13682 static int
13683 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13684 {
13685     struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13686     union  lpfc_sli4_cfg_shdr *shdr;
13687     uint32_t shdr_status, shdr_add_status;
13688     LPFC_MBOXQ_t *mboxq;
13689     int length, rc;
13690 
13691     if (!phba->cgn_i)
13692         return -ENXIO;
13693 
13694     mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13695     if (!mboxq) {
13696         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13697                 "2641 REG_CONGESTION_BUF mbox allocation fail: "
13698                 "HBA state x%x reg %d\n",
13699                 phba->pport->port_state, reg);
13700         return -ENOMEM;
13701     }
13702 
13703     length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13704         sizeof(struct lpfc_sli4_cfg_mhdr));
13705     lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13706              LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13707              LPFC_SLI4_MBX_EMBED);
13708     reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13709     bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13710     if (reg > 0)
13711         bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13712     else
13713         bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13714     reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13715     reg_congestion_buf->addr_lo =
13716         putPaddrLow(phba->cgn_i->phys);
13717     reg_congestion_buf->addr_hi =
13718         putPaddrHigh(phba->cgn_i->phys);
13719 
13720     rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13721     shdr = (union lpfc_sli4_cfg_shdr *)
13722         &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13723     shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13724     shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13725                  &shdr->response);
13726     mempool_free(mboxq, phba->mbox_mem_pool);
13727     if (shdr_status || shdr_add_status || rc) {
13728         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13729                 "2642 REG_CONGESTION_BUF mailbox "
13730                 "failed with status x%x add_status x%x,"
13731                 " mbx status x%x reg %d\n",
13732                 shdr_status, shdr_add_status, rc, reg);
13733         return -ENXIO;
13734     }
13735     return 0;
13736 }
13737 
13738 int
13739 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13740 {
13741     lpfc_cmf_stop(phba);
13742     return __lpfc_reg_congestion_buf(phba, 0);
13743 }
13744 
13745 int
13746 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13747 {
13748     return __lpfc_reg_congestion_buf(phba, 1);
13749 }
13750 
13751 /**
13752  * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
13753  * @phba: Pointer to HBA context object.
13754  * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
13755  *
13756  * This function is called in the SLI4 code path to read the port's
13757  * sli4 capabilities.
13758  *
13759  * This function may be called from any context that can block-wait
13760  * for the completion.  The expectation is that this routine is called
13761  * typically from probe_one or from the online routine.
13762  **/
13763 int
13764 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13765 {
13766     int rc;
13767     struct lpfc_mqe *mqe = &mboxq->u.mqe;
13768     struct lpfc_pc_sli4_params *sli4_params;
13769     uint32_t mbox_tmo;
13770     int length;
13771     bool exp_wqcq_pages = true;
13772     struct lpfc_sli4_parameters *mbx_sli4_parameters;
13773 
13774     /*
13775      * By default, the driver assumes the SLI4 port requires RPI
13776      * header postings.  The SLI4_PARAM response will correct this
13777      * assumption.
13778      */
13779     phba->sli4_hba.rpi_hdrs_in_use = 1;
13780 
13781     /* Read the port's SLI4 Config Parameters */
13782     length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13783           sizeof(struct lpfc_sli4_cfg_mhdr));
13784     lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13785              LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13786              length, LPFC_SLI4_MBX_EMBED);
13787     if (!phba->sli4_hba.intr_enable)
13788         rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13789     else {
13790         mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13791         rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13792     }
13793     if (unlikely(rc))
13794         return rc;
13795     sli4_params = &phba->sli4_hba.pc_sli4_params;
13796     mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13797     sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13798     sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13799     sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13800     sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13801                          mbx_sli4_parameters);
13802     sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13803                          mbx_sli4_parameters);
13804     if (bf_get(cfg_phwq, mbx_sli4_parameters))
13805         phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13806     else
13807         phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13808     sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13809     sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13810                        mbx_sli4_parameters);
13811     sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13812     sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13813     sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13814     sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13815     sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13816     sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13817     sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13818     sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13819     sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13820     sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13821     sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13822                         mbx_sli4_parameters);
13823     sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
13824     sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
13825                        mbx_sli4_parameters);
13826     phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13827     phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13828 
13829     /* Check for Extended Pre-Registered SGL support */
13830     phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13831 
13832     /* Check for firmware nvme support */
13833     rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
13834              bf_get(cfg_xib, mbx_sli4_parameters));
13835 
13836     if (rc) {
13837         /* Save this to indicate the Firmware supports NVME */
13838         sli4_params->nvme = 1;
13839 
13840         /* Firmware NVME support, check driver FC4 NVME support */
13841         if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13842             lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13843                     "6133 Disabling NVME support: "
13844                     "FC4 type not supported: x%x\n",
13845                     phba->cfg_enable_fc4_type);
13846             goto fcponly;
13847         }
13848     } else {
13849         /* No firmware NVME support, check driver FC4 NVME support */
13850         sli4_params->nvme = 0;
13851         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13852             lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13853                     "6101 Disabling NVME support: Not "
13854                     "supported by firmware (%d %d) x%x\n",
13855                     bf_get(cfg_nvme, mbx_sli4_parameters),
13856                     bf_get(cfg_xib, mbx_sli4_parameters),
13857                     phba->cfg_enable_fc4_type);
13858 fcponly:
13859             phba->nvmet_support = 0;
13860             phba->cfg_nvmet_mrq = 0;
13861             phba->cfg_nvme_seg_cnt = 0;
13862 
13863             /* If no FC4 type support, move to just SCSI support */
13864             if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13865                 return -ENODEV;
13866             phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13867         }
13868     }
13869 
13870     /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
13871      * accommodate 512K and 1M IOs in a single nvme buf.
13872      */
13873     if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13874         phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13875 
13876     /* Enable embedded Payload BDE if support is indicated */
13877     if (bf_get(cfg_pbde, mbx_sli4_parameters))
13878         phba->cfg_enable_pbde = 1;
13879     else
13880         phba->cfg_enable_pbde = 0;
13881 
13882     /*
13883      * To support the Suppress Response feature we must satisfy 3 conditions.
13884      * lpfc_suppress_rsp module parameter must be set (default).
13885      * In SLI4-Parameters Descriptor:
13886      * Extended Inline Buffers (XIB) must be supported.
13887      * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
13888      * (double negative).
13889      */
13890     if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13891         !(bf_get(cfg_nosr, mbx_sli4_parameters)))
13892         phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13893     else
13894         phba->cfg_suppress_rsp = 0;
13895 
13896     if (bf_get(cfg_eqdr, mbx_sli4_parameters))
13897         phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13898 
13899     /* Make sure that sge_supp_len can be handled by the driver */
13900     if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
13901         sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
13902 
13903     /*
13904      * Check whether the adapter supports an embedded copy of the
13905      * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
13906      * to use this option, 128-byte WQEs must be used.
13907      */
13908     if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
13909         phba->fcp_embed_io = 1;
13910     else
13911         phba->fcp_embed_io = 0;
13912 
13913     lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13914             "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
13915             bf_get(cfg_xib, mbx_sli4_parameters),
13916             phba->cfg_enable_pbde,
13917             phba->fcp_embed_io, sli4_params->nvme,
13918             phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13919 
13920     if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13921         LPFC_SLI_INTF_IF_TYPE_2) &&
13922         (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13923          LPFC_SLI_INTF_FAMILY_LNCR_A0))
13924         exp_wqcq_pages = false;
13925 
13926     if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
13927         (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
13928         exp_wqcq_pages &&
13929         (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
13930         phba->enab_exp_wqcq_pages = 1;
13931     else
13932         phba->enab_exp_wqcq_pages = 0;
13933     /*
13934      * Check if the SLI port supports MDS Diagnostics
13935      */
13936     if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
13937         phba->mds_diags_support = 1;
13938     else
13939         phba->mds_diags_support = 0;
13940 
13941     /*
13942      * Check if the SLI port supports NSLER
13943      */
13944     if (bf_get(cfg_nsler, mbx_sli4_parameters))
13945         phba->nsler = 1;
13946     else
13947         phba->nsler = 0;
13948 
13949     return 0;
13950 }
13951 
13952 /**
13953  * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
13954  * @pdev: pointer to PCI device
13955  * @pid: pointer to PCI device identifier
13956  *
13957  * This routine is to be called to attach a device with SLI-3 interface spec
13958  * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
13959  * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
13960  * information of the device and driver to see if the driver states that it can
13961  * support this kind of device. If the match is successful, the driver core
13962  * invokes this routine. If this routine determines it can claim the HBA, it
13963  * does all the initialization that it needs to do to handle the HBA properly.
13964  *
13965  * Return code
13966  *  0 - driver can claim the device
13967  *  negative value - driver can not claim the device
13968  **/
13969 static int
13970 lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
13971 {
13972     struct lpfc_hba   *phba;
13973     struct lpfc_vport *vport = NULL;
13974     struct Scsi_Host  *shost = NULL;
13975     int error;
13976     uint32_t cfg_mode, intr_mode;
13977 
13978     /* Allocate memory for HBA structure */
13979     phba = lpfc_hba_alloc(pdev);
13980     if (!phba)
13981         return -ENOMEM;
13982 
13983     /* Perform generic PCI device enabling operation */
13984     error = lpfc_enable_pci_dev(phba);
13985     if (error)
13986         goto out_free_phba;
13987 
13988     /* Set up SLI API function jump table for PCI-device group-0 HBAs */
13989     error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13990     if (error)
13991         goto out_disable_pci_dev;
13992 
13993     /* Set up SLI-3 specific device PCI memory space */
13994     error = lpfc_sli_pci_mem_setup(phba);
13995     if (error) {
13996         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13997                 "1402 Failed to set up pci memory space.\n");
13998         goto out_disable_pci_dev;
13999     }
14000 
14001     /* Set up SLI-3 specific device driver resources */
14002     error = lpfc_sli_driver_resource_setup(phba);
14003     if (error) {
14004         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14005                 "1404 Failed to set up driver resource.\n");
14006         goto out_unset_pci_mem_s3;
14007     }
14008 
14009     /* Initialize and populate the iocb list per host */
14010 
14011     error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
14012     if (error) {
14013         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14014                 "1405 Failed to initialize iocb list.\n");
14015         goto out_unset_driver_resource_s3;
14016     }
14017 
14018     /* Set up common device driver resources */
14019     error = lpfc_setup_driver_resource_phase2(phba);
14020     if (error) {
14021         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14022                 "1406 Failed to set up driver resource.\n");
14023         goto out_free_iocb_list;
14024     }
14025 
14026     /* Get the default values for Model Name and Description */
14027     lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14028 
14029     /* Create SCSI host to the physical port */
14030     error = lpfc_create_shost(phba);
14031     if (error) {
14032         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14033                 "1407 Failed to create scsi host.\n");
14034         goto out_unset_driver_resource;
14035     }
14036 
14037     /* Configure sysfs attributes */
14038     vport = phba->pport;
14039     error = lpfc_alloc_sysfs_attr(vport);
14040     if (error) {
14041         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14042                 "1476 Failed to allocate sysfs attr\n");
14043         goto out_destroy_shost;
14044     }
14045 
14046     shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14047     /* Now, try to enable interrupts and bring up the device */
14048     cfg_mode = phba->cfg_use_msi;
14049     while (true) {
14050         /* Put device to a known state before enabling interrupt */
14051         lpfc_stop_port(phba);
14052         /* Configure and enable interrupt */
14053         intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
14054         if (intr_mode == LPFC_INTR_ERROR) {
14055             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14056                     "0431 Failed to enable interrupt.\n");
14057             error = -ENODEV;
14058             goto out_free_sysfs_attr;
14059         }
14060         /* SLI-3 HBA setup */
14061         if (lpfc_sli_hba_setup(phba)) {
14062             lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14063                     "1477 Failed to set up hba\n");
14064             error = -ENODEV;
14065             goto out_remove_device;
14066         }
14067 
14068         /* Wait 50ms for the interrupts of previous mailbox commands */
14069         msleep(50);
14070         /* Check active interrupts on message signaled interrupts */
14071         if (intr_mode == 0 ||
14072             phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
14073             /* Log the current active interrupt mode */
14074             phba->intr_mode = intr_mode;
14075             lpfc_log_intr_mode(phba, intr_mode);
14076             break;
14077         } else {
14078             lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14079                     "0447 Configure interrupt mode (%d) "
14080                     "failed active interrupt test.\n",
14081                     intr_mode);
14082             /* Disable the current interrupt mode */
14083             lpfc_sli_disable_intr(phba);
14084             /* Try next level of interrupt mode */
14085             cfg_mode = --intr_mode;
14086         }
14087     }
14088 
14089     /* Perform post initialization setup */
14090     lpfc_post_init_setup(phba);
14091 
14092     /* Check if there are static vports to be created. */
14093     lpfc_create_static_vport(phba);
14094 
14095     return 0;
14096 
14097 out_remove_device:
14098     lpfc_unset_hba(phba);
14099 out_free_sysfs_attr:
14100     lpfc_free_sysfs_attr(vport);
14101 out_destroy_shost:
14102     lpfc_destroy_shost(phba);
14103 out_unset_driver_resource:
14104     lpfc_unset_driver_resource_phase2(phba);
14105 out_free_iocb_list:
14106     lpfc_free_iocb_list(phba);
14107 out_unset_driver_resource_s3:
14108     lpfc_sli_driver_resource_unset(phba);
14109 out_unset_pci_mem_s3:
14110     lpfc_sli_pci_mem_unset(phba);
14111 out_disable_pci_dev:
14112     lpfc_disable_pci_dev(phba);
14113     if (shost)
14114         scsi_host_put(shost);
14115 out_free_phba:
14116     lpfc_hba_free(phba);
14117     return error;
14118 }
14119 
14120 /**
14121  * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
14122  * @pdev: pointer to PCI device
14123  *
14124  * This routine is to be called to detach a device with SLI-3 interface
14125  * spec from PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
14126  * removed from PCI bus, it performs all the necessary cleanup for the HBA
14127  * device to be removed from the PCI subsystem properly.
14128  **/
14129 static void
14130 lpfc_pci_remove_one_s3(struct pci_dev *pdev)
14131 {
14132     struct Scsi_Host  *shost = pci_get_drvdata(pdev);
14133     struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14134     struct lpfc_vport **vports;
14135     struct lpfc_hba   *phba = vport->phba;
14136     int i;
14137 
14138     spin_lock_irq(&phba->hbalock);
14139     vport->load_flag |= FC_UNLOADING;
14140     spin_unlock_irq(&phba->hbalock);
14141 
14142     lpfc_free_sysfs_attr(vport);
14143 
14144     /* Release all the vports against this physical port */
14145     vports = lpfc_create_vport_work_array(phba);
14146     if (vports != NULL)
14147         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14148             if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14149                 continue;
14150             fc_vport_terminate(vports[i]->fc_vport);
14151         }
14152     lpfc_destroy_vport_work_array(phba, vports);
14153 
14154     /* Remove FC host with the physical port */
14155     fc_remove_host(shost);
14156     scsi_remove_host(shost);
14157 
14158     /* Clean up all nodes, mailboxes and IOs. */
14159     lpfc_cleanup(vport);
14160 
14161     /*
14162      * Bring down the SLI Layer. This step disables all interrupts,
14163      * clears the rings, discards all mailbox commands, and resets
14164      * the HBA.
14165      */
14166 
14167     /* HBA interrupt will be disabled after this call */
14168     lpfc_sli_hba_down(phba);
14169     /* Stopping the kthread will trigger work_done one more time */
14170     kthread_stop(phba->worker_thread);
14171     /* Final cleanup of txcmplq and reset the HBA */
14172     lpfc_sli_brdrestart(phba);
14173 
14174     kfree(phba->vpi_bmask);
14175     kfree(phba->vpi_ids);
14176 
14177     lpfc_stop_hba_timers(phba);
14178     spin_lock_irq(&phba->port_list_lock);
14179     list_del_init(&vport->listentry);
14180     spin_unlock_irq(&phba->port_list_lock);
14181 
14182     lpfc_debugfs_terminate(vport);
14183 
14184     /* Disable SR-IOV if enabled */
14185     if (phba->cfg_sriov_nr_virtfn)
14186         pci_disable_sriov(pdev);
14187 
14188     /* Disable interrupt */
14189     lpfc_sli_disable_intr(phba);
14190 
14191     scsi_host_put(shost);
14192 
14193     /*
14194      * Call scsi_free before mem_free since scsi bufs are released to their
14195      * corresponding pools here.
14196      */
14197     lpfc_scsi_free(phba);
14198     lpfc_free_iocb_list(phba);
14199 
14200     lpfc_mem_free_all(phba);
14201 
14202     dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
14203               phba->hbqslimp.virt, phba->hbqslimp.phys);
14204 
14205     /* Free resources associated with SLI2 interface */
14206     dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
14207               phba->slim2p.virt, phba->slim2p.phys);
14208 
14209     /* unmap adapter SLIM and Control Registers */
14210     iounmap(phba->ctrl_regs_memmap_p);
14211     iounmap(phba->slim_memmap_p);
14212 
14213     lpfc_hba_free(phba);
14214 
14215     pci_release_mem_regions(pdev);
14216     pci_disable_device(pdev);
14217 }
14218 
14219 /**
14220  * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
14221  * @dev_d: pointer to device
14222  *
14223  * This routine is to be called from the kernel's PCI subsystem to support
14224  * system Power Management (PM) to device with SLI-3 interface spec. When
14225  * PM invokes this method, it quiesces the device by stopping the driver's
14226  * worker thread for the device, turning off the device's interrupt and DMA,
14227  * and bringing the device offline. Note that, as the driver implements only
14228  * the minimum PM requirements of a power-aware driver for suspend/resume,
14229  * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) delivered to the
14230  * suspend() method call are treated as SUSPEND, and the driver fully
14231  * reinitializes its device during the resume() method call; the driver
14232  * therefore sets the device to the PCI_D3hot state in PCI config space
14233  * instead of setting it according to the @msg provided by the PM.
14234  *
14235  * Return code
14236  *  0 - driver suspended the device
14237  *  Error otherwise
14238  **/
14239 static int __maybe_unused
14240 lpfc_pci_suspend_one_s3(struct device *dev_d)
14241 {
14242     struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14243     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14244 
14245     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14246             "0473 PCI device Power Management suspend.\n");
14247 
14248     /* Bring down the device */
14249     lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14250     lpfc_offline(phba);
14251     kthread_stop(phba->worker_thread);
14252 
14253     /* Disable interrupt from device */
14254     lpfc_sli_disable_intr(phba);
14255 
14256     return 0;
14257 }
14258 
14259 /**
14260  * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
14261  * @dev_d: pointer to device
14262  *
14263  * This routine is to be called from the kernel's PCI subsystem to support
14264  * system Power Management (PM) to device with SLI-3 interface spec. When PM
14265  * invokes this method, it restores the device's PCI config space state and
14266  * fully reinitializes the device and brings it online. Note that, as the
14267  * driver implements only the minimum PM requirements of a power-aware driver
14268  * for suspend/resume, all possible PM messages (SUSPEND, HIBERNATE, FREEZE)
14269  * delivered to the suspend() method call are treated as SUSPEND and the
14270  * driver fully reinitializes its device during the resume() method call;
14271  * the device is therefore set to PCI_D0 directly in PCI config space before
14272  * restoring the state.
14273  *
14274  * Return code
14275  *  0 - driver resumed the device
14276  *  Error otherwise
14277  **/
14278 static int __maybe_unused
14279 lpfc_pci_resume_one_s3(struct device *dev_d)
14280 {
14281     struct Scsi_Host *shost = dev_get_drvdata(dev_d);
14282     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14283     uint32_t intr_mode;
14284     int error;
14285 
14286     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14287             "0452 PCI device Power Management resume.\n");
14288 
14289     /* Startup the kernel thread for this host adapter. */
14290     phba->worker_thread = kthread_run(lpfc_do_work, phba,
14291                     "lpfc_worker_%d", phba->brd_no);
14292     if (IS_ERR(phba->worker_thread)) {
14293         error = PTR_ERR(phba->worker_thread);
14294         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14295                 "0434 PM resume failed to start worker "
14296                 "thread: error=x%x.\n", error);
14297         return error;
14298     }
14299 
14300     /* Init cpu_map array */
14301     lpfc_cpu_map_array_init(phba);
14302     /* Init hba_eq_hdl array */
14303     lpfc_hba_eq_hdl_array_init(phba);
14304     /* Configure and enable interrupt */
14305     intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14306     if (intr_mode == LPFC_INTR_ERROR) {
14307         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14308                 "0430 PM resume Failed to enable interrupt\n");
14309         return -EIO;
14310     } else
14311         phba->intr_mode = intr_mode;
14312 
14313     /* Restart HBA and bring it online */
14314     lpfc_sli_brdrestart(phba);
14315     lpfc_online(phba);
14316 
14317     /* Log the current active interrupt mode */
14318     lpfc_log_intr_mode(phba, phba->intr_mode);
14319 
14320     return 0;
14321 }
14322 
14323 /**
14324  * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
14325  * @phba: pointer to lpfc hba data structure.
14326  *
14327  * This routine is called to prepare the SLI3 device for PCI slot recover. It
14328  * aborts all the outstanding SCSI I/Os to the pci device.
14329  **/
14330 static void
14331 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14332 {
14333     lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14334             "2723 PCI channel I/O abort preparing for recovery\n");
14335 
14336     /*
14337      * There may be errored I/Os through HBA, abort all I/Os on txcmplq
14338      * and let the SCSI mid-layer retry them to recover.
14339      */
14340     lpfc_sli_abort_fcp_rings(phba);
14341 }
14342 
14343 /**
14344  * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
14345  * @phba: pointer to lpfc hba data structure.
14346  *
14347  * This routine is called to prepare the SLI3 device for PCI slot reset. It
14348  * disables the device interrupt and pci device, and aborts the internal FCP
14349  * pending I/Os.
14350  **/
14351 static void
14352 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14353 {
14354     lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14355             "2710 PCI channel disable preparing for reset\n");
14356 
14357     /* Block any management I/Os to the device */
14358     lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14359 
14360     /* Block all SCSI devices' I/Os on the host */
14361     lpfc_scsi_dev_block(phba);
14362 
14363     /* Flush all driver's outstanding SCSI I/Os as we are to reset */
14364     lpfc_sli_flush_io_rings(phba);
14365 
14366     /* stop all timers */
14367     lpfc_stop_hba_timers(phba);
14368 
14369     /* Disable interrupt and pci device */
14370     lpfc_sli_disable_intr(phba);
14371     pci_disable_device(phba->pcidev);
14372 }
14373 
14374 /**
14375  * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
14376  * @phba: pointer to lpfc hba data structure.
14377  *
14378  * This routine is called to prepare the SLI3 device for PCI slot permanently
14379  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
14380  * pending I/Os.
14381  **/
14382 static void
14383 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14384 {
14385     lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14386             "2711 PCI channel permanent disable for failure\n");
14387     /* Block all SCSI devices' I/Os on the host */
14388     lpfc_scsi_dev_block(phba);
14389     lpfc_sli4_prep_dev_for_reset(phba);
14390 
14391     /* stop all timers */
14392     lpfc_stop_hba_timers(phba);
14393 
14394     /* Clean up all driver's outstanding SCSI I/Os */
14395     lpfc_sli_flush_io_rings(phba);
14396 }
14397 
14398 /**
14399  * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
14400  * @pdev: pointer to PCI device.
14401  * @state: the current PCI connection state.
14402  *
14403  * This routine is called from the PCI subsystem for I/O error handling to
14404  * device with SLI-3 interface spec. This function is called by the PCI
14405  * subsystem after a PCI bus error affecting this device has been detected.
14406  * When this function is invoked, it will need to stop all the I/Os and
14407  * interrupt(s) to the device. Once that is done, it will return
14408  * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
14409  * as desired.
14410  *
14411  * Return codes
14412  *  PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
14413  *  PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
14414  *  PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14415  **/
14416 static pci_ers_result_t
14417 lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
14418 {
14419     struct Scsi_Host *shost = pci_get_drvdata(pdev);
14420     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14421 
14422     switch (state) {
14423     case pci_channel_io_normal:
14424         /* Non-fatal error, prepare for recovery */
14425         lpfc_sli_prep_dev_for_recover(phba);
14426         return PCI_ERS_RESULT_CAN_RECOVER;
14427     case pci_channel_io_frozen:
14428         /* Fatal error, prepare for slot reset */
14429         lpfc_sli_prep_dev_for_reset(phba);
14430         return PCI_ERS_RESULT_NEED_RESET;
14431     case pci_channel_io_perm_failure:
14432         /* Permanent failure, prepare for device down */
14433         lpfc_sli_prep_dev_for_perm_failure(phba);
14434         return PCI_ERS_RESULT_DISCONNECT;
14435     default:
14436         /* Unknown state, prepare and request slot reset */
14437         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14438                 "0472 Unknown PCI error state: x%x\n", state);
14439         lpfc_sli_prep_dev_for_reset(phba);
14440         return PCI_ERS_RESULT_NEED_RESET;
14441     }
14442 }
14443 
14444 /**
14445  * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
14446  * @pdev: pointer to PCI device.
14447  *
14448  * This routine is called from the PCI subsystem for error handling to
14449  * device with SLI-3 interface spec. This is called after PCI bus has been
14450  * reset to restart the PCI card from scratch, as if from a cold-boot.
14451  * During the PCI subsystem error recovery, after driver returns
14452  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
14453  * recovery and then call this routine before calling the .resume method
14454  * to recover the device. This function will initialize the HBA device,
14455  * enable the interrupt, but it will just put the HBA to offline state
14456  * without passing any I/O traffic.
14457  *
14458  * Return codes
14459  *  PCI_ERS_RESULT_RECOVERED - the device has been recovered
14460  *  PCI_ERS_RESULT_DISCONNECT - device could not be recovered
14461  */
14462 static pci_ers_result_t
14463 lpfc_io_slot_reset_s3(struct pci_dev *pdev)
14464 {
14465     struct Scsi_Host *shost = pci_get_drvdata(pdev);
14466     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14467     struct lpfc_sli *psli = &phba->sli;
14468     uint32_t intr_mode;
14469 
14470     dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
14471     if (pci_enable_device_mem(pdev)) {
14472         printk(KERN_ERR "lpfc: Cannot re-enable "
14473             "PCI device after reset.\n");
14474         return PCI_ERS_RESULT_DISCONNECT;
14475     }
14476 
14477     pci_restore_state(pdev);
14478 
14479     /*
14480      * As the new kernel behavior of pci_restore_state() API call clears
14481      * device saved_state flag, need to save the restored state again.
14482      */
14483     pci_save_state(pdev);
14484 
14485     if (pdev->is_busmaster)
14486         pci_set_master(pdev);
14487 
14488     spin_lock_irq(&phba->hbalock);
14489     psli->sli_flag &= ~LPFC_SLI_ACTIVE;
14490     spin_unlock_irq(&phba->hbalock);
14491 
14492     /* Configure and enable interrupt */
14493     intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14494     if (intr_mode == LPFC_INTR_ERROR) {
14495         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14496                 "0427 Cannot re-enable interrupt after "
14497                 "slot reset.\n");
14498         return PCI_ERS_RESULT_DISCONNECT;
14499     } else
14500         phba->intr_mode = intr_mode;
14501 
14502     /* Take device offline, it will perform cleanup */
14503     lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14504     lpfc_offline(phba);
14505     lpfc_sli_brdrestart(phba);
14506 
14507     /* Log the current active interrupt mode */
14508     lpfc_log_intr_mode(phba, phba->intr_mode);
14509 
14510     return PCI_ERS_RESULT_RECOVERED;
14511 }
14512 
14513 /**
14514  * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
14515  * @pdev: pointer to PCI device
14516  *
14517  * This routine is called from the PCI subsystem for error handling to device
14518  * with SLI-3 interface spec. It is called when kernel error recovery tells
14519  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
14520  * error recovery. After this call, traffic can start to flow from this device
14521  * again.
14522  */
14523 static void
14524 lpfc_io_resume_s3(struct pci_dev *pdev)
14525 {
14526     struct Scsi_Host *shost = pci_get_drvdata(pdev);
14527     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14528 
14529     /* Bring device online, it will be no-op for non-fatal error resume */
14530     lpfc_online(phba);
14531 }
14532 
14533 /**
14534  * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
14535  * @phba: pointer to lpfc hba data structure.
14536  *
14537  * returns the number of ELS/CT IOCBs to reserve
14538  **/
14539 int
14540 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14541 {
14542     int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14543 
14544     if (phba->sli_rev == LPFC_SLI_REV4) {
14545         if (max_xri <= 100)
14546             return 10;
14547         else if (max_xri <= 256)
14548             return 25;
14549         else if (max_xri <= 512)
14550             return 50;
14551         else if (max_xri <= 1024)
14552             return 100;
14553         else if (max_xri <= 1536)
14554             return 150;
14555         else if (max_xri <= 2048)
14556             return 200;
14557         else
14558             return 250;
14559     } else
14560         return 0;
14561 }
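
/*
 * Illustrative sketch only (not part of the driver): the if/else ladder in
 * lpfc_sli4_get_els_iocb_cnt() above maps the configured max_xri into a
 * reserved ELS/CT IOCB count for SLI-4 ports.  The same tiers could be
 * expressed as a small lookup table; the thresholds and counts below simply
 * restate the ladder above:
 *
 *	static const struct { int max_xri; int els_cnt; } els_tiers[] = {
 *		{  100,  10 }, {  256,  25 }, {  512,  50 },
 *		{ 1024, 100 }, { 1536, 150 }, { 2048, 200 },
 *	};
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(els_tiers); i++)
 *		if (max_xri <= els_tiers[i].max_xri)
 *			return els_tiers[i].els_cnt;
 *	return 250;
 */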
14562 
14563 /**
14564  * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
14565  * @phba: pointer to lpfc hba data structure.
14566  *
14567  * returns the number of ELS/CT + NVMET IOCBs to reserve
14568  **/
14569 int
14570 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14571 {
14572     int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14573 
14574     if (phba->nvmet_support)
14575         max_xri += LPFC_NVMET_BUF_POST;
14576     return max_xri;
14577 }
14578 
14579 
14580 static int
14581 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14582     uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
14583     const struct firmware *fw)
14584 {
14585     int rc;
14586     u8 sli_family;
14587 
14588     sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14589     /* Three cases:  (1) FW was not supported on the detected adapter.
14590      * (2) FW update has been locked out administratively.
14591      * (3) Some other error during FW update.
14592      * In each case, an unmaskable message is written to the console
14593      * for admin diagnosis.
14594      */
14595     if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
14596         (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
14597          magic_number != MAGIC_NUMBER_G6) ||
14598         (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
14599          magic_number != MAGIC_NUMBER_G7) ||
14600         (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
14601          magic_number != MAGIC_NUMBER_G7P)) {
14602         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14603                 "3030 This firmware version is not supported on"
14604                 " this HBA model. Device:%x Magic:%x Type:%x "
14605                 "ID:%x Size %d %zd\n",
14606                 phba->pcidev->device, magic_number, ftype, fid,
14607                 fsize, fw->size);
14608         rc = -EINVAL;
14609     } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
14610         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14611                 "3021 Firmware downloads have been prohibited "
14612                 "by a system configuration setting on "
14613                 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14614                 "%zd\n",
14615                 phba->pcidev->device, magic_number, ftype, fid,
14616                 fsize, fw->size);
14617         rc = -EACCES;
14618     } else {
14619         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14620                 "3022 FW Download failed. Add Status x%x "
14621                 "Device:%x Magic:%x Type:%x ID:%x Size %d "
14622                 "%zd\n",
14623                 offset, phba->pcidev->device, magic_number,
14624                 ftype, fid, fsize, fw->size);
14625         rc = -EIO;
14626     }
14627     return rc;
14628 }
14629 
14630 /**
14631  * lpfc_write_firmware - attempt to write a firmware image to the port
14632  * @fw: pointer to firmware image returned from request_firmware.
14633  * @context: pointer to the lpfc hba data structure, passed as the opaque context.
14634  *
14635  **/
14636 static void
14637 lpfc_write_firmware(const struct firmware *fw, void *context)
14638 {
14639     struct lpfc_hba *phba = (struct lpfc_hba *)context;
14640     char fwrev[FW_REV_STR_SIZE];
14641     struct lpfc_grp_hdr *image;
14642     struct list_head dma_buffer_list;
14643     int i, rc = 0;
14644     struct lpfc_dmabuf *dmabuf, *next;
14645     uint32_t offset = 0, temp_offset = 0;
14646     uint32_t magic_number, ftype, fid, fsize;
14647 
14648     /* It can be null in no-wait mode, sanity check */
14649     if (!fw) {
14650         rc = -ENXIO;
14651         goto out;
14652     }
14653     image = (struct lpfc_grp_hdr *)fw->data;
14654 
14655     magic_number = be32_to_cpu(image->magic_number);
14656     ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
14657     fid = bf_get_be32(lpfc_grp_hdr_id, image);
14658     fsize = be32_to_cpu(image->size);
14659 
14660     INIT_LIST_HEAD(&dma_buffer_list);
14661     lpfc_decode_firmware_rev(phba, fwrev, 1);
14662     if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
14663         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14664                 "3023 Updating Firmware, Current Version:%s "
14665                 "New Version:%s\n",
14666                 fwrev, image->revision);
14667         for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
14668             dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
14669                      GFP_KERNEL);
14670             if (!dmabuf) {
14671                 rc = -ENOMEM;
14672                 goto release_out;
14673             }
14674             dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14675                               SLI4_PAGE_SIZE,
14676                               &dmabuf->phys,
14677                               GFP_KERNEL);
14678             if (!dmabuf->virt) {
14679                 kfree(dmabuf);
14680                 rc = -ENOMEM;
14681                 goto release_out;
14682             }
14683             list_add_tail(&dmabuf->list, &dma_buffer_list);
14684         }
14685         while (offset < fw->size) {
14686             temp_offset = offset;
14687             list_for_each_entry(dmabuf, &dma_buffer_list, list) {
14688                 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
14689                     memcpy(dmabuf->virt,
14690                            fw->data + temp_offset,
14691                            fw->size - temp_offset);
14692                     temp_offset = fw->size;
14693                     break;
14694                 }
14695                 memcpy(dmabuf->virt, fw->data + temp_offset,
14696                        SLI4_PAGE_SIZE);
14697                 temp_offset += SLI4_PAGE_SIZE;
14698             }
14699             rc = lpfc_wr_object(phba, &dma_buffer_list,
14700                     (fw->size - offset), &offset);
14701             if (rc) {
14702                 rc = lpfc_log_write_firmware_error(phba, offset,
14703                                    magic_number,
14704                                    ftype,
14705                                    fid,
14706                                    fsize,
14707                                    fw);
14708                 goto release_out;
14709             }
14710         }
14711         rc = offset;
14712     } else
14713         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14714                 "3029 Skipped Firmware update, Current "
14715                 "Version:%s New Version:%s\n",
14716                 fwrev, image->revision);
14717 
14718 release_out:
14719     list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
14720         list_del(&dmabuf->list);
14721         dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14722                   dmabuf->virt, dmabuf->phys);
14723         kfree(dmabuf);
14724     }
14725     release_firmware(fw);
14726 out:
14727     if (rc < 0)
14728         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14729                 "3062 Firmware update error, status %d.\n", rc);
14730     else
14731         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14732                 "3024 Firmware update success: size %d.\n", rc);
14733 }
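
/*
 * Illustrative sketch only (not part of the driver): the download loop in
 * lpfc_write_firmware() above stages the image through a fixed list of
 * SLI4_PAGE_SIZE DMA buffers and lets lpfc_wr_object() advance "offset"
 * until the whole image has been written.  Stripped of error handling, the
 * pattern is roughly:
 *
 *	offset = 0;
 *	while (offset < fw->size) {
 *		copy the next portion of fw->data into the DMA buffer list,
 *		at most SLI4_PAGE_SIZE bytes per buffer;
 *		rc = lpfc_wr_object(phba, &dma_buffer_list,
 *				    fw->size - offset, &offset);
 *		if (rc)
 *			break;	stop and decode the returned add-status
 *	}
 */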
14734 
14735 /**
14736  * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
14737  * @phba: pointer to lpfc hba data structure.
14738  * @fw_upgrade: which firmware to update.
14739  *
14740  * This routine is called to perform a Linux generic firmware upgrade on a
14741  * device that supports this feature.
14742  **/
14743 int
14744 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14745 {
14746     uint8_t file_name[ELX_MODEL_NAME_SIZE];
14747     int ret;
14748     const struct firmware *fw;
14749 
14750     /* Only supported on SLI4 interface type 2 for now */
14751     if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14752         LPFC_SLI_INTF_IF_TYPE_2)
14753         return -EPERM;
14754 
14755     snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
14756 
14757     if (fw_upgrade == INT_FW_UPGRADE) {
14758         ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
14759                     file_name, &phba->pcidev->dev,
14760                     GFP_KERNEL, (void *)phba,
14761                     lpfc_write_firmware);
14762     } else if (fw_upgrade == RUN_FW_UPGRADE) {
14763         ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14764         if (!ret)
14765             lpfc_write_firmware(fw, (void *)phba);
14766     } else {
14767         ret = -EINVAL;
14768     }
14769 
14770     return ret;
14771 }
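
/*
 * Illustrative usage sketch (not part of the driver): the routine above
 * expects a firmware image named "<ModelName>.grp" to be available to the
 * kernel firmware loader (typically under /lib/firmware).  With a fully
 * initialized SLI-4 phba, a caller could request either an immediate,
 * synchronous upgrade or a deferred one:
 *
 *	rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
 *	if (rc)
 *		handle missing image, unsupported interface type, etc.;
 *
 *	lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
 *		queues request_firmware_nowait(); lpfc_write_firmware()
 *		then runs later as the completion callback
 */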
14772 
14773 /**
14774  * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
14775  * @pdev: pointer to PCI device
14776  * @pid: pointer to PCI device identifier
14777  *
14778  * This routine is called from the kernel's PCI subsystem to device with
14779  * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14780  * presented on PCI bus, the kernel PCI subsystem looks at the PCI device-specific
14781  * information to determine whether this driver supports the device. If the
14782  * match is successful, the driver
14783  * core invokes this routine. If this routine determines it can claim the HBA,
14784  * it does all the initialization that it needs to do to handle the HBA
14785  * properly.
14786  *
14787  * Return code
14788  *  0 - driver can claim the device
14789  *  negative value - driver can not claim the device
14790  **/
14791 static int
14792 lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
14793 {
14794     struct lpfc_hba   *phba;
14795     struct lpfc_vport *vport = NULL;
14796     struct Scsi_Host  *shost = NULL;
14797     int error;
14798     uint32_t cfg_mode, intr_mode;
14799 
14800     /* Allocate memory for HBA structure */
14801     phba = lpfc_hba_alloc(pdev);
14802     if (!phba)
14803         return -ENOMEM;
14804 
14805     INIT_LIST_HEAD(&phba->poll_list);
14806 
14807     /* Perform generic PCI device enabling operation */
14808     error = lpfc_enable_pci_dev(phba);
14809     if (error)
14810         goto out_free_phba;
14811 
14812     /* Set up SLI API function jump table for PCI-device group-1 HBAs */
14813     error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14814     if (error)
14815         goto out_disable_pci_dev;
14816 
14817     /* Set up SLI-4 specific device PCI memory space */
14818     error = lpfc_sli4_pci_mem_setup(phba);
14819     if (error) {
14820         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14821                 "1410 Failed to set up pci memory space.\n");
14822         goto out_disable_pci_dev;
14823     }
14824 
14825     /* Set up SLI-4 Specific device driver resources */
14826     error = lpfc_sli4_driver_resource_setup(phba);
14827     if (error) {
14828         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14829                 "1412 Failed to set up driver resource.\n");
14830         goto out_unset_pci_mem_s4;
14831     }
14832 
14833     INIT_LIST_HEAD(&phba->active_rrq_list);
14834     INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14835 
14836     /* Set up common device driver resources */
14837     error = lpfc_setup_driver_resource_phase2(phba);
14838     if (error) {
14839         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14840                 "1414 Failed to set up driver resource.\n");
14841         goto out_unset_driver_resource_s4;
14842     }
14843 
14844     /* Get the default values for Model Name and Description */
14845     lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14846 
14847     /* Now, trying to enable interrupt and bring up the device */
14848     cfg_mode = phba->cfg_use_msi;
14849 
14850     /* Put device to a known state before enabling interrupt */
14851     phba->pport = NULL;
14852     lpfc_stop_port(phba);
14853 
14854     /* Init cpu_map array */
14855     lpfc_cpu_map_array_init(phba);
14856 
14857     /* Init hba_eq_hdl array */
14858     lpfc_hba_eq_hdl_array_init(phba);
14859 
14860     /* Configure and enable interrupt */
14861     intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14862     if (intr_mode == LPFC_INTR_ERROR) {
14863         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14864                 "0426 Failed to enable interrupt.\n");
14865         error = -ENODEV;
14866         goto out_unset_driver_resource;
14867     }
14868     /* Default to single EQ for non-MSI-X */
14869     if (phba->intr_type != MSIX) {
14870         phba->cfg_irq_chann = 1;
14871         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14872             if (phba->nvmet_support)
14873                 phba->cfg_nvmet_mrq = 1;
14874         }
14875     }
14876     lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14877 
14878     /* Create SCSI host to the physical port */
14879     error = lpfc_create_shost(phba);
14880     if (error) {
14881         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14882                 "1415 Failed to create scsi host.\n");
14883         goto out_disable_intr;
14884     }
14885     vport = phba->pport;
14886     shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
14887 
14888     /* Configure sysfs attributes */
14889     error = lpfc_alloc_sysfs_attr(vport);
14890     if (error) {
14891         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14892                 "1416 Failed to allocate sysfs attr\n");
14893         goto out_destroy_shost;
14894     }
14895 
14896     /* Set up SLI-4 HBA */
14897     if (lpfc_sli4_hba_setup(phba)) {
14898         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14899                 "1421 Failed to set up hba\n");
14900         error = -ENODEV;
14901         goto out_free_sysfs_attr;
14902     }
14903 
14904     /* Log the current active interrupt mode */
14905     phba->intr_mode = intr_mode;
14906     lpfc_log_intr_mode(phba, intr_mode);
14907 
14908     /* Perform post initialization setup */
14909     lpfc_post_init_setup(phba);
14910 
14911     /* NVME support in FW earlier in the driver load corrects the
14912      * FC4 type, making a check for nvme_support unnecessary.
14913      */
14914     if (phba->nvmet_support == 0) {
14915         if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14916             /* Create NVME binding with nvme_fc_transport. This
14917              * ensures the vport is initialized.  If the localport
14918              * create fails, it should not unload the driver to
14919              * support field issues.
14920              */
14921             error = lpfc_nvme_create_localport(vport);
14922             if (error) {
14923                 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14924                         "6004 NVME registration "
14925                         "failed, error x%x\n",
14926                         error);
14927             }
14928         }
14929     }
14930 
14931     /* check for firmware upgrade or downgrade */
14932     if (phba->cfg_request_firmware_upgrade)
14933         lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14934 
14935     /* Check if there are static vports to be created. */
14936     lpfc_create_static_vport(phba);
14937 
14938     timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14939     cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
14940 
14941     return 0;
14942 
14943 out_free_sysfs_attr:
14944     lpfc_free_sysfs_attr(vport);
14945 out_destroy_shost:
14946     lpfc_destroy_shost(phba);
14947 out_disable_intr:
14948     lpfc_sli4_disable_intr(phba);
14949 out_unset_driver_resource:
14950     lpfc_unset_driver_resource_phase2(phba);
14951 out_unset_driver_resource_s4:
14952     lpfc_sli4_driver_resource_unset(phba);
14953 out_unset_pci_mem_s4:
14954     lpfc_sli4_pci_mem_unset(phba);
14955 out_disable_pci_dev:
14956     lpfc_disable_pci_dev(phba);
14957     if (shost)
14958         scsi_host_put(shost);
14959 out_free_phba:
14960     lpfc_hba_free(phba);
14961     return error;
14962 }
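
/*
 * Note on the error labels above: each out_* label undoes the setup step it
 * is named after, and control falls through the remaining labels so teardown
 * happens in the reverse order of initialization:
 * lpfc_free_sysfs_attr() -> lpfc_destroy_shost() -> lpfc_sli4_disable_intr()
 * -> lpfc_unset_driver_resource_phase2() -> lpfc_sli4_driver_resource_unset()
 * -> lpfc_sli4_pci_mem_unset() -> lpfc_disable_pci_dev() -> lpfc_hba_free().
 */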
14963 
14964 /**
14965  * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
14966  * @pdev: pointer to PCI device
14967  *
14968  * This routine is called from the kernel's PCI subsystem to device with
14969  * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
14970  * removed from PCI bus, it performs all the necessary cleanup for the HBA
14971  * device to be removed from the PCI subsystem properly.
14972  **/
14973 static void
14974 lpfc_pci_remove_one_s4(struct pci_dev *pdev)
14975 {
14976     struct Scsi_Host *shost = pci_get_drvdata(pdev);
14977     struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
14978     struct lpfc_vport **vports;
14979     struct lpfc_hba *phba = vport->phba;
14980     int i;
14981 
14982     /* Mark the device unloading flag */
14983     spin_lock_irq(&phba->hbalock);
14984     vport->load_flag |= FC_UNLOADING;
14985     spin_unlock_irq(&phba->hbalock);
14986     if (phba->cgn_i)
14987         lpfc_unreg_congestion_buf(phba);
14988 
14989     lpfc_free_sysfs_attr(vport);
14990 
14991     /* Release all the vports against this physical port */
14992     vports = lpfc_create_vport_work_array(phba);
14993     if (vports != NULL)
14994         for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14995             if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
14996                 continue;
14997             fc_vport_terminate(vports[i]->fc_vport);
14998         }
14999     lpfc_destroy_vport_work_array(phba, vports);
15000 
15001     /* Remove FC host with the physical port */
15002     fc_remove_host(shost);
15003     scsi_remove_host(shost);
15004 
15005     /* Perform ndlp cleanup on the physical port.  The nvme and nvmet
15006      * localports are destroyed afterwards to clean up all transport memory.
15007      */
15008     lpfc_cleanup(vport);
15009     lpfc_nvmet_destroy_targetport(phba);
15010     lpfc_nvme_destroy_localport(vport);
15011 
15012     /* De-allocate multi-XRI pools */
15013     if (phba->cfg_xri_rebalancing)
15014         lpfc_destroy_multixri_pools(phba);
15015 
15016     /*
15017      * Bring down the SLI Layer. This step disables all interrupts,
15018      * clears the rings, discards all mailbox commands, and resets
15019      * the HBA FCoE function.
15020      */
15021     lpfc_debugfs_terminate(vport);
15022 
15023     lpfc_stop_hba_timers(phba);
15024     spin_lock_irq(&phba->port_list_lock);
15025     list_del_init(&vport->listentry);
15026     spin_unlock_irq(&phba->port_list_lock);
15027 
15028     /* Perform scsi free before driver resource_unset since scsi
15029      * buffers are released to their corresponding pools here.
15030      */
15031     lpfc_io_free(phba);
15032     lpfc_free_iocb_list(phba);
15033     lpfc_sli4_hba_unset(phba);
15034 
15035     lpfc_unset_driver_resource_phase2(phba);
15036     lpfc_sli4_driver_resource_unset(phba);
15037 
15038     /* Unmap adapter Control and Doorbell registers */
15039     lpfc_sli4_pci_mem_unset(phba);
15040 
15041     /* Release PCI resources and disable device's PCI function */
15042     scsi_host_put(shost);
15043     lpfc_disable_pci_dev(phba);
15044 
15045     /* Finally, free the driver's device data structure */
15046     lpfc_hba_free(phba);
15047 
15048     return;
15049 }
15050 
15051 /**
15052  * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
15053  * @dev_d: pointer to device
15054  *
15055  * This routine is called from the kernel's PCI subsystem to support system
15056  * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
15057  * this method, it quiesces the device by stopping the driver's worker
15058  * thread for the device, turning off the device's interrupt and DMA, and
15059  * bringing the device offline. Note that the driver implements only the
15060  * minimum PM requirements for a power-aware driver's suspend/resume support:
15061  * all possible PM messages (SUSPEND, HIBERNATE, FREEZE) delivered to the
15062  * suspend() method are treated as SUSPEND, and the driver fully
15063  * reinitializes its device during the resume() method call. The driver
15064  * therefore sets the device to the PCI_D3hot state in PCI config space
15065  * instead of setting it according to the specific PM message.
15066  *
15067  * Return code
15068  *  0 - driver suspended the device
15069  *  Error otherwise
15070  **/
15071 static int __maybe_unused
15072 lpfc_pci_suspend_one_s4(struct device *dev_d)
15073 {
15074     struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15075     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15076 
15077     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15078             "2843 PCI device Power Management suspend.\n");
15079 
15080     /* Bring down the device */
15081     lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15082     lpfc_offline(phba);
15083     kthread_stop(phba->worker_thread);
15084 
15085     /* Disable interrupt from device */
15086     lpfc_sli4_disable_intr(phba);
15087     lpfc_sli4_queue_destroy(phba);
15088 
15089     return 0;
15090 }
15091 
15092 /**
15093  * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
15094  * @dev_d: pointer to device
15095  *
15096  * This routine is called from the kernel's PCI subsystem to support system
15097  * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
15098  * this method, it restores the device's PCI config space state and fully
15099  * reinitializes the device and brings it online. Note that the driver
15100  * implements only the minimum PM requirements for a power-aware driver's
15101  * suspend/resume support: all possible PM messages (SUSPEND, HIBERNATE,
15102  * FREEZE) delivered to the suspend() method are treated as SUSPEND, and the
15103  * driver fully reinitializes its device during the resume() method call.
15104  * The device is therefore set to PCI_D0 directly in PCI config space before
15105  * its state is restored.
15106  *
15107  * Return code
15108  *  0 - driver resumed the device
15109  *  Error otherwise
15110  **/
15111 static int __maybe_unused
15112 lpfc_pci_resume_one_s4(struct device *dev_d)
15113 {
15114     struct Scsi_Host *shost = dev_get_drvdata(dev_d);
15115     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15116     uint32_t intr_mode;
15117     int error;
15118 
15119     lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15120             "0292 PCI device Power Management resume.\n");
15121 
15122     /* Startup the kernel thread for this host adapter. */
15123     phba->worker_thread = kthread_run(lpfc_do_work, phba,
15124                     "lpfc_worker_%d", phba->brd_no);
15125     if (IS_ERR(phba->worker_thread)) {
15126         error = PTR_ERR(phba->worker_thread);
15127         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15128                 "0293 PM resume failed to start worker "
15129                 "thread: error=x%x.\n", error);
15130         return error;
15131     }
15132 
15133     /* Configure and enable interrupt */
15134     intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15135     if (intr_mode == LPFC_INTR_ERROR) {
15136         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15137                 "0294 PM resume Failed to enable interrupt\n");
15138         return -EIO;
15139     } else
15140         phba->intr_mode = intr_mode;
15141 
15142     /* Restart HBA and bring it online */
15143     lpfc_sli_brdrestart(phba);
15144     lpfc_online(phba);
15145 
15146     /* Log the current active interrupt mode */
15147     lpfc_log_intr_mode(phba, phba->intr_mode);
15148 
15149     return 0;
15150 }
15151 
15152 /**
15153  * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
15154  * @phba: pointer to lpfc hba data structure.
15155  *
15156  * This routine is called to prepare the SLI4 device for PCI slot recover. It
15157  * aborts all the outstanding SCSI I/Os to the pci device.
15158  **/
15159 static void
15160 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
15161 {
15162     lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15163             "2828 PCI channel I/O abort preparing for recovery\n");
15164     /*
15165      * There may be errored I/Os through HBA, abort all I/Os on txcmplq
15166      * and let the SCSI mid-layer retry them to recover.
15167      */
15168     lpfc_sli_abort_fcp_rings(phba);
15169 }
15170 
15171 /**
15172  * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
15173  * @phba: pointer to lpfc hba data structure.
15174  *
15175  * This routine is called to prepare the SLI4 device for PCI slot reset. It
15176  * disables the device interrupt and pci device, and aborts the internal FCP
15177  * pending I/Os.
15178  **/
15179 static void
15180 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
15181 {
15182     int offline =  pci_channel_offline(phba->pcidev);
15183 
15184     lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15185             "2826 PCI channel disable preparing for reset offline"
15186             " %d\n", offline);
15187 
15188     /* Block any management I/Os to the device */
15189     lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
15190 
15191 
15192     /* HBA_PCI_ERR was set in io_error_detected */
15193     lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
15194     /* Flush all driver's outstanding I/Os as we are to reset */
15195     lpfc_sli_flush_io_rings(phba);
15196     lpfc_offline(phba);
15197 
15198     /* stop all timers */
15199     lpfc_stop_hba_timers(phba);
15200 
15201     lpfc_sli4_queue_destroy(phba);
15202     /* Disable interrupt and pci device */
15203     lpfc_sli4_disable_intr(phba);
15204     pci_disable_device(phba->pcidev);
15205 }
15206 
15207 /**
15208  * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
15209  * @phba: pointer to lpfc hba data structure.
15210  *
15211  * This routine is called to prepare the SLI4 device for PCI slot permanently
15212  * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
15213  * pending I/Os.
15214  **/
15215 static void
15216 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
15217 {
15218     lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15219             "2827 PCI channel permanent disable for failure\n");
15220 
15221     /* Block all SCSI devices' I/Os on the host */
15222     lpfc_scsi_dev_block(phba);
15223 
15224     /* stop all timers */
15225     lpfc_stop_hba_timers(phba);
15226 
15227     /* Clean up all driver's outstanding I/Os */
15228     lpfc_sli_flush_io_rings(phba);
15229 }
15230 
15231 /**
15232  * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
15233  * @pdev: pointer to PCI device.
15234  * @state: the current PCI connection state.
15235  *
15236  * This routine is called from the PCI subsystem for error handling to device
15237  * with SLI-4 interface spec. This function is called by the PCI subsystem
15238  * after a PCI bus error affecting this device has been detected. When this
15239  * function is invoked, it will need to stop all the I/Os and interrupt(s)
15240  * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
15241  * for the PCI subsystem to perform proper recovery as desired.
15242  *
15243  * Return codes
15244  *  PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15245  *  PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15246  **/
15247 static pci_ers_result_t
15248 lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
15249 {
15250     struct Scsi_Host *shost = pci_get_drvdata(pdev);
15251     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15252     bool hba_pci_err;
15253 
15254     switch (state) {
15255     case pci_channel_io_normal:
15256         /* Non-fatal error, prepare for recovery */
15257         lpfc_sli4_prep_dev_for_recover(phba);
15258         return PCI_ERS_RESULT_CAN_RECOVER;
15259     case pci_channel_io_frozen:
15260         hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15261         /* Fatal error, prepare for slot reset */
15262         if (!hba_pci_err)
15263             lpfc_sli4_prep_dev_for_reset(phba);
15264         else
15265             lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15266                     "2832  Already handling PCI error "
15267                     "state: x%x\n", state);
15268         return PCI_ERS_RESULT_NEED_RESET;
15269     case pci_channel_io_perm_failure:
15270         set_bit(HBA_PCI_ERR, &phba->bit_flags);
15271         /* Permanent failure, prepare for device down */
15272         lpfc_sli4_prep_dev_for_perm_failure(phba);
15273         return PCI_ERS_RESULT_DISCONNECT;
15274     default:
15275         hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15276         if (!hba_pci_err)
15277             lpfc_sli4_prep_dev_for_reset(phba);
15278         /* Unknown state, prepare and request slot reset */
15279         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15280                 "2825 Unknown PCI error state: x%x\n", state);
15282         return PCI_ERS_RESULT_NEED_RESET;
15283     }
15284 }
15285 
15286 /**
15287  * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
15288  * @pdev: pointer to PCI device.
15289  *
15290  * This routine is called from the PCI subsystem for error handling to device
15291  * with SLI-4 interface spec. It is called after PCI bus has been reset to
15292  * restart the PCI card from scratch, as if from a cold-boot. During the
15293  * PCI subsystem error recovery, after the driver returns
15294  * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
15295  * recovery and then call this routine before calling the .resume method to
15296  * recover the device. This function will initialize the HBA device, enable
15297  * the interrupt, but it will just put the HBA to offline state without
15298  * passing any I/O traffic.
15299  *
15300  * Return codes
15301  *  PCI_ERS_RESULT_RECOVERED - the device has been recovered
15302  *  PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15303  */
15304 static pci_ers_result_t
15305 lpfc_io_slot_reset_s4(struct pci_dev *pdev)
15306 {
15307     struct Scsi_Host *shost = pci_get_drvdata(pdev);
15308     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15309     struct lpfc_sli *psli = &phba->sli;
15310     uint32_t intr_mode;
15311     bool hba_pci_err;
15312 
15313     dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
15314     if (pci_enable_device_mem(pdev)) {
15315         printk(KERN_ERR "lpfc: Cannot re-enable "
15316                "PCI device after reset.\n");
15317         return PCI_ERS_RESULT_DISCONNECT;
15318     }
15319 
15320     pci_restore_state(pdev);
15321 
15322     hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
15323     if (!hba_pci_err)
15324         dev_info(&pdev->dev,
15325              "hba_pci_err was not set, recovering slot reset.\n");
15326     /*
15327      * As the new kernel behavior of pci_restore_state() API call clears
15328      * device saved_state flag, need to save the restored state again.
15329      */
15330     pci_save_state(pdev);
15331 
15332     if (pdev->is_busmaster)
15333         pci_set_master(pdev);
15334 
15335     spin_lock_irq(&phba->hbalock);
15336     psli->sli_flag &= ~LPFC_SLI_ACTIVE;
15337     spin_unlock_irq(&phba->hbalock);
15338 
15339     /* Init cpu_map array */
15340     lpfc_cpu_map_array_init(phba);
15341     /* Configure and enable interrupt */
15342     intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15343     if (intr_mode == LPFC_INTR_ERROR) {
15344         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15345                 "2824 Cannot re-enable interrupt after "
15346                 "slot reset.\n");
15347         return PCI_ERS_RESULT_DISCONNECT;
15348     } else
15349         phba->intr_mode = intr_mode;
15350     lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
15351 
15352     /* Log the current active interrupt mode */
15353     lpfc_log_intr_mode(phba, phba->intr_mode);
15354 
15355     return PCI_ERS_RESULT_RECOVERED;
15356 }
15357 
15358 /**
15359  * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
15360  * @pdev: pointer to PCI device
15361  *
15362  * This routine is called from the PCI subsystem for error handling to device
15363  * with SLI-4 interface spec. It is called when kernel error recovery tells
15364  * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
15365  * error recovery. After this call, traffic can start to flow from this device
15366  * again.
15367  **/
15368 static void
15369 lpfc_io_resume_s4(struct pci_dev *pdev)
15370 {
15371     struct Scsi_Host *shost = pci_get_drvdata(pdev);
15372     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15373 
15374     /*
15375      * In case of slot reset, as function reset is performed through
15376      * mailbox command which needs DMA to be enabled, this operation
15377      * has to be moved to the io resume phase. Taking device offline
15378      * will perform the necessary cleanup.
15379      */
15380     if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15381         /* Perform device reset */
15382         lpfc_sli_brdrestart(phba);
15383         /* Bring the device back online */
15384         lpfc_online(phba);
15385     }
15386 }
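
/*
 * Illustrative summary (not part of the driver): for an SLI-4 port the PCI
 * error-recovery core drives the three callbacks above in sequence, via the
 * lpfc_io_* dispatch wrappers wired into lpfc_err_handler further below:
 *
 *	1. lpfc_io_error_detected_s4() - quiesce the port; returns
 *	   PCI_ERS_RESULT_NEED_RESET (or _DISCONNECT on permanent failure,
 *	   or _CAN_RECOVER for a non-fatal error)
 *	2. lpfc_io_slot_reset_s4()     - re-enable the PCI device, restore
 *	   config space and re-enable interrupts
 *	3. lpfc_io_resume_s4()         - restart the HBA and bring it online
 *	   if the SLI layer is not active
 */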
15387 
15388 /**
15389  * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
15390  * @pdev: pointer to PCI device
15391  * @pid: pointer to PCI device identifier
15392  *
15393  * This routine is to be registered to the kernel's PCI subsystem. When an
15394  * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
15395  * at the PCI device-specific information to determine whether this driver
15396  * supports the device. If the match is
15397  * successful, the driver core invokes this routine. This routine dispatches
15398  * the action to the proper SLI-3 or SLI-4 device probing routine, which will
15399  * do all the initialization that it needs to do to handle the HBA device
15400  * properly.
15401  *
15402  * Return code
15403  *  0 - driver can claim the device
15404  *  negative value - driver can not claim the device
15405  **/
15406 static int
15407 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
15408 {
15409     int rc;
15410     struct lpfc_sli_intf intf;
15411 
15412     if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
15413         return -ENODEV;
15414 
15415     if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
15416         (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
15417         rc = lpfc_pci_probe_one_s4(pdev, pid);
15418     else
15419         rc = lpfc_pci_probe_one_s3(pdev, pid);
15420 
15421     return rc;
15422 }
15423 
15424 /**
15425  * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
15426  * @pdev: pointer to PCI device
15427  *
15428  * This routine is to be registered to the kernel's PCI subsystem. When an
15429  * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
15430  * This routine dispatches the action to the proper SLI-3 or SLI-4 device
15431  * remove routine, which will perform all the necessary cleanup for the
15432  * device to be removed from the PCI subsystem properly.
15433  **/
15434 static void
15435 lpfc_pci_remove_one(struct pci_dev *pdev)
15436 {
15437     struct Scsi_Host *shost = pci_get_drvdata(pdev);
15438     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15439 
15440     switch (phba->pci_dev_grp) {
15441     case LPFC_PCI_DEV_LP:
15442         lpfc_pci_remove_one_s3(pdev);
15443         break;
15444     case LPFC_PCI_DEV_OC:
15445         lpfc_pci_remove_one_s4(pdev);
15446         break;
15447     default:
15448         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15449                 "1424 Invalid PCI device group: 0x%x\n",
15450                 phba->pci_dev_grp);
15451         break;
15452     }
15453     return;
15454 }
15455 
15456 /**
15457  * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
15458  * @dev: pointer to device
15459  *
15460  * This routine is to be registered to the kernel's PCI subsystem to support
15461  * system Power Management (PM). When PM invokes this method, it dispatches
15462  * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
15463  * suspend the device.
15464  *
15465  * Return code
15466  *  0 - driver suspended the device
15467  *  Error otherwise
15468  **/
15469 static int __maybe_unused
15470 lpfc_pci_suspend_one(struct device *dev)
15471 {
15472     struct Scsi_Host *shost = dev_get_drvdata(dev);
15473     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15474     int rc = -ENODEV;
15475 
15476     switch (phba->pci_dev_grp) {
15477     case LPFC_PCI_DEV_LP:
15478         rc = lpfc_pci_suspend_one_s3(dev);
15479         break;
15480     case LPFC_PCI_DEV_OC:
15481         rc = lpfc_pci_suspend_one_s4(dev);
15482         break;
15483     default:
15484         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15485                 "1425 Invalid PCI device group: 0x%x\n",
15486                 phba->pci_dev_grp);
15487         break;
15488     }
15489     return rc;
15490 }
15491 
15492 /**
15493  * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
15494  * @dev: pointer to device
15495  *
15496  * This routine is to be registered to the kernel's PCI subsystem to support
15497  * system Power Management (PM). When PM invokes this method, it dispatches
15498  * the action to the proper SLI-3 or SLI-4 device resume routine, which will
15499  * resume the device.
15500  *
15501  * Return code
15502  *  0 - driver resumed the device
15503  *  Error otherwise
15504  **/
15505 static int __maybe_unused
15506 lpfc_pci_resume_one(struct device *dev)
15507 {
15508     struct Scsi_Host *shost = dev_get_drvdata(dev);
15509     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15510     int rc = -ENODEV;
15511 
15512     switch (phba->pci_dev_grp) {
15513     case LPFC_PCI_DEV_LP:
15514         rc = lpfc_pci_resume_one_s3(dev);
15515         break;
15516     case LPFC_PCI_DEV_OC:
15517         rc = lpfc_pci_resume_one_s4(dev);
15518         break;
15519     default:
15520         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15521                 "1426 Invalid PCI device group: 0x%x\n",
15522                 phba->pci_dev_grp);
15523         break;
15524     }
15525     return rc;
15526 }
15527 
15528 /**
15529  * lpfc_io_error_detected - lpfc method for handling PCI I/O error
15530  * @pdev: pointer to PCI device.
15531  * @state: the current PCI connection state.
15532  *
15533  * This routine is registered to the PCI subsystem for error handling. This
15534  * function is called by the PCI subsystem after a PCI bus error affecting
15535  * this device has been detected. When this routine is invoked, it dispatches
15536  * the action to the proper SLI-3 or SLI-4 device error detected handling
15537  * routine, which will perform the proper error detected operation.
15538  *
15539  * Return codes
15540  *  PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
15541  *  PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15542  **/
15543 static pci_ers_result_t
15544 lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
15545 {
15546     struct Scsi_Host *shost = pci_get_drvdata(pdev);
15547     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15548     pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15549 
15550     if (phba->link_state == LPFC_HBA_ERROR &&
15551         phba->hba_flag & HBA_IOQ_FLUSH)
15552         return PCI_ERS_RESULT_NEED_RESET;
15553 
15554     switch (phba->pci_dev_grp) {
15555     case LPFC_PCI_DEV_LP:
15556         rc = lpfc_io_error_detected_s3(pdev, state);
15557         break;
15558     case LPFC_PCI_DEV_OC:
15559         rc = lpfc_io_error_detected_s4(pdev, state);
15560         break;
15561     default:
15562         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15563                 "1427 Invalid PCI device group: 0x%x\n",
15564                 phba->pci_dev_grp);
15565         break;
15566     }
15567     return rc;
15568 }
15569 
15570 /**
15571  * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
15572  * @pdev: pointer to PCI device.
15573  *
15574  * This routine is registered to the PCI subsystem for error handling. This
15575  * function is called after PCI bus has been reset to restart the PCI card
15576  * from scratch, as if from a cold-boot. When this routine is invoked, it
15577  * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
15578  * routine, which will perform the proper device reset.
15579  *
15580  * Return codes
15581  *  PCI_ERS_RESULT_RECOVERED - the device has been recovered
15582  *  PCI_ERS_RESULT_DISCONNECT - device could not be recovered
15583  **/
15584 static pci_ers_result_t
15585 lpfc_io_slot_reset(struct pci_dev *pdev)
15586 {
15587     struct Scsi_Host *shost = pci_get_drvdata(pdev);
15588     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15589     pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15590 
15591     switch (phba->pci_dev_grp) {
15592     case LPFC_PCI_DEV_LP:
15593         rc = lpfc_io_slot_reset_s3(pdev);
15594         break;
15595     case LPFC_PCI_DEV_OC:
15596         rc = lpfc_io_slot_reset_s4(pdev);
15597         break;
15598     default:
15599         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15600                 "1428 Invalid PCI device group: 0x%x\n",
15601                 phba->pci_dev_grp);
15602         break;
15603     }
15604     return rc;
15605 }
15606 
15607 /**
15608  * lpfc_io_resume - lpfc method for resuming PCI I/O operation
15609  * @pdev: pointer to PCI device
15610  *
15611  * This routine is registered to the PCI subsystem for error handling. It
15612  * is called when kernel error recovery tells the lpfc driver that it is
15613  * OK to resume normal PCI operation after PCI bus error recovery. When
15614  * this routine is invoked, it dispatches the action to the proper SLI-3
15615  * or SLI-4 device io_resume routine, which will resume the device operation.
15616  **/
15617 static void
15618 lpfc_io_resume(struct pci_dev *pdev)
15619 {
15620     struct Scsi_Host *shost = pci_get_drvdata(pdev);
15621     struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15622 
15623     switch (phba->pci_dev_grp) {
15624     case LPFC_PCI_DEV_LP:
15625         lpfc_io_resume_s3(pdev);
15626         break;
15627     case LPFC_PCI_DEV_OC:
15628         lpfc_io_resume_s4(pdev);
15629         break;
15630     default:
15631         lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15632                 "1429 Invalid PCI device group: 0x%x\n",
15633                 phba->pci_dev_grp);
15634         break;
15635     }
15636     return;
15637 }
15638 
15639 /**
15640  * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
15641  * @phba: pointer to lpfc hba data structure.
15642  *
15643  * This routine checks to see if OAS is supported for this adapter. If
15644  * supported, the configure Flash Optimized Fabric flag is set.  Otherwise,
15645  * the enable oas flag is cleared and the pool created for OAS device data
15646  * is destroyed.
15647  *
15648  **/
15649 static void
15650 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
15651 {
15652 
15653     if (!phba->cfg_EnableXLane)
15654         return;
15655 
15656     if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15657         phba->cfg_fof = 1;
15658     } else {
15659         phba->cfg_fof = 0;
15660         mempool_destroy(phba->device_data_mem_pool);
15661         phba->device_data_mem_pool = NULL;
15662     }
15663 
15664     return;
15665 }
15666 
15667 /**
15668  * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
15669  * @phba: pointer to lpfc hba data structure.
15670  *
15671  * This routine checks to see if RAS is supported by the adapter. Check the
15672  * function through which RAS support enablement is to be done.
15673  **/
15674 void
15675 lpfc_sli4_ras_init(struct lpfc_hba *phba)
15676 {
15677     /* if ASIC_GEN_NUM >= 0xC */
15678     if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15679             LPFC_SLI_INTF_IF_TYPE_6) ||
15680         (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15681             LPFC_SLI_INTF_FAMILY_G6)) {
15682         phba->ras_fwlog.ras_hwsupport = true;
15683         if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15684             phba->cfg_ras_fwlog_buffsize)
15685             phba->ras_fwlog.ras_enabled = true;
15686         else
15687             phba->ras_fwlog.ras_enabled = false;
15688     } else {
15689         phba->ras_fwlog.ras_hwsupport = false;
15690     }
15691 }
15692 
15693 
15694 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
15695 
15696 static const struct pci_error_handlers lpfc_err_handler = {
15697     .error_detected = lpfc_io_error_detected,
15698     .slot_reset = lpfc_io_slot_reset,
15699     .resume = lpfc_io_resume,
15700 };
15701 
15702 static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
15703              lpfc_pci_suspend_one,
15704              lpfc_pci_resume_one);
15705 
15706 static struct pci_driver lpfc_driver = {
15707     .name       = LPFC_DRIVER_NAME,
15708     .id_table   = lpfc_id_table,
15709     .probe      = lpfc_pci_probe_one,
15710     .remove     = lpfc_pci_remove_one,
15711     .shutdown   = lpfc_pci_remove_one,
15712     .driver.pm  = &lpfc_pci_pm_ops_one,
15713     .err_handler    = &lpfc_err_handler,
15714 };
15715 
15716 static const struct file_operations lpfc_mgmt_fop = {
15717     .owner = THIS_MODULE,
15718 };
15719 
15720 static struct miscdevice lpfc_mgmt_dev = {
15721     .minor = MISC_DYNAMIC_MINOR,
15722     .name = "lpfcmgmt",
15723     .fops = &lpfc_mgmt_fop,
15724 };
15725 
15726 /**
15727  * lpfc_init - lpfc module initialization routine
15728  *
15729  * This routine is to be invoked when the lpfc module is loaded into the
15730  * kernel. The special kernel macro module_init() is used to indicate the
15731  * role of this routine to the kernel as lpfc module entry point.
15732  *
15733  * Return codes
15734  *   0 - successful
15735  *   -ENOMEM - FC attach transport failed
15736  *   all others - failed
15737  */
15738 static int __init
15739 lpfc_init(void)
15740 {
15741     int error = 0;
15742 
15743     pr_info(LPFC_MODULE_DESC "\n");
15744     pr_info(LPFC_COPYRIGHT "\n");
15745 
15746     error = misc_register(&lpfc_mgmt_dev);
15747     if (error)
15748         printk(KERN_ERR "Could not register lpfcmgmt device, "
15749             "misc_register returned with status %d", error);
15750 
15751     error = -ENOMEM;
15752     lpfc_transport_functions.vport_create = lpfc_vport_create;
15753     lpfc_transport_functions.vport_delete = lpfc_vport_delete;
15754     lpfc_transport_template =
15755                 fc_attach_transport(&lpfc_transport_functions);
15756     if (lpfc_transport_template == NULL)
15757         goto unregister;
15758     lpfc_vport_transport_template =
15759         fc_attach_transport(&lpfc_vport_transport_functions);
15760     if (lpfc_vport_transport_template == NULL) {
15761         fc_release_transport(lpfc_transport_template);
15762         goto unregister;
15763     }
15764     lpfc_wqe_cmd_template();
15765     lpfc_nvmet_cmd_template();
15766 
15767     /* Initialize in case vector mapping is needed */
15768     lpfc_present_cpu = num_present_cpus();
15769 
15770     lpfc_pldv_detect = false;
15771 
15772     error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
15773                     "lpfc/sli4:online",
15774                     lpfc_cpu_online, lpfc_cpu_offline);
15775     if (error < 0)
15776         goto cpuhp_failure;
15777     lpfc_cpuhp_state = error;
15778 
15779     error = pci_register_driver(&lpfc_driver);
15780     if (error)
15781         goto unwind;
15782 
15783     return error;
15784 
15785 unwind:
15786     cpuhp_remove_multi_state(lpfc_cpuhp_state);
15787 cpuhp_failure:
15788     fc_release_transport(lpfc_transport_template);
15789     fc_release_transport(lpfc_vport_transport_template);
15790 unregister:
15791     misc_deregister(&lpfc_mgmt_dev);
15792 
15793     return error;
15794 }
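
/*
 * Illustrative note (not part of the driver): because lpfc_init() registers
 * its CPU hotplug callbacks with CPUHP_AP_ONLINE_DYN, the dynamically
 * allocated state value is kept in lpfc_cpuhp_state and each HBA attaches
 * its own instance to it later during probe, e.g.:
 *
 *	cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
 *
 * so lpfc_cpu_online()/lpfc_cpu_offline() are invoked per registered HBA as
 * CPUs come and go.
 */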
15795 
15796 void lpfc_dmp_dbg(struct lpfc_hba *phba)
15797 {
15798     unsigned int start_idx;
15799     unsigned int dbg_cnt;
15800     unsigned int temp_idx;
15801     int i;
15802     int j = 0;
15803     unsigned long rem_nsec;
15804 
15805     if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
15806         return;
15807 
15808     start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15809     dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
15810     if (!dbg_cnt)
15811         goto out;
15812     temp_idx = start_idx;
15813     if (dbg_cnt >= DBG_LOG_SZ) {
15814         dbg_cnt = DBG_LOG_SZ;
15815         temp_idx -= 1;
15816     } else {
15817         if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
15818             temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
15819         } else {
15820             if (start_idx < dbg_cnt)
15821                 start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
15822             else
15823                 start_idx -= dbg_cnt;
15824         }
15825     }
15826     dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15827          start_idx, temp_idx, dbg_cnt);
15828 
15829     for (i = 0; i < dbg_cnt; i++) {
15830         if ((start_idx + i) < DBG_LOG_SZ)
15831             temp_idx = (start_idx + i) % DBG_LOG_SZ;
15832         else
15833             temp_idx = j++;
15834         rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
15835         dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
15836              temp_idx,
15837              (unsigned long)phba->dbg_log[temp_idx].t_ns,
15838              rem_nsec / 1000,
15839              phba->dbg_log[temp_idx].log);
15840     }
15841 out:
15842     atomic_set(&phba->dbg_log_cnt, 0);
15843     atomic_set(&phba->dbg_log_dmping, 0);
15844 }
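
/*
 * Illustrative walk-through (not part of the driver), assuming purely for
 * the example that DBG_LOG_SZ were 16: after five lpfc_dbg_print() calls,
 * dbg_log_idx == 5, dbg_log_cnt == 5 and entries 0..4 are populated.  In
 * lpfc_dmp_dbg() above, start_idx becomes 5, the buffer has not wrapped
 * (5 + 5 <= 15) and start_idx >= dbg_cnt, so start_idx is rewound to 0 and
 * the loop prints entries 0, 1, 2, 3, 4 -- oldest message first.
 */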
15845 
15846 __printf(2, 3)
15847 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
15848 {
15849     unsigned int idx;
15850     va_list args;
15851     int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15852     struct va_format vaf;
15853 
15854 
15855     va_start(args, fmt);
15856     if (unlikely(dbg_dmping)) {
15857         vaf.fmt = fmt;
15858         vaf.va = &args;
15859         dev_info(&phba->pcidev->dev, "%pV", &vaf);
15860         va_end(args);
15861         return;
15862     }
15863     idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
15864         DBG_LOG_SZ;
15865 
15866     atomic_inc(&phba->dbg_log_cnt);
15867 
15868     vscnprintf(phba->dbg_log[idx].log,
15869            sizeof(phba->dbg_log[idx].log), fmt, args);
15870     va_end(args);
15871 
15872     phba->dbg_log[idx].t_ns = local_clock();
15873 }
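
/*
 * Illustrative usage (not part of the driver): lpfc_dbg_print() takes a
 * printf-style format and records the message in the per-HBA ring buffer,
 * unless a dump is already in progress, in which case it is emitted
 * directly.  With hypothetical locals iotag and status, a caller might do:
 *
 *	lpfc_dbg_print(phba, "io tag x%x completed, status x%x\n",
 *		       iotag, status);
 */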
15874 
15875 /**
15876  * lpfc_exit - lpfc module removal routine
15877  *
15878  * This routine is invoked when the lpfc module is removed from the kernel.
15879  * The special kernel macro module_exit() is used to indicate the role of
15880  * this routine to the kernel as lpfc module exit point.
15881  */
15882 static void __exit
15883 lpfc_exit(void)
15884 {
15885     misc_deregister(&lpfc_mgmt_dev);
15886     pci_unregister_driver(&lpfc_driver);
15887     cpuhp_remove_multi_state(lpfc_cpuhp_state);
15888     fc_release_transport(lpfc_transport_template);
15889     fc_release_transport(lpfc_vport_transport_template);
15890     idr_destroy(&lpfc_hba_index);
15891 }
15892 
15893 module_init(lpfc_init);
15894 module_exit(lpfc_exit);
15895 MODULE_LICENSE("GPL");
15896 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
15897 MODULE_AUTHOR("Broadcom");
15898 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);