0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * QLogic Fibre Channel HBA Driver
0004  * Copyright (c)  2003-2014 QLogic Corporation
0005  */
0006 #include "qla_def.h"
0007 #include "qla_gbl.h"
0008 
0009 #include <linux/delay.h>
0010 #include <linux/slab.h>
0011 #include <linux/vmalloc.h>
0012 
0013 #include "qla_devtbl.h"
0014 
0015 #ifdef CONFIG_SPARC
0016 #include <asm/prom.h>
0017 #endif
0018 
0019 #include "qla_target.h"
0020 
0021 /*
0022  * QLogic ISP2x00 Hardware Support Function Prototypes.
0023  */
0024 static int qla2x00_isp_firmware(scsi_qla_host_t *);
0025 static int qla2x00_setup_chip(scsi_qla_host_t *);
0026 static int qla2x00_fw_ready(scsi_qla_host_t *);
0027 static int qla2x00_configure_hba(scsi_qla_host_t *);
0028 static int qla2x00_configure_loop(scsi_qla_host_t *);
0029 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
0030 static int qla2x00_configure_fabric(scsi_qla_host_t *);
0031 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
0032 static int qla2x00_restart_isp(scsi_qla_host_t *);
0033 
0034 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
0035 static int qla84xx_init_chip(scsi_qla_host_t *);
0036 static int qla25xx_init_queues(struct qla_hw_data *);
0037 static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
0038                       struct event_arg *ea);
0039 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
0040     struct event_arg *);
0041 static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
0042 
0043 /* SRB Extensions ---------------------------------------------------------- */
0044 
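     /*
      * qla2x00_sp_timeout() - timer callback for an srb's IOCB timer.
      *
      * Runs the command-specific timeout handler, drops the timer (TMR)
      * reference on the srb, and marks the adapter EEH-busy if a register
      * read indicates a PCI/register disconnect.
      */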
0045 void
0046 qla2x00_sp_timeout(struct timer_list *t)
0047 {
0048     srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
0049     struct srb_iocb *iocb;
0050     scsi_qla_host_t *vha = sp->vha;
0051 
0052     WARN_ON(irqs_disabled());
0053     iocb = &sp->u.iocb_cmd;
0054     iocb->timeout(sp);
0055 
0056     /* ref: TMR */
0057     kref_put(&sp->cmd_kref, qla2x00_sp_release);
0058 
0059     if (vha && qla2x00_isp_reg_stat(vha->hw)) {
0060         ql_log(ql_log_info, vha, 0x9008,
0061             "PCI/Register disconnect.\n");
0062         qla_pci_set_eeh_busy(vha);
0063     }
0064 }
0065 
0066 void qla2x00_sp_free(srb_t *sp)
0067 {
0068     struct srb_iocb *iocb = &sp->u.iocb_cmd;
0069 
0070     del_timer(&iocb->timer);
0071     qla2x00_rel_sp(sp);
0072 }
0073 
0074 void qla2xxx_rel_done_warning(srb_t *sp, int res)
0075 {
0076     WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
0077 }
0078 
0079 void qla2xxx_rel_free_warning(srb_t *sp)
0080 {
0081     WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
0082 }
0083 
0084 /* Asynchronous Login/Logout Routines -------------------------------------- */
0085 
0086 unsigned long
0087 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
0088 {
0089     unsigned long tmo;
0090     struct qla_hw_data *ha = vha->hw;
0091 
0092     /* Firmware should use switch negotiated r_a_tov for timeout. */
0093     tmo = ha->r_a_tov / 10 * 2;
0094     if (IS_QLAFX00(ha)) {
0095         tmo = FX00_DEF_RATOV * 2;
0096     } else if (!IS_FWI2_CAPABLE(ha)) {
0097         /*
0098          * Except for earlier ISPs where the timeout is seeded from the
0099          * initialization control block.
0100          */
0101         tmo = ha->login_timeout;
0102     }
0103     return tmo;
0104 }
0105 
0106 static void qla24xx_abort_iocb_timeout(void *data)
0107 {
0108     srb_t *sp = data;
0109     struct srb_iocb *abt = &sp->u.iocb_cmd;
0110     struct qla_qpair *qpair = sp->qpair;
0111     u32 handle;
0112     unsigned long flags;
0113 
0114     if (sp->cmd_sp)
0115         ql_dbg(ql_dbg_async, sp->vha, 0x507c,
0116             "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n",
0117             sp->cmd_sp->handle, sp->cmd_sp->type,
0118             sp->handle, sp->type);
0119     else
0120         ql_dbg(ql_dbg_async, sp->vha, 0x507c,
0121             "Abort timeout 2 - hdl=%x, type=%x\n",
0122             sp->handle, sp->type);
0123 
0124     spin_lock_irqsave(qpair->qp_lock_ptr, flags);
0125     for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
0126         if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] ==
0127             sp->cmd_sp))
0128             qpair->req->outstanding_cmds[handle] = NULL;
0129 
0130         /* removing the abort */
0131         if (qpair->req->outstanding_cmds[handle] == sp) {
0132             qpair->req->outstanding_cmds[handle] = NULL;
0133             break;
0134         }
0135     }
0136     spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
0137 
0138     if (sp->cmd_sp) {
0139         /*
0140          * This done function should take care of
0141          * original command ref: INIT
0142          */
0143         sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED);
0144     }
0145 
0146     abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT);
0147     sp->done(sp, QLA_OS_TIMER_EXPIRED);
0148 }
0149 
0150 static void qla24xx_abort_sp_done(srb_t *sp, int res)
0151 {
0152     struct srb_iocb *abt = &sp->u.iocb_cmd;
0153     srb_t *orig_sp = sp->cmd_sp;
0154 
0155     if (orig_sp)
0156         qla_wait_nvme_release_cmd_kref(orig_sp);
0157 
0158     if (sp->flags & SRB_WAKEUP_ON_COMP)
0159         complete(&abt->u.abt.comp);
0160     else
0161         /* ref: INIT */
0162         kref_put(&sp->cmd_kref, qla2x00_sp_release);
0163 }
0164 
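     /*
      * qla24xx_async_abort_cmd() - issue an ABTS IOCB for @cmd_sp.
      *
      * Allocates an abort srb on the same queue pair as the original
      * command and starts it.  If @wait is true, the caller blocks until
      * the firmware reports the abort's completion status.
      */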
0165 int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
0166 {
0167     scsi_qla_host_t *vha = cmd_sp->vha;
0168     struct srb_iocb *abt_iocb;
0169     srb_t *sp;
0170     int rval = QLA_FUNCTION_FAILED;
0171     uint8_t bail;
0172 
0173     /* ref: INIT for ABTS command */
0174     sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
0175                   GFP_ATOMIC);
0176     if (!sp)
0177         return QLA_MEMORY_ALLOC_FAILED;
0178 
0179     QLA_VHA_MARK_BUSY(vha, bail);
0180     abt_iocb = &sp->u.iocb_cmd;
0181     sp->type = SRB_ABT_CMD;
0182     sp->name = "abort";
0183     sp->qpair = cmd_sp->qpair;
0184     sp->cmd_sp = cmd_sp;
0185     if (wait)
0186         sp->flags = SRB_WAKEUP_ON_COMP;
0187 
0188     init_completion(&abt_iocb->u.abt.comp);
0189     /* FW can send 2 x ABTS's, each with a 20s timeout */
0190     qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done);
0191     sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout;
0192 
0193     abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
0194     abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
0195 
0196     ql_dbg(ql_dbg_async, vha, 0x507c,
0197            "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
0198            cmd_sp->type);
0199 
0200     rval = qla2x00_start_sp(sp);
0201     if (rval != QLA_SUCCESS) {
0202         /* ref: INIT */
0203         kref_put(&sp->cmd_kref, qla2x00_sp_release);
0204         return rval;
0205     }
0206 
0207     if (wait) {
0208         wait_for_completion(&abt_iocb->u.abt.comp);
0209         rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
0210             QLA_SUCCESS : QLA_ERR_FROM_FW;
0211         /* ref: INIT */
0212         kref_put(&sp->cmd_kref, qla2x00_sp_release);
0213     }
0214 
0215     return rval;
0216 }
0217 
0218 void
0219 qla2x00_async_iocb_timeout(void *data)
0220 {
0221     srb_t *sp = data;
0222     fc_port_t *fcport = sp->fcport;
0223     struct srb_iocb *lio = &sp->u.iocb_cmd;
0224     int rc, h;
0225     unsigned long flags;
0226 
0227     if (fcport) {
0228         ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
0229             "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
0230             sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
0231 
0232         fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
0233     } else {
0234         pr_info("Async-%s timeout - hdl=%x.\n",
0235             sp->name, sp->handle);
0236     }
0237 
0238     switch (sp->type) {
0239     case SRB_LOGIN_CMD:
0240         rc = qla24xx_async_abort_cmd(sp, false);
0241         if (rc) {
0242             /* Retry as needed. */
0243             lio->u.logio.data[0] = MBS_COMMAND_ERROR;
0244             lio->u.logio.data[1] =
0245                 lio->u.logio.flags & SRB_LOGIN_RETRIED ?
0246                 QLA_LOGIO_LOGIN_RETRIED : 0;
0247             spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
0248             for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
0249                 h++) {
0250                 if (sp->qpair->req->outstanding_cmds[h] ==
0251                     sp) {
0252                     sp->qpair->req->outstanding_cmds[h] =
0253                         NULL;
0254                     break;
0255                 }
0256             }
0257             spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
0258             sp->done(sp, QLA_FUNCTION_TIMEOUT);
0259         }
0260         break;
0261     case SRB_LOGOUT_CMD:
0262     case SRB_CT_PTHRU_CMD:
0263     case SRB_MB_IOCB:
0264     case SRB_NACK_PLOGI:
0265     case SRB_NACK_PRLI:
0266     case SRB_NACK_LOGO:
0267     case SRB_CTRL_VP:
0268     default:
0269         rc = qla24xx_async_abort_cmd(sp, false);
0270         if (rc) {
0271             spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
0272             for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
0273                 h++) {
0274                 if (sp->qpair->req->outstanding_cmds[h] ==
0275                     sp) {
0276                     sp->qpair->req->outstanding_cmds[h] =
0277                         NULL;
0278                     break;
0279                 }
0280             }
0281             spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
0282             sp->done(sp, QLA_FUNCTION_TIMEOUT);
0283         }
0284         break;
0285     }
0286 }
0287 
0288 static void qla2x00_async_login_sp_done(srb_t *sp, int res)
0289 {
0290     struct scsi_qla_host *vha = sp->vha;
0291     struct srb_iocb *lio = &sp->u.iocb_cmd;
0292     struct event_arg ea;
0293 
0294     ql_dbg(ql_dbg_disc, vha, 0x20dd,
0295         "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
0296 
0297     sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
0298 
0299     if (!test_bit(UNLOADING, &vha->dpc_flags)) {
0300         memset(&ea, 0, sizeof(ea));
0301         ea.fcport = sp->fcport;
0302         ea.data[0] = lio->u.logio.data[0];
0303         ea.data[1] = lio->u.logio.data[1];
0304         ea.iop[0] = lio->u.logio.iop[0];
0305         ea.iop[1] = lio->u.logio.iop[1];
0306         ea.sp = sp;
0307         if (res)
0308             ea.data[0] = MBS_COMMAND_ERROR;
0309         qla24xx_handle_plogi_done_event(vha, &ea);
0310     }
0311 
0312     /* ref: INIT */
0313     kref_put(&sp->cmd_kref, qla2x00_sp_release);
0314 }
0315 
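     /*
      * qla2x00_async_login() - send an asynchronous PLOGI to @fcport.
      *
      * Skips the request if the port is offline, a command is already
      * outstanding, or no loop ID has been assigned.  Login flags are
      * chosen based on N2N topology, eDIF state and NVMe support; the
      * result is handled in qla2x00_async_login_sp_done().
      */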
0316 int
0317 qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
0318     uint16_t *data)
0319 {
0320     srb_t *sp;
0321     struct srb_iocb *lio;
0322     int rval = QLA_FUNCTION_FAILED;
0323 
0324     if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
0325         fcport->loop_id == FC_NO_LOOP_ID) {
0326         ql_log(ql_log_warn, vha, 0xffff,
0327             "%s: %8phC - not sending command.\n",
0328             __func__, fcport->port_name);
0329         return rval;
0330     }
0331 
0332     /* ref: INIT */
0333     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
0334     if (!sp)
0335         goto done;
0336 
0337     qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
0338     fcport->flags |= FCF_ASYNC_SENT;
0339     fcport->logout_completed = 0;
0340 
0341     sp->type = SRB_LOGIN_CMD;
0342     sp->name = "login";
0343     sp->gen1 = fcport->rscn_gen;
0344     sp->gen2 = fcport->login_gen;
0345     qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
0346                   qla2x00_async_login_sp_done);
0347 
0348     lio = &sp->u.iocb_cmd;
0349     if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) {
0350         lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
0351     } else {
0352         if (vha->hw->flags.edif_enabled &&
0353             DBELL_ACTIVE(vha)) {
0354             lio->u.logio.flags |=
0355                 (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI);
0356         } else {
0357             lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
0358         }
0359     }
0360 
0361     if (NVME_TARGET(vha->hw, fcport))
0362         lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
0363 
0364     rval = qla2x00_start_sp(sp);
0365 
0366     ql_dbg(ql_dbg_disc, vha, 0x2072,
0367            "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
0368            fcport->port_name, sp->handle, fcport->loop_id,
0369            fcport->d_id.b24, fcport->login_retry,
0370            lio->u.logio.flags & SRB_LOGIN_FCSP ? "FCSP" : "");
0371 
0372     if (rval != QLA_SUCCESS) {
0373         fcport->flags |= FCF_LOGIN_NEEDED;
0374         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
0375         goto done_free_sp;
0376     }
0377 
0378     return rval;
0379 
0380 done_free_sp:
0381     /* ref: INIT */
0382     kref_put(&sp->cmd_kref, qla2x00_sp_release);
0383     fcport->flags &= ~FCF_ASYNC_SENT;
0384 done:
0385     fcport->flags &= ~FCF_ASYNC_ACTIVE;
0386     return rval;
0387 }
0388 
0389 static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
0390 {
0391     sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
0392     sp->fcport->login_gen++;
0393     qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]);
0394     /* ref: INIT */
0395     kref_put(&sp->cmd_kref, qla2x00_sp_release);
0396 }
0397 
0398 int
0399 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
0400 {
0401     srb_t *sp;
0402     int rval = QLA_FUNCTION_FAILED;
0403 
0404     fcport->flags |= FCF_ASYNC_SENT;
0405     /* ref: INIT */
0406     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
0407     if (!sp)
0408         goto done;
0409 
0410     sp->type = SRB_LOGOUT_CMD;
0411     sp->name = "logout";
0412     qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
0413                   qla2x00_async_logout_sp_done);
0414 
0415     ql_dbg(ql_dbg_disc, vha, 0x2070,
0416         "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n",
0417         sp->handle, fcport->loop_id, fcport->d_id.b.domain,
0418         fcport->d_id.b.area, fcport->d_id.b.al_pa,
0419         fcport->port_name, fcport->explicit_logout);
0420 
0421     rval = qla2x00_start_sp(sp);
0422     if (rval != QLA_SUCCESS)
0423         goto done_free_sp;
0424     return rval;
0425 
0426 done_free_sp:
0427     /* ref: INIT */
0428     kref_put(&sp->cmd_kref, qla2x00_sp_release);
0429 done:
0430     fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
0431     return rval;
0432 }
0433 
0434 void
0435 qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
0436     uint16_t *data)
0437 {
0438     fcport->flags &= ~FCF_ASYNC_ACTIVE;
0439     /* Don't re-login in target mode */
0440     if (!fcport->tgt_session)
0441         qla2x00_mark_device_lost(vha, fcport, 1);
0442     qlt_logo_completion_handler(fcport, data[0]);
0443 }
0444 
0445 static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
0446 {
0447     struct srb_iocb *lio = &sp->u.iocb_cmd;
0448     struct scsi_qla_host *vha = sp->vha;
0449 
0450     sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
0451     if (!test_bit(UNLOADING, &vha->dpc_flags))
0452         qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
0453             lio->u.logio.data);
0454     /* ref: INIT */
0455     kref_put(&sp->cmd_kref, qla2x00_sp_release);
0456 }
0457 
0458 int
0459 qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
0460 {
0461     srb_t *sp;
0462     int rval;
0463 
0464     rval = QLA_FUNCTION_FAILED;
0465     /* ref: INIT */
0466     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
0467     if (!sp)
0468         goto done;
0469 
0470     sp->type = SRB_PRLO_CMD;
0471     sp->name = "prlo";
0472     qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
0473                   qla2x00_async_prlo_sp_done);
0474 
0475     ql_dbg(ql_dbg_disc, vha, 0x2070,
0476         "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
0477         sp->handle, fcport->loop_id, fcport->d_id.b.domain,
0478         fcport->d_id.b.area, fcport->d_id.b.al_pa);
0479 
0480     rval = qla2x00_start_sp(sp);
0481     if (rval != QLA_SUCCESS)
0482         goto done_free_sp;
0483 
0484     return rval;
0485 
0486 done_free_sp:
0487     /* ref: INIT */
0488     kref_put(&sp->cmd_kref, qla2x00_sp_release);
0489 done:
0490     fcport->flags &= ~FCF_ASYNC_ACTIVE;
0491     return rval;
0492 }
0493 
0494 static
0495 void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
0496 {
0497     struct fc_port *fcport = ea->fcport;
0498 
0499     ql_dbg(ql_dbg_disc, vha, 0x20d2,
0500         "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
0501         __func__, fcport->port_name, fcport->disc_state,
0502         fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
0503         fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
0504 
0505     WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
0506           ea->data[0]);
0507 
0508     if (ea->data[0] != MBS_COMMAND_COMPLETE) {
0509         ql_dbg(ql_dbg_disc, vha, 0x2066,
0510             "%s %8phC: adisc fail: post delete\n",
0511             __func__, ea->fcport->port_name);
0512         /* deleted = 0 & logout_on_delete = 1: force fw session cleanup */
0513         fcport->deleted = 0;
0514         fcport->logout_on_delete = 1;
0515         qlt_schedule_sess_for_deletion(ea->fcport);
0516         return;
0517     }
0518 
0519     if (ea->fcport->disc_state == DSC_DELETE_PEND)
0520         return;
0521 
0522     if (ea->sp->gen2 != ea->fcport->login_gen) {
0523         /* target side must have changed it. */
0524         ql_dbg(ql_dbg_disc, vha, 0x20d3,
0525             "%s %8phC generation changed\n",
0526             __func__, ea->fcport->port_name);
0527         return;
0528     } else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
0529         qla_rscn_replay(fcport);
0530         qlt_schedule_sess_for_deletion(fcport);
0531         return;
0532     }
0533 
0534     __qla24xx_handle_gpdb_event(vha, ea);
0535 }
0536 
0537 static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
0538 {
0539     struct qla_work_evt *e;
0540 
0541     e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);
0542     if (!e)
0543         return QLA_FUNCTION_FAILED;
0544 
0545     e->u.fcport.fcport = fcport;
0546     fcport->flags |= FCF_ASYNC_ACTIVE;
0547     qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
0548     return qla2x00_post_work(vha, e);
0549 }
0550 
0551 static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
0552 {
0553     struct scsi_qla_host *vha = sp->vha;
0554     struct event_arg ea;
0555     struct srb_iocb *lio = &sp->u.iocb_cmd;
0556 
0557     ql_dbg(ql_dbg_disc, vha, 0x2066,
0558         "Async done-%s res %x %8phC\n",
0559         sp->name, res, sp->fcport->port_name);
0560 
0561     sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
0562 
0563     memset(&ea, 0, sizeof(ea));
0564     ea.rc = res;
0565     ea.data[0] = lio->u.logio.data[0];
0566     ea.data[1] = lio->u.logio.data[1];
0567     ea.iop[0] = lio->u.logio.iop[0];
0568     ea.iop[1] = lio->u.logio.iop[1];
0569     ea.fcport = sp->fcport;
0570     ea.sp = sp;
0571     if (res)
0572         ea.data[0] = MBS_COMMAND_ERROR;
0573 
0574     qla24xx_handle_adisc_event(vha, &ea);
0575     /* ref: INIT */
0576     kref_put(&sp->cmd_kref, qla2x00_sp_release);
0577 }
0578 
0579 int
0580 qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
0581     uint16_t *data)
0582 {
0583     srb_t *sp;
0584     struct srb_iocb *lio;
0585     int rval = QLA_FUNCTION_FAILED;
0586 
0587     if (IS_SESSION_DELETED(fcport)) {
0588         ql_log(ql_log_warn, vha, 0xffff,
0589                "%s: %8phC is being deleted - not sending command.\n",
0590                __func__, fcport->port_name);
0591         fcport->flags &= ~FCF_ASYNC_ACTIVE;
0592         return rval;
0593     }
0594 
0595     if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
0596         return rval;
0597 
0598     fcport->flags |= FCF_ASYNC_SENT;
0599     /* ref: INIT */
0600     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
0601     if (!sp)
0602         goto done;
0603 
0604     sp->type = SRB_ADISC_CMD;
0605     sp->name = "adisc";
0606     sp->gen1 = fcport->rscn_gen;
0607     sp->gen2 = fcport->login_gen;
0608     qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
0609                   qla2x00_async_adisc_sp_done);
0610 
0611     if (data[1] & QLA_LOGIO_LOGIN_RETRIED) {
0612         lio = &sp->u.iocb_cmd;
0613         lio->u.logio.flags |= SRB_LOGIN_RETRIED;
0614     }
0615 
0616     ql_dbg(ql_dbg_disc, vha, 0x206f,
0617         "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
0618         sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
0619 
0620     rval = qla2x00_start_sp(sp);
0621     if (rval != QLA_SUCCESS)
0622         goto done_free_sp;
0623 
0624     return rval;
0625 
0626 done_free_sp:
0627     /* ref: INIT */
0628     kref_put(&sp->cmd_kref, qla2x00_sp_release);
0629 done:
0630     fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
0631     qla2x00_post_async_adisc_work(vha, fcport, data);
0632     return rval;
0633 }
0634 
0635 static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
0636 {
0637     struct qla_hw_data *ha = vha->hw;
0638 
0639     if (IS_FWI2_CAPABLE(ha))
0640         return loop_id > NPH_LAST_HANDLE;
0641 
0642     return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
0643         loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
0644 }
0645 
0646 /**
0647  * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
0648  * @vha: adapter state pointer.
0649  * @dev: port structure pointer.
0650  *
0651  * Returns:
0652  *  qla2x00 local function return status code.
0653  *
0654  * Context:
0655  *  Kernel context.
0656  */
0657 static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
0658 {
0659     int rval;
0660     struct qla_hw_data *ha = vha->hw;
0661     unsigned long flags = 0;
0662 
0663     rval = QLA_SUCCESS;
0664 
0665     spin_lock_irqsave(&ha->vport_slock, flags);
0666 
0667     dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
0668     if (dev->loop_id >= LOOPID_MAP_SIZE ||
0669         qla2x00_is_reserved_id(vha, dev->loop_id)) {
0670         dev->loop_id = FC_NO_LOOP_ID;
0671         rval = QLA_FUNCTION_FAILED;
0672     } else {
0673         set_bit(dev->loop_id, ha->loop_id_map);
0674     }
0675     spin_unlock_irqrestore(&ha->vport_slock, flags);
0676 
0677     if (rval == QLA_SUCCESS)
0678         ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
0679                "Assigning new loopid=%x, portid=%x.\n",
0680                dev->loop_id, dev->d_id.b24);
0681     else
0682         ql_log(ql_log_warn, dev->vha, 0x2087,
0683                "No loop_id's available, portid=%x.\n",
0684                dev->d_id.b24);
0685 
0686     return rval;
0687 }
0688 
0689 void qla2x00_clear_loop_id(fc_port_t *fcport)
0690 {
0691     struct qla_hw_data *ha = fcport->vha->hw;
0692 
0693     if (fcport->loop_id == FC_NO_LOOP_ID ||
0694         qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
0695         return;
0696 
0697     clear_bit(fcport->loop_id, ha->loop_id_map);
0698     fcport->loop_id = FC_NO_LOOP_ID;
0699 }
0700 
0701 static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
0702     struct event_arg *ea)
0703 {
0704     fc_port_t *fcport, *conflict_fcport;
0705     struct get_name_list_extended *e;
0706     u16 i, n, found = 0, loop_id;
0707     port_id_t id;
0708     u64 wwn;
0709     u16 data[2];
0710     u8 current_login_state, nvme_cls;
0711 
0712     fcport = ea->fcport;
0713     ql_dbg(ql_dbg_disc, vha, 0xffff,
0714         "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d edif %d\n",
0715         __func__, fcport->port_name, fcport->disc_state,
0716         fcport->fw_login_state, ea->rc,
0717         fcport->login_gen, fcport->last_login_gen,
0718         fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable);
0719 
0720     if (fcport->disc_state == DSC_DELETE_PEND)
0721         return;
0722 
0723     if (ea->rc) { /* rval */
0724         if (fcport->login_retry == 0) {
0725             ql_dbg(ql_dbg_disc, vha, 0x20de,
0726                 "GNL failed - Port %8phN, login retry cnt=%d.\n",
0727                 fcport->port_name, fcport->login_retry);
0728         }
0729         return;
0730     }
0731 
0732     if (fcport->last_rscn_gen != fcport->rscn_gen) {
0733         qla_rscn_replay(fcport);
0734         qlt_schedule_sess_for_deletion(fcport);
0735         return;
0736     } else if (fcport->last_login_gen != fcport->login_gen) {
0737         ql_dbg(ql_dbg_disc, vha, 0x20e0,
0738             "%s %8phC login gen changed\n",
0739             __func__, fcport->port_name);
0740         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
0741         return;
0742     }
0743 
0744     n = ea->data[0] / sizeof(struct get_name_list_extended);
0745 
0746     ql_dbg(ql_dbg_disc, vha, 0x20e1,
0747         "%s %d %8phC n %d %02x%02x%02x lid %d \n",
0748         __func__, __LINE__, fcport->port_name, n,
0749         fcport->d_id.b.domain, fcport->d_id.b.area,
0750         fcport->d_id.b.al_pa, fcport->loop_id);
0751 
0752     for (i = 0; i < n; i++) {
0753         e = &vha->gnl.l[i];
0754         wwn = wwn_to_u64(e->port_name);
0755         id.b.domain = e->port_id[2];
0756         id.b.area = e->port_id[1];
0757         id.b.al_pa = e->port_id[0];
0758         id.b.rsvd_1 = 0;
0759 
0760         if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
0761             continue;
0762 
0763         if (IS_SW_RESV_ADDR(id))
0764             continue;
0765 
0766         found = 1;
0767 
0768         loop_id = le16_to_cpu(e->nport_handle);
0769         loop_id = (loop_id & 0x7fff);
0770         nvme_cls = e->current_login_state >> 4;
0771         current_login_state = e->current_login_state & 0xf;
0772 
0773         if (PRLI_PHASE(nvme_cls)) {
0774             current_login_state = nvme_cls;
0775             fcport->fc4_type &= ~FS_FC4TYPE_FCP;
0776             fcport->fc4_type |= FS_FC4TYPE_NVME;
0777         } else if (PRLI_PHASE(current_login_state)) {
0778             fcport->fc4_type |= FS_FC4TYPE_FCP;
0779             fcport->fc4_type &= ~FS_FC4TYPE_NVME;
0780         }
0781 
0782         ql_dbg(ql_dbg_disc, vha, 0x20e2,
0783             "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n",
0784             __func__, fcport->port_name,
0785             e->current_login_state, fcport->fw_login_state,
0786             fcport->fc4_type, id.b24, fcport->d_id.b24,
0787             loop_id, fcport->loop_id);
0788 
0789         switch (fcport->disc_state) {
0790         case DSC_DELETE_PEND:
0791         case DSC_DELETED:
0792             break;
0793         default:
0794             if ((id.b24 != fcport->d_id.b24 &&
0795                 fcport->d_id.b24 &&
0796                 fcport->loop_id != FC_NO_LOOP_ID) ||
0797                 (fcport->loop_id != FC_NO_LOOP_ID &&
0798                 fcport->loop_id != loop_id)) {
0799                 ql_dbg(ql_dbg_disc, vha, 0x20e3,
0800                     "%s %d %8phC post del sess\n",
0801                     __func__, __LINE__, fcport->port_name);
0802                 if (fcport->n2n_flag)
0803                     fcport->d_id.b24 = 0;
0804                 qlt_schedule_sess_for_deletion(fcport);
0805                 return;
0806             }
0807             break;
0808         }
0809 
0810         fcport->loop_id = loop_id;
0811         if (fcport->n2n_flag)
0812             fcport->d_id.b24 = id.b24;
0813 
0814         wwn = wwn_to_u64(fcport->port_name);
0815         qlt_find_sess_invalidate_other(vha, wwn,
0816             id, loop_id, &conflict_fcport);
0817 
0818         if (conflict_fcport) {
0819             /*
0820              * Another fcport shares the same loop_id and
0821              * nport id.  The conflicting fcport needs to finish
0822              * cleanup before this fcport can proceed to login.
0823              */
0824             conflict_fcport->conflict = fcport;
0825             fcport->login_pause = 1;
0826         }
0827 
0828         switch (vha->hw->current_topology) {
0829         default:
0830             switch (current_login_state) {
0831             case DSC_LS_PRLI_COMP:
0832                 ql_dbg(ql_dbg_disc,
0833                     vha, 0x20e4, "%s %d %8phC post gpdb\n",
0834                     __func__, __LINE__, fcport->port_name);
0835 
0836                 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
0837                     fcport->port_type = FCT_INITIATOR;
0838                 else
0839                     fcport->port_type = FCT_TARGET;
0840                 data[0] = data[1] = 0;
0841                 qla2x00_post_async_adisc_work(vha, fcport,
0842                     data);
0843                 break;
0844             case DSC_LS_PLOGI_COMP:
0845                 if (vha->hw->flags.edif_enabled) {
0846                     /* check to see if the app supports secure login */
0847                     qla24xx_post_gpdb_work(vha, fcport, 0);
0848                     break;
0849                 }
0850                 fallthrough;
0851             case DSC_LS_PORT_UNAVAIL:
0852             default:
0853                 if (fcport->loop_id == FC_NO_LOOP_ID) {
0854                     qla2x00_find_new_loop_id(vha, fcport);
0855                     fcport->fw_login_state =
0856                         DSC_LS_PORT_UNAVAIL;
0857                 }
0858                 ql_dbg(ql_dbg_disc, vha, 0x20e5,
0859                     "%s %d %8phC\n", __func__, __LINE__,
0860                     fcport->port_name);
0861                 qla24xx_fcport_handle_login(vha, fcport);
0862                 break;
0863             }
0864             break;
0865         case ISP_CFG_N:
0866             fcport->fw_login_state = current_login_state;
0867             fcport->d_id = id;
0868             switch (current_login_state) {
0869             case DSC_LS_PRLI_PEND:
0870                 /*
0871                  * In the middle of PRLI. Let it finish.
0872                  * Allow the relogin code to recheck the state
0873                  * with GNL.  Push disc_state back to DELETED
0874                  * so GNL can go out again.
0875                  */
0876                 qla2x00_set_fcport_disc_state(fcport,
0877                     DSC_DELETED);
0878                 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
0879                 break;
0880             case DSC_LS_PRLI_COMP:
0881                 if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
0882                     fcport->port_type = FCT_INITIATOR;
0883                 else
0884                     fcport->port_type = FCT_TARGET;
0885 
0886                 data[0] = data[1] = 0;
0887                 qla2x00_post_async_adisc_work(vha, fcport,
0888                     data);
0889                 break;
0890             case DSC_LS_PLOGI_COMP:
0891                 if (vha->hw->flags.edif_enabled &&
0892                     DBELL_ACTIVE(vha)) {
0893                     /* check to see if the app supports secure login or not */
0894                     qla24xx_post_gpdb_work(vha, fcport, 0);
0895                     break;
0896                 }
0897                 if (fcport_is_bigger(fcport)) {
0898                     /* local adapter is smaller */
0899                     if (fcport->loop_id != FC_NO_LOOP_ID)
0900                         qla2x00_clear_loop_id(fcport);
0901 
0902                     fcport->loop_id = loop_id;
0903                     qla24xx_fcport_handle_login(vha,
0904                         fcport);
0905                     break;
0906                 }
0907                 fallthrough;
0908             default:
0909                 if (fcport_is_smaller(fcport)) {
0910                     /* local adapter is bigger */
0911                     if (fcport->loop_id != FC_NO_LOOP_ID)
0912                         qla2x00_clear_loop_id(fcport);
0913 
0914                     fcport->loop_id = loop_id;
0915                     qla24xx_fcport_handle_login(vha,
0916                         fcport);
0917                 }
0918                 break;
0919             }
0920             break;
0921         } /* switch (ha->current_topology) */
0922     }
0923 
0924     if (!found) {
0925         switch (vha->hw->current_topology) {
0926         case ISP_CFG_F:
0927         case ISP_CFG_FL:
0928             for (i = 0; i < n; i++) {
0929                 e = &vha->gnl.l[i];
0930                 id.b.domain = e->port_id[0];
0931                 id.b.area = e->port_id[1];
0932                 id.b.al_pa = e->port_id[2];
0933                 id.b.rsvd_1 = 0;
0934                 loop_id = le16_to_cpu(e->nport_handle);
0935 
0936                 if (fcport->d_id.b24 == id.b24) {
0937                     conflict_fcport =
0938                         qla2x00_find_fcport_by_wwpn(vha,
0939                         e->port_name, 0);
0940                     if (conflict_fcport) {
0941                         ql_dbg(ql_dbg_disc + ql_dbg_verbose,
0942                             vha, 0x20e5,
0943                             "%s %d %8phC post del sess\n",
0944                             __func__, __LINE__,
0945                             conflict_fcport->port_name);
0946                         qlt_schedule_sess_for_deletion
0947                             (conflict_fcport);
0948                     }
0949                 }
0950                 /*
0951                  * FW already picked this loop id for
0952                  * another fcport
0953                  */
0954                 if (fcport->loop_id == loop_id)
0955                     fcport->loop_id = FC_NO_LOOP_ID;
0956             }
0957             qla24xx_fcport_handle_login(vha, fcport);
0958             break;
0959         case ISP_CFG_N:
0960             qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
0961             if (time_after_eq(jiffies, fcport->dm_login_expire)) {
0962                 if (fcport->n2n_link_reset_cnt < 2) {
0963                     fcport->n2n_link_reset_cnt++;
0964                     /*
0965                      * Remote port is not sending PLOGI.
0966                      * Reset the link to kick start its state
0967                      * machine.
0968                      */
0969                     set_bit(N2N_LINK_RESET,
0970                         &vha->dpc_flags);
0971                 } else {
0972                     if (fcport->n2n_chip_reset < 1) {
0973                         ql_log(ql_log_info, vha, 0x705d,
0974                             "Chip reset to bring laser down.\n");
0975                         set_bit(ISP_ABORT_NEEDED,
0976                             &vha->dpc_flags);
0977                         fcport->n2n_chip_reset++;
0978                     } else {
0979                         ql_log(ql_log_info, vha, 0x705d,
0980                             "Remote port %8ph is not coming back\n",
0981                             fcport->port_name);
0982                         fcport->scan_state = 0;
0983                     }
0984                 }
0985                 qla2xxx_wake_dpc(vha);
0986             } else {
0987                 /*
0988              * Remote port is supposed to send PLOGI. Give it
0989              * more time; FW will catch it.
0990                  */
0991                 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
0992             }
0993             break;
0994         case ISP_CFG_NL:
0995             qla24xx_fcport_handle_login(vha, fcport);
0996             break;
0997         default:
0998             break;
0999         }
1000     }
1001 } /* gnl_event */
1002 
1003 static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
1004 {
1005     struct scsi_qla_host *vha = sp->vha;
1006     unsigned long flags;
1007     struct fc_port *fcport = NULL, *tf;
1008     u16 i, n = 0, loop_id;
1009     struct event_arg ea;
1010     struct get_name_list_extended *e;
1011     u64 wwn;
1012     struct list_head h;
1013     bool found = false;
1014 
1015     ql_dbg(ql_dbg_disc, vha, 0x20e7,
1016         "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
1017         sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
1018         sp->u.iocb_cmd.u.mbx.in_mb[2]);
1019 
1020 
1021     sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
1022     memset(&ea, 0, sizeof(ea));
1023     ea.sp = sp;
1024     ea.rc = res;
1025 
1026     if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
1027         sizeof(struct get_name_list_extended)) {
1028         n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
1029             sizeof(struct get_name_list_extended);
1030         ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
1031     }
1032 
1033     for (i = 0; i < n; i++) {
1034         e = &vha->gnl.l[i];
1035         loop_id = le16_to_cpu(e->nport_handle);
1036         /* mask out reserve bit */
1037         loop_id = (loop_id & 0x7fff);
1038         set_bit(loop_id, vha->hw->loop_id_map);
1039         wwn = wwn_to_u64(e->port_name);
1040 
1041         ql_dbg(ql_dbg_disc, vha, 0x20e8,
1042             "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
1043             __func__, &wwn, e->port_id[2], e->port_id[1],
1044             e->port_id[0], e->current_login_state, e->last_login_state,
1045             (loop_id & 0x7fff));
1046     }
1047 
1048     spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1049 
1050     INIT_LIST_HEAD(&h);
1051     fcport = tf = NULL;
1052     if (!list_empty(&vha->gnl.fcports))
1053         list_splice_init(&vha->gnl.fcports, &h);
1054     spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1055 
1056     list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
1057         spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1058         list_del_init(&fcport->gnl_entry);
1059         fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1060         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1061         ea.fcport = fcport;
1062 
1063         qla24xx_handle_gnl_done_event(vha, &ea);
1064     }
1065 
1066     /* create new fcport if fw has knowledge of new sessions */
1067     for (i = 0; i < n; i++) {
1068         port_id_t id;
1069         u64 wwnn;
1070 
1071         e = &vha->gnl.l[i];
1072         wwn = wwn_to_u64(e->port_name);
1073 
1074         found = false;
1075         list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
1076             if (!memcmp((u8 *)&wwn, fcport->port_name,
1077                 WWN_SIZE)) {
1078                 found = true;
1079                 break;
1080             }
1081         }
1082 
1083         id.b.domain = e->port_id[2];
1084         id.b.area = e->port_id[1];
1085         id.b.al_pa = e->port_id[0];
1086         id.b.rsvd_1 = 0;
1087 
1088         if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
1089             ql_dbg(ql_dbg_disc, vha, 0x2065,
1090                 "%s %d %8phC %06x post new sess\n",
1091                 __func__, __LINE__, (u8 *)&wwn, id.b24);
1092             wwnn = wwn_to_u64(e->node_name);
1093             qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
1094                 (u8 *)&wwnn, NULL, 0);
1095         }
1096     }
1097 
1098     spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1099     vha->gnl.sent = 0;
1100     if (!list_empty(&vha->gnl.fcports)) {
1101         /* retrigger gnl */
1102         list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports,
1103             gnl_entry) {
1104             list_del_init(&fcport->gnl_entry);
1105             fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1106             if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS)
1107                 break;
1108         }
1109     }
1110     spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1111 
1112     /* ref: INIT */
1113     kref_put(&sp->cmd_kref, qla2x00_sp_release);
1114 }
1115 
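     /*
      * qla24xx_async_gnl() - fetch the firmware's extended name list.
      *
      * Queues @fcport on vha->gnl.fcports and, if no GNL mailbox command
      * is already in flight, issues MBC_PORT_NODE_NAME_LIST.  All queued
      * ports are matched against the returned list in
      * qla24xx_async_gnl_sp_done().
      */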
1116 int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
1117 {
1118     srb_t *sp;
1119     int rval = QLA_FUNCTION_FAILED;
1120     unsigned long flags;
1121     u16 *mb;
1122 
1123     if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
1124         return rval;
1125 
1126     ql_dbg(ql_dbg_disc, vha, 0x20d9,
1127         "Async-gnlist WWPN %8phC \n", fcport->port_name);
1128 
1129     spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1130     fcport->flags |= FCF_ASYNC_SENT;
1131     qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
1132     fcport->last_rscn_gen = fcport->rscn_gen;
1133     fcport->last_login_gen = fcport->login_gen;
1134 
1135     list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
1136     if (vha->gnl.sent) {
1137         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1138         return QLA_SUCCESS;
1139     }
1140     vha->gnl.sent = 1;
1141     spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1142 
1143     /* ref: INIT */
1144     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1145     if (!sp)
1146         goto done;
1147 
1148     sp->type = SRB_MB_IOCB;
1149     sp->name = "gnlist";
1150     sp->gen1 = fcport->rscn_gen;
1151     sp->gen2 = fcport->login_gen;
1152     qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1153                   qla24xx_async_gnl_sp_done);
1154 
1155     mb = sp->u.iocb_cmd.u.mbx.out_mb;
1156     mb[0] = MBC_PORT_NODE_NAME_LIST;
1157     mb[1] = BIT_2 | BIT_3;
1158     mb[2] = MSW(vha->gnl.ldma);
1159     mb[3] = LSW(vha->gnl.ldma);
1160     mb[6] = MSW(MSD(vha->gnl.ldma));
1161     mb[7] = LSW(MSD(vha->gnl.ldma));
1162     mb[8] = vha->gnl.size;
1163     mb[9] = vha->vp_idx;
1164 
1165     ql_dbg(ql_dbg_disc, vha, 0x20da,
1166         "Async-%s - OUT WWPN %8phC hndl %x\n",
1167         sp->name, fcport->port_name, sp->handle);
1168 
1169     rval = qla2x00_start_sp(sp);
1170     if (rval != QLA_SUCCESS)
1171         goto done_free_sp;
1172 
1173     return rval;
1174 
1175 done_free_sp:
1176     /* ref: INIT */
1177     kref_put(&sp->cmd_kref, qla2x00_sp_release);
1178 done:
1179     fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
1180     return rval;
1181 }
1182 
1183 int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1184 {
1185     struct qla_work_evt *e;
1186 
1187     e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
1188     if (!e)
1189         return QLA_FUNCTION_FAILED;
1190 
1191     e->u.fcport.fcport = fcport;
1192     fcport->flags |= FCF_ASYNC_ACTIVE;
1193     return qla2x00_post_work(vha, e);
1194 }
1195 
1196 static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
1197 {
1198     struct scsi_qla_host *vha = sp->vha;
1199     struct qla_hw_data *ha = vha->hw;
1200     fc_port_t *fcport = sp->fcport;
1201     u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
1202     struct event_arg ea;
1203 
1204     ql_dbg(ql_dbg_disc, vha, 0x20db,
1205         "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
1206         sp->name, res, fcport->port_name, mb[1], mb[2]);
1207 
1208     fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
1209 
1210     if (res == QLA_FUNCTION_TIMEOUT)
1211         goto done;
1212 
1213     memset(&ea, 0, sizeof(ea));
1214     ea.fcport = fcport;
1215     ea.sp = sp;
1216 
1217     qla24xx_handle_gpdb_event(vha, &ea);
1218 
1219 done:
1220     dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
1221         sp->u.iocb_cmd.u.mbx.in_dma);
1222 
1223     kref_put(&sp->cmd_kref, qla2x00_sp_release);
1224 }
1225 
1226 int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
1227 {
1228     struct qla_work_evt *e;
1229 
1230     if (vha->host->active_mode == MODE_TARGET)
1231         return QLA_FUNCTION_FAILED;
1232 
1233     e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
1234     if (!e)
1235         return QLA_FUNCTION_FAILED;
1236 
1237     e->u.fcport.fcport = fcport;
1238 
1239     return qla2x00_post_work(vha, e);
1240 }
1241 
1242 static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
1243 {
1244     struct scsi_qla_host *vha = sp->vha;
1245     struct srb_iocb *lio = &sp->u.iocb_cmd;
1246     struct event_arg ea;
1247 
1248     ql_dbg(ql_dbg_disc, vha, 0x2129,
1249         "%s %8phC res %x\n", __func__,
1250         sp->fcport->port_name, res);
1251 
1252     sp->fcport->flags &= ~FCF_ASYNC_SENT;
1253 
1254     if (!test_bit(UNLOADING, &vha->dpc_flags)) {
1255         memset(&ea, 0, sizeof(ea));
1256         ea.fcport = sp->fcport;
1257         ea.data[0] = lio->u.logio.data[0];
1258         ea.data[1] = lio->u.logio.data[1];
1259         ea.iop[0] = lio->u.logio.iop[0];
1260         ea.iop[1] = lio->u.logio.iop[1];
1261         ea.sp = sp;
1262         if (res == QLA_OS_TIMER_EXPIRED)
1263             ea.data[0] = QLA_OS_TIMER_EXPIRED;
1264         else if (res)
1265             ea.data[0] = MBS_COMMAND_ERROR;
1266 
1267         qla24xx_handle_prli_done_event(vha, &ea);
1268     }
1269 
1270     kref_put(&sp->cmd_kref, qla2x00_sp_release);
1271 }
1272 
1273 int
1274 qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
1275 {
1276     srb_t *sp;
1277     struct srb_iocb *lio;
1278     int rval = QLA_FUNCTION_FAILED;
1279 
1280     if (!vha->flags.online) {
1281         ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
1282             __func__, __LINE__, fcport->port_name);
1283         return rval;
1284     }
1285 
1286     if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
1287         fcport->fw_login_state == DSC_LS_PRLI_PEND) &&
1288         qla_dual_mode_enabled(vha)) {
1289         ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n",
1290             __func__, __LINE__, fcport->port_name);
1291         return rval;
1292     }
1293 
1294     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1295     if (!sp)
1296         return rval;
1297 
1298     fcport->flags |= FCF_ASYNC_SENT;
1299     fcport->logout_completed = 0;
1300 
1301     sp->type = SRB_PRLI_CMD;
1302     sp->name = "prli";
1303     qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1304                   qla2x00_async_prli_sp_done);
1305 
1306     lio = &sp->u.iocb_cmd;
1307     lio->u.logio.flags = 0;
1308 
1309     if (NVME_TARGET(vha->hw, fcport))
1310         lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
1311 
1312     ql_dbg(ql_dbg_disc, vha, 0x211b,
1313         "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d fc4type %x priority %x %s.\n",
1314         fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
1315         fcport->login_retry, fcport->fc4_type, vha->hw->fc4_type_priority,
1316         NVME_TARGET(vha->hw, fcport) ? "nvme" : "fcp");
1317 
1318     rval = qla2x00_start_sp(sp);
1319     if (rval != QLA_SUCCESS) {
1320         fcport->flags |= FCF_LOGIN_NEEDED;
1321         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1322         goto done_free_sp;
1323     }
1324 
1325     return rval;
1326 
1327 done_free_sp:
1328     /* ref: INIT */
1329     kref_put(&sp->cmd_kref, qla2x00_sp_release);
1330     fcport->flags &= ~FCF_ASYNC_SENT;
1331     return rval;
1332 }
1333 
1334 int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1335 {
1336     struct qla_work_evt *e;
1337 
1338     e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
1339     if (!e)
1340         return QLA_FUNCTION_FAILED;
1341 
1342     e->u.fcport.fcport = fcport;
1343     e->u.fcport.opt = opt;
1344     fcport->flags |= FCF_ASYNC_ACTIVE;
1345     return qla2x00_post_work(vha, e);
1346 }
1347 
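     /*
      * qla24xx_async_gpdb() - read the firmware port database for @fcport.
      *
      * Allocates a buffer from the s_dma_pool, issues
      * MBC_GET_PORT_DATABASE, and lets qla24xx_async_gpdb_sp_done()
      * interpret the returned login state.
      */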
1348 int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
1349 {
1350     srb_t *sp;
1351     struct srb_iocb *mbx;
1352     int rval = QLA_FUNCTION_FAILED;
1353     u16 *mb;
1354     dma_addr_t pd_dma;
1355     struct port_database_24xx *pd;
1356     struct qla_hw_data *ha = vha->hw;
1357 
1358     if (IS_SESSION_DELETED(fcport)) {
1359         ql_log(ql_log_warn, vha, 0xffff,
1360                "%s: %8phC is being deleted - not sending command.\n",
1361                __func__, fcport->port_name);
1362         fcport->flags &= ~FCF_ASYNC_ACTIVE;
1363         return rval;
1364     }
1365 
1366     if (!vha->flags.online || fcport->flags & FCF_ASYNC_SENT) {
1367         ql_log(ql_log_warn, vha, 0xffff,
1368             "%s: %8phC online %d flags %x - not sending command.\n",
1369             __func__, fcport->port_name, vha->flags.online, fcport->flags);
1370         goto done;
1371     }
1372 
1373     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1374     if (!sp)
1375         goto done;
1376 
1377     qla2x00_set_fcport_disc_state(fcport, DSC_GPDB);
1378 
1379     fcport->flags |= FCF_ASYNC_SENT;
1380     sp->type = SRB_MB_IOCB;
1381     sp->name = "gpdb";
1382     sp->gen1 = fcport->rscn_gen;
1383     sp->gen2 = fcport->login_gen;
1384     qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
1385                   qla24xx_async_gpdb_sp_done);
1386 
1387     pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1388     if (pd == NULL) {
1389         ql_log(ql_log_warn, vha, 0xd043,
1390             "Failed to allocate port database structure.\n");
1391         goto done_free_sp;
1392     }
1393 
1394     mb = sp->u.iocb_cmd.u.mbx.out_mb;
1395     mb[0] = MBC_GET_PORT_DATABASE;
1396     mb[1] = fcport->loop_id;
1397     mb[2] = MSW(pd_dma);
1398     mb[3] = LSW(pd_dma);
1399     mb[6] = MSW(MSD(pd_dma));
1400     mb[7] = LSW(MSD(pd_dma));
1401     mb[9] = vha->vp_idx;
1402     mb[10] = opt;
1403 
1404     mbx = &sp->u.iocb_cmd;
1405     mbx->u.mbx.in = (void *)pd;
1406     mbx->u.mbx.in_dma = pd_dma;
1407 
1408     ql_dbg(ql_dbg_disc, vha, 0x20dc,
1409         "Async-%s %8phC hndl %x opt %x\n",
1410         sp->name, fcport->port_name, sp->handle, opt);
1411 
1412     rval = qla2x00_start_sp(sp);
1413     if (rval != QLA_SUCCESS)
1414         goto done_free_sp;
1415     return rval;
1416 
1417 done_free_sp:
1418     if (pd)
1419         dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1420 
1421     kref_put(&sp->cmd_kref, qla2x00_sp_release);
1422     fcport->flags &= ~FCF_ASYNC_SENT;
1423 done:
1424     fcport->flags &= ~FCF_ASYNC_ACTIVE;
1425     qla24xx_post_gpdb_work(vha, fcport, opt);
1426     return rval;
1427 }
1428 
1429 static
1430 void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1431 {
1432     unsigned long flags;
1433 
1434     spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1435     ea->fcport->login_gen++;
1436     ea->fcport->deleted = 0;
1437     ea->fcport->logout_on_delete = 1;
1438 
1439     if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
1440         vha->fcport_count++;
1441         ea->fcport->login_succ = 1;
1442 
1443         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1444         qla24xx_sched_upd_fcport(ea->fcport);
1445         spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1446     } else if (ea->fcport->login_succ) {
1447         /*
1448          * We have an existing session. A late RSCN delivery
1449          * must have triggered the session to be re-validated.
1450          * Session is still valid.
1451          */
1452         ql_dbg(ql_dbg_disc, vha, 0x20d6,
1453             "%s %d %8phC session revalidate success\n",
1454             __func__, __LINE__, ea->fcport->port_name);
1455         qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE);
1456     }
1457     spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1458 }
1459 
1460 static int  qla_chk_secure_login(scsi_qla_host_t    *vha, fc_port_t *fcport,
1461     struct port_database_24xx *pd)
1462 {
1463     int rc = 0;
1464 
1465     if (pd->secure_login) {
1466         ql_dbg(ql_dbg_disc, vha, 0x104d,
1467             "Secure Login established on %8phC\n",
1468             fcport->port_name);
1469         fcport->flags |= FCF_FCSP_DEVICE;
1470     } else {
1471         ql_dbg(ql_dbg_disc, vha, 0x104d,
1472             "non-Secure Login %8phC\n",
1473             fcport->port_name);
1474         fcport->flags &= ~FCF_FCSP_DEVICE;
1475     }
1476     if (vha->hw->flags.edif_enabled) {
1477         if (fcport->flags & FCF_FCSP_DEVICE) {
1478             qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND);
1479             /* Start edif prli timer & ring doorbell for app */
1480             fcport->edif.rx_sa_set = 0;
1481             fcport->edif.tx_sa_set = 0;
1482             fcport->edif.rx_sa_pending = 0;
1483             fcport->edif.tx_sa_pending = 0;
1484 
1485             qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
1486                 fcport->d_id.b24);
1487 
1488             if (DBELL_ACTIVE(vha)) {
1489                 ql_dbg(ql_dbg_disc, vha, 0x20ef,
1490                     "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n",
1491                     __func__, __LINE__, fcport->port_name);
1492                 fcport->edif.app_sess_online = 1;
1493 
1494                 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED,
1495                     fcport->d_id.b24, 0, fcport);
1496             }
1497 
1498             rc = 1;
1499         } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
1500             ql_dbg(ql_dbg_disc, vha, 0x2117,
1501                 "%s %d %8phC post prli\n",
1502                 __func__, __LINE__, fcport->port_name);
1503             qla24xx_post_prli_work(vha, fcport);
1504             rc = 1;
1505         }
1506     }
1507     return rc;
1508 }
1509 
1510 static
1511 void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
1512 {
1513     fc_port_t *fcport = ea->fcport;
1514     struct port_database_24xx *pd;
1515     struct srb *sp = ea->sp;
1516     uint8_t ls;
1517 
1518     pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
1519 
1520     fcport->flags &= ~FCF_ASYNC_SENT;
1521 
1522     ql_dbg(ql_dbg_disc, vha, 0x20d2,
1523         "%s %8phC DS %d LS %x fc4_type %x rc %x\n", __func__,
1524         fcport->port_name, fcport->disc_state, pd->current_login_state,
1525         fcport->fc4_type, ea->rc);
1526 
1527     if (fcport->disc_state == DSC_DELETE_PEND) {
1528         ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n",
1529                __func__, __LINE__, fcport->port_name);
1530         return;
1531     }
1532 
1533     if (NVME_TARGET(vha->hw, fcport))
1534         ls = pd->current_login_state >> 4;
1535     else
1536         ls = pd->current_login_state & 0xf;
1537 
1538     if (ea->sp->gen2 != fcport->login_gen) {
1539         /* target side must have changed it. */
1540 
1541         ql_dbg(ql_dbg_disc, vha, 0x20d3,
1542             "%s %8phC generation changed\n",
1543             __func__, fcport->port_name);
1544         return;
1545     } else if (ea->sp->gen1 != fcport->rscn_gen) {
1546         qla_rscn_replay(fcport);
1547         qlt_schedule_sess_for_deletion(fcport);
1548         ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1549                __func__, __LINE__, fcport->port_name, ls);
1550         return;
1551     }
1552 
1553     switch (ls) {
1554     case PDS_PRLI_COMPLETE:
1555         __qla24xx_parse_gpdb(vha, fcport, pd);
1556         break;
1557     case PDS_PLOGI_COMPLETE:
1558         if (qla_chk_secure_login(vha, fcport, pd)) {
1559             ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1560                    __func__, __LINE__, fcport->port_name, ls);
1561             return;
1562         }
1563         fallthrough;
1564     case PDS_PLOGI_PENDING:
1565     case PDS_PRLI_PENDING:
1566     case PDS_PRLI2_PENDING:
1567         /* Set discovery state back to GNL for a relogin attempt */
1568         if (qla_dual_mode_enabled(vha) ||
1569             qla_ini_mode_enabled(vha)) {
1570             qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
1571             set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1572         }
1573         ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n",
1574                __func__, __LINE__, fcport->port_name, ls);
1575         return;
1576     case PDS_LOGO_PENDING:
1577     case PDS_PORT_UNAVAILABLE:
1578     default:
1579         ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
1580             __func__, __LINE__, fcport->port_name);
1581         qlt_schedule_sess_for_deletion(fcport);
1582         return;
1583     }
1584     __qla24xx_handle_gpdb_event(vha, ea);
1585 } /* gpdb event */
1586 
1587 static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1588 {
1589     u8 login = 0;
1590     int rc;
1591 
1592     ql_dbg(ql_dbg_disc, vha, 0x307b,
1593         "%s %8phC DS %d LS %d lid %d retries=%d\n",
1594         __func__, fcport->port_name, fcport->disc_state,
1595         fcport->fw_login_state, fcport->loop_id, fcport->login_retry);
1596 
1597     if (qla_tgt_mode_enabled(vha))
1598         return;
1599 
1600     if (qla_dual_mode_enabled(vha)) {
1601         if (N2N_TOPO(vha->hw)) {
1602             u64 mywwn, wwn;
1603 
1604             mywwn = wwn_to_u64(vha->port_name);
1605             wwn = wwn_to_u64(fcport->port_name);
1606             if (mywwn > wwn)
1607                 login = 1;
1608             else if ((fcport->fw_login_state == DSC_LS_PLOGI_COMP)
1609                 && time_after_eq(jiffies,
1610                     fcport->plogi_nack_done_deadline))
1611                 login = 1;
1612         } else {
1613             login = 1;
1614         }
1615     } else {
1616         /* initiator mode */
1617         login = 1;
1618     }
1619 
1620     if (login && fcport->login_retry) {
1621         fcport->login_retry--;
1622         if (fcport->loop_id == FC_NO_LOOP_ID) {
1623             fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
1624             rc = qla2x00_find_new_loop_id(vha, fcport);
1625             if (rc) {
1626                 ql_dbg(ql_dbg_disc, vha, 0x20e6,
1627                     "%s %d %8phC post del sess - out of loopid\n",
1628                     __func__, __LINE__, fcport->port_name);
1629                 fcport->scan_state = 0;
1630                 qlt_schedule_sess_for_deletion(fcport);
1631                 return;
1632             }
1633         }
1634         ql_dbg(ql_dbg_disc, vha, 0x20bf,
1635             "%s %d %8phC post login\n",
1636             __func__, __LINE__, fcport->port_name);
1637         qla2x00_post_async_login_work(vha, fcport, NULL);
1638     }
1639 }
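
/*
 * Editor's note: hypothetical helper (not in the driver) illustrating the
 * N2N tie-break applied above in dual mode: the side with the numerically
 * larger WWPN initiates the PLOGI; the smaller side waits to be logged
 * into unless its PLOGI-accept grace period has already expired.
 */
static inline bool qla_n2n_initiates_login_sketch(u64 my_wwpn, u64 peer_wwpn)
{
	return my_wwpn > peer_wwpn;
}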
1640 
1641 int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
1642 {
1643     u16 data[2];
1644     u64 wwn;
1645     u16 sec;
1646 
1647     ql_dbg(ql_dbg_disc, vha, 0x20d8,
1648         "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d fc4type %x\n",
1649         __func__, fcport->port_name, fcport->disc_state,
1650         fcport->fw_login_state, fcport->login_pause, fcport->flags,
1651         fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
1652         fcport->login_gen, fcport->loop_id, fcport->scan_state,
1653         fcport->fc4_type);
1654 
1655     if (fcport->scan_state != QLA_FCPORT_FOUND ||
1656         fcport->disc_state == DSC_DELETE_PEND)
1657         return 0;
1658 
1659     if ((fcport->loop_id != FC_NO_LOOP_ID) &&
1660         qla_dual_mode_enabled(vha) &&
1661         ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
1662          (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
1663         return 0;
1664 
1665     if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
1666         !N2N_TOPO(vha->hw)) {
1667         if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
1668             set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1669             return 0;
1670         }
1671     }
1672 
1673     /* Target won't initiate port login if fabric is present */
1674     if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw))
1675         return 0;
1676 
1677     if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) {
1678         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1679         return 0;
1680     }
1681 
1682     switch (fcport->disc_state) {
1683     case DSC_DELETED:
1684         wwn = wwn_to_u64(fcport->node_name);
1685         switch (vha->hw->current_topology) {
1686         case ISP_CFG_N:
1687             if (fcport_is_smaller(fcport)) {
1688                 /* this adapter has the larger WWPN and initiates the login */
1689                 if (fcport->login_retry) {
1690                     if (fcport->loop_id == FC_NO_LOOP_ID) {
1691                         qla2x00_find_new_loop_id(vha,
1692                             fcport);
1693                         fcport->fw_login_state =
1694                             DSC_LS_PORT_UNAVAIL;
1695                     }
1696                     fcport->login_retry--;
1697                     qla_post_els_plogi_work(vha, fcport);
1698                 } else {
1699                     ql_log(ql_log_info, vha, 0x705d,
1700                         "Unable to reach remote port %8phC\n",
1701                         fcport->port_name);
1702                 }
1703             } else {
1704                 qla24xx_post_gnl_work(vha, fcport);
1705             }
1706             break;
1707         default:
1708             if (wwn == 0)    {
1709                 ql_dbg(ql_dbg_disc, vha, 0xffff,
1710                     "%s %d %8phC post GNNID\n",
1711                     __func__, __LINE__, fcport->port_name);
1712                 qla24xx_post_gnnid_work(vha, fcport);
1713             } else if (fcport->loop_id == FC_NO_LOOP_ID) {
1714                 ql_dbg(ql_dbg_disc, vha, 0x20bd,
1715                     "%s %d %8phC post gnl\n",
1716                     __func__, __LINE__, fcport->port_name);
1717                 qla24xx_post_gnl_work(vha, fcport);
1718             } else {
1719                 qla_chk_n2n_b4_login(vha, fcport);
1720             }
1721             break;
1722         }
1723         break;
1724 
1725     case DSC_GNL:
1726         switch (vha->hw->current_topology) {
1727         case ISP_CFG_N:
1728             if ((fcport->current_login_state & 0xf) == 0x6) {
1729                 ql_dbg(ql_dbg_disc, vha, 0x2118,
1730                     "%s %d %8phC post GPDB work\n",
1731                     __func__, __LINE__, fcport->port_name);
1732                 fcport->chip_reset =
1733                     vha->hw->base_qpair->chip_reset;
1734                 qla24xx_post_gpdb_work(vha, fcport, 0);
1735             }  else {
1736                 ql_dbg(ql_dbg_disc, vha, 0x2118,
1737                     "%s %d %8phC post %s PRLI\n",
1738                     __func__, __LINE__, fcport->port_name,
1739                     NVME_TARGET(vha->hw, fcport) ? "NVME" :
1740                     "FC");
1741                 qla24xx_post_prli_work(vha, fcport);
1742             }
1743             break;
1744         default:
1745             if (fcport->login_pause) {
1746                 ql_dbg(ql_dbg_disc, vha, 0x20d8,
1747                     "%s %d %8phC exit\n",
1748                     __func__, __LINE__,
1749                     fcport->port_name);
1750                 fcport->last_rscn_gen = fcport->rscn_gen;
1751                 fcport->last_login_gen = fcport->login_gen;
1752                 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1753                 break;
1754             }
1755             qla_chk_n2n_b4_login(vha, fcport);
1756             break;
1757         }
1758         break;
1759 
1760     case DSC_LOGIN_FAILED:
1761         if (N2N_TOPO(vha->hw))
1762             qla_chk_n2n_b4_login(vha, fcport);
1763         else
1764             qlt_schedule_sess_for_deletion(fcport);
1765         break;
1766 
1767     case DSC_LOGIN_COMPLETE:
1768         /* recheck login state */
1769         data[0] = data[1] = 0;
1770         qla2x00_post_async_adisc_work(vha, fcport, data);
1771         break;
1772 
1773     case DSC_LOGIN_PEND:
1774         if (vha->hw->flags.edif_enabled)
1775             break;
1776 
1777         if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
1778             ql_dbg(ql_dbg_disc, vha, 0x2118,
1779                    "%s %d %8phC post %s PRLI\n",
1780                    __func__, __LINE__, fcport->port_name,
1781                    NVME_TARGET(vha->hw, fcport) ? "NVME" : "FC");
1782             qla24xx_post_prli_work(vha, fcport);
1783         }
1784         break;
1785 
1786     case DSC_UPD_FCPORT:
1787         sec =  jiffies_to_msecs(jiffies -
1788             fcport->jiffies_at_registration)/1000;
1789         if (fcport->sec_since_registration < sec && sec &&
1790             !(sec % 60)) {
1791             fcport->sec_since_registration = sec;
1792             ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
1793                 "%s %8phC - Slow Rport registration(%d Sec)\n",
1794                 __func__, fcport->port_name, sec);
1795         }
1796 
1797         if (fcport->next_disc_state != DSC_DELETE_PEND)
1798             fcport->next_disc_state = DSC_ADISC;
1799         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1800         break;
1801 
1802     default:
1803         break;
1804     }
1805 
1806     return 0;
1807 }
1808 
1809 int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
1810     u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
1811 {
1812     struct qla_work_evt *e;
1813 
1814     e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
1815     if (!e)
1816         return QLA_FUNCTION_FAILED;
1817 
1818     e->u.new_sess.id = *id;
1819     e->u.new_sess.pla = pla;
1820     e->u.new_sess.fc4_type = fc4_type;
1821     memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
1822     if (node_name)
1823         memcpy(e->u.new_sess.node_name, node_name, WWN_SIZE);
1824 
1825     return qla2x00_post_work(vha, e);
1826 }
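
/*
 * Editor's note: hedged usage sketch. Callers that detect a new remote port
 * (for example the unsolicited PLOGI path) queue session creation to the
 * DPC thread roughly like this; 'pla' may be NULL when there is no pending
 * login attempt to attach:
 *
 *	if (qla24xx_post_newsess_work(vha, &port_id, wwpn, wwnn,
 *				      pla, fc4_type) != QLA_SUCCESS)
 *		... fall back or drop the request ...
 */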
1827 
1828 void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
1829 {
1830     fc_port_t *fcport;
1831     unsigned long flags;
1832 
1833     switch (ea->id.b.rsvd_1) {
1834     case RSCN_PORT_ADDR:
1835         fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
1836         if (fcport) {
1837             if (fcport->flags & FCF_FCP2_DEVICE &&
1838                 atomic_read(&fcport->state) == FCS_ONLINE) {
1839                 ql_dbg(ql_dbg_disc, vha, 0x2115,
1840                        "Delaying session delete for FCP2 portid=%06x %8phC\n",
1841                     fcport->d_id.b24, fcport->port_name);
1842                 return;
1843             }
1844 
1845             if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) {
1846                 /*
1847                  * On ipsec start by the remote port, the target port
1848                  * may use an RSCN to trigger the initiator to
1849                  * relogin. If the driver is already in the
1850                  * process of a relogin, ignore the RSCN
1851                  * and allow the current relogin to continue.
1852                  * This reduces thrashing of the connection.
1853                  */
1854                 if (atomic_read(&fcport->state) == FCS_ONLINE) {
1855                     /*
1856                      * If state = online, then set scan_needed=1 to do relogin.
1857                      * Otherwise we're already in the middle of a relogin
1858                      */
1859                     fcport->scan_needed = 1;
1860                     fcport->rscn_gen++;
1861                 }
1862             } else {
1863                 fcport->scan_needed = 1;
1864                 fcport->rscn_gen++;
1865             }
1866         }
1867         break;
1868     case RSCN_AREA_ADDR:
1869         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1870             if (fcport->flags & FCF_FCP2_DEVICE &&
1871                 atomic_read(&fcport->state) == FCS_ONLINE)
1872                 continue;
1873 
1874             if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) {
1875                 fcport->scan_needed = 1;
1876                 fcport->rscn_gen++;
1877             }
1878         }
1879         break;
1880     case RSCN_DOM_ADDR:
1881         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1882             if (fcport->flags & FCF_FCP2_DEVICE &&
1883                 atomic_read(&fcport->state) == FCS_ONLINE)
1884                 continue;
1885 
1886             if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) {
1887                 fcport->scan_needed = 1;
1888                 fcport->rscn_gen++;
1889             }
1890         }
1891         break;
1892     case RSCN_FAB_ADDR:
1893     default:
1894         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1895             if (fcport->flags & FCF_FCP2_DEVICE &&
1896                 atomic_read(&fcport->state) == FCS_ONLINE)
1897                 continue;
1898 
1899             fcport->scan_needed = 1;
1900             fcport->rscn_gen++;
1901         }
1902         break;
1903     }
1904 
1905     spin_lock_irqsave(&vha->work_lock, flags);
1906     if (vha->scan.scan_flags == 0) {
1907         ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
1908         vha->scan.scan_flags |= SF_QUEUED;
1909         schedule_delayed_work(&vha->scan.scan_work, 5);
1910     }
1911     spin_unlock_irqrestore(&vha->work_lock, flags);
1912 }
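
/*
 * Editor's note: minimal sketch (hypothetical helper) of the RSCN address
 * scoping used above. The 24-bit port ID is domain(23:16) | area(15:8) |
 * AL_PA(7:0), so an area-scope RSCN compares the top 16 bits and a
 * domain-scope RSCN compares only the top 8 bits.
 */
static inline bool qla_rscn_affects_fcport_sketch(u32 rscn_b24, u32 did_b24,
						  u8 scope)
{
	switch (scope) {
	case RSCN_PORT_ADDR:
		return rscn_b24 == did_b24;
	case RSCN_AREA_ADDR:
		return (rscn_b24 & 0xffff00) == (did_b24 & 0xffff00);
	case RSCN_DOM_ADDR:
		return (rscn_b24 & 0xff0000) == (did_b24 & 0xff0000);
	case RSCN_FAB_ADDR:
	default:
		return true;	/* fabric-wide: every port is affected */
	}
}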
1913 
1914 void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
1915     struct event_arg *ea)
1916 {
1917     fc_port_t *fcport = ea->fcport;
1918 
1919     if (test_bit(UNLOADING, &vha->dpc_flags))
1920         return;
1921 
1922     ql_dbg(ql_dbg_disc, vha, 0x2102,
1923         "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
1924         __func__, fcport->port_name, fcport->disc_state,
1925         fcport->fw_login_state, fcport->login_pause,
1926         fcport->deleted, fcport->conflict,
1927         fcport->last_rscn_gen, fcport->rscn_gen,
1928         fcport->last_login_gen, fcport->login_gen,
1929         fcport->flags);
1930 
1931     if (fcport->last_rscn_gen != fcport->rscn_gen) {
1932         ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
1933             __func__, __LINE__, fcport->port_name);
1934         qla24xx_post_gnl_work(vha, fcport);
1935         return;
1936     }
1937 
1938     qla24xx_fcport_handle_login(vha, fcport);
1939 }
1940 
1941 void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
1942                       struct event_arg *ea)
1943 {
1944     if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) &&
1945         vha->hw->flags.edif_enabled) {
1946         /* check to see if the app supports Secure */
1947         qla24xx_post_gpdb_work(vha, ea->fcport, 0);
1948         return;
1949     }
1950 
1951     /* for pure Target Mode, PRLI will not be initiated */
1952     if (vha->host->active_mode == MODE_TARGET)
1953         return;
1954 
1955     ql_dbg(ql_dbg_disc, vha, 0x2118,
1956         "%s %d %8phC post PRLI\n",
1957         __func__, __LINE__, ea->fcport->port_name);
1958     qla24xx_post_prli_work(vha, ea->fcport);
1959 }
1960 
1961 /*
1962  * RSCN(s) came in for this fcport but could not be consumed
1963  * by the fcport at the time.
1964  */
1965 void qla_rscn_replay(fc_port_t *fcport)
1966 {
1967     struct event_arg ea;
1968 
1969     switch (fcport->disc_state) {
1970     case DSC_DELETE_PEND:
1971         return;
1972     default:
1973         break;
1974     }
1975 
1976     if (fcport->scan_needed) {
1977         memset(&ea, 0, sizeof(ea));
1978         ea.id = fcport->d_id;
1979         ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
1980         qla2x00_handle_rscn(fcport->vha, &ea);
1981     }
1982 }
1983 
1984 static void
1985 qla2x00_tmf_iocb_timeout(void *data)
1986 {
1987     srb_t *sp = data;
1988     struct srb_iocb *tmf = &sp->u.iocb_cmd;
1989     int rc, h;
1990     unsigned long flags;
1991 
1992     rc = qla24xx_async_abort_cmd(sp, false);
1993     if (rc) {
1994         spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
1995         for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
1996             if (sp->qpair->req->outstanding_cmds[h] == sp) {
1997                 sp->qpair->req->outstanding_cmds[h] = NULL;
1998                 break;
1999             }
2000         }
2001         spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2002         tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT);
2003         tmf->u.tmf.data = QLA_FUNCTION_FAILED;
2004         complete(&tmf->u.tmf.comp);
2005     }
2006 }
2007 
2008 static void qla2x00_tmf_sp_done(srb_t *sp, int res)
2009 {
2010     struct srb_iocb *tmf = &sp->u.iocb_cmd;
2011 
2012     complete(&tmf->u.tmf.comp);
2013 }
2014 
2015 int
2016 qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
2017     uint32_t tag)
2018 {
2019     struct scsi_qla_host *vha = fcport->vha;
2020     struct srb_iocb *tm_iocb;
2021     srb_t *sp;
2022     int rval = QLA_FUNCTION_FAILED;
2023     uint8_t bail;
2024 
2025     /* ref: INIT */
2026     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2027     if (!sp)
2028         goto done;
2029 
2030     QLA_VHA_MARK_BUSY(vha, bail);
2031     sp->type = SRB_TM_CMD;
2032     sp->name = "tmf";
2033     qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha),
2034                   qla2x00_tmf_sp_done);
2035     sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout;
2036 
2037     tm_iocb = &sp->u.iocb_cmd;
2038     init_completion(&tm_iocb->u.tmf.comp);
2039     tm_iocb->u.tmf.flags = flags;
2040     tm_iocb->u.tmf.lun = lun;
2041 
2042     ql_dbg(ql_dbg_taskm, vha, 0x802f,
2043         "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
2044         sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2045         fcport->d_id.b.area, fcport->d_id.b.al_pa);
2046 
2047     rval = qla2x00_start_sp(sp);
2048     if (rval != QLA_SUCCESS)
2049         goto done_free_sp;
2050     wait_for_completion(&tm_iocb->u.tmf.comp);
2051 
2052     rval = tm_iocb->u.tmf.data;
2053 
2054     if (rval != QLA_SUCCESS) {
2055         ql_log(ql_log_warn, vha, 0x8030,
2056             "TM IOCB failed (%x).\n", rval);
2057     }
2058 
2059     if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
2060         flags = tm_iocb->u.tmf.flags;
2061         lun = (uint16_t)tm_iocb->u.tmf.lun;
2062 
2063         /* Issue Marker IOCB */
2064         qla2x00_marker(vha, vha->hw->base_qpair,
2065             fcport->loop_id, lun,
2066             flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
2067     }
2068 
2069 done_free_sp:
2070     /* ref: INIT */
2071     kref_put(&sp->cmd_kref, qla2x00_sp_release);
2072     fcport->flags &= ~FCF_ASYNC_SENT;
2073 done:
2074     return rval;
2075 }
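
/*
 * Editor's note: hedged usage sketch. The SCSI error handlers call this
 * routine synchronously; a LUN reset for a given fcport looks roughly like:
 *
 *	if (qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, lun, 1) != QLA_SUCCESS)
 *		return FAILED;
 *
 * The caller blocks on tmf.comp until the TM IOCB completes or its timeout
 * handler fails the request with CS_TIMEOUT.
 */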
2076 
2077 int
2078 qla24xx_async_abort_command(srb_t *sp)
2079 {
2080     unsigned long   flags = 0;
2081 
2082     uint32_t    handle;
2083     fc_port_t   *fcport = sp->fcport;
2084     struct qla_qpair *qpair = sp->qpair;
2085     struct scsi_qla_host *vha = fcport->vha;
2086     struct req_que *req = qpair->req;
2087 
2088     spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2089     for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
2090         if (req->outstanding_cmds[handle] == sp)
2091             break;
2092     }
2093     spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2094 
2095     if (handle == req->num_outstanding_cmds) {
2096         /* Command not found. */
2097         return QLA_ERR_NOT_FOUND;
2098     }
2099     if (sp->type == SRB_FXIOCB_DCMD)
2100         return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
2101             FXDISC_ABORT_IOCTL);
2102 
2103     return qla24xx_async_abort_cmd(sp, true);
2104 }
2105 
2106 static void
2107 qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
2108 {
2109     struct srb *sp;
2110     WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
2111           ea->data[0]);
2112 
2113     switch (ea->data[0]) {
2114     case MBS_COMMAND_COMPLETE:
2115         ql_dbg(ql_dbg_disc, vha, 0x2118,
2116             "%s %d %8phC post gpdb\n",
2117             __func__, __LINE__, ea->fcport->port_name);
2118 
2119         ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2120         ea->fcport->logout_on_delete = 1;
2121         ea->fcport->nvme_prli_service_param = ea->iop[0];
2122         if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
2123             ea->fcport->nvme_first_burst_size =
2124                 (ea->iop[1] & 0xffff) * 512;
2125         else
2126             ea->fcport->nvme_first_burst_size = 0;
2127         qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2128         break;
2129     default:
2130         sp = ea->sp;
2131         ql_dbg(ql_dbg_disc, vha, 0x2118,
2132                "%s %d %8phC priority %s, fc4type %x prev try %s\n",
2133                __func__, __LINE__, ea->fcport->port_name,
2134                vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ?
2135                "FCP" : "NVMe", ea->fcport->fc4_type,
2136                (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ?
2137             "NVME" : "FCP");
2138 
2139         if (NVME_FCP_TARGET(ea->fcport)) {
2140             if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI)
2141                 ea->fcport->do_prli_nvme = 0;
2142             else
2143                 ea->fcport->do_prli_nvme = 1;
2144         } else {
2145             ea->fcport->do_prli_nvme = 0;
2146         }
2147 
2148         if (N2N_TOPO(vha->hw)) {
2149             if (ea->fcport->n2n_link_reset_cnt ==
2150                 vha->hw->login_retry_count &&
2151                 ea->fcport->flags & FCF_FCSP_DEVICE) {
2152                 /* remote authentication app just started */
2153                 ea->fcport->n2n_link_reset_cnt = 0;
2154             }
2155 
2156             if (ea->fcport->n2n_link_reset_cnt <
2157                 vha->hw->login_retry_count) {
2158                 ea->fcport->n2n_link_reset_cnt++;
2159                 vha->relogin_jif = jiffies + 2 * HZ;
2160                 /*
2161                  * PRLI failed. Reset link to kick start
2162                  * state machine
2163                  */
2164                 set_bit(N2N_LINK_RESET, &vha->dpc_flags);
2165                 qla2xxx_wake_dpc(vha);
2166             } else {
2167                 ql_log(ql_log_warn, vha, 0x2119,
2168                        "%s %d %8phC Unable to reconnect\n",
2169                        __func__, __LINE__,
2170                        ea->fcport->port_name);
2171             }
2172         } else {
2173             /*
2174              * Switch connection: login failed. Take the connection
2175              * down and allow relogin to retrigger.
2176              */
2177             ea->fcport->flags &= ~FCF_ASYNC_SENT;
2178             ea->fcport->keep_nport_handle = 0;
2179             ea->fcport->logout_on_delete = 1;
2180             qlt_schedule_sess_for_deletion(ea->fcport);
2181         }
2182         break;
2183     }
2184 }
2185 
2186 void
2187 qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
2188 {
2189     port_id_t cid;  /* conflict Nport id */
2190     u16 lid;
2191     struct fc_port *conflict_fcport;
2192     unsigned long flags;
2193     struct fc_port *fcport = ea->fcport;
2194 
2195     ql_dbg(ql_dbg_disc, vha, 0xffff,
2196         "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
2197         __func__, fcport->port_name, fcport->disc_state,
2198         fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2199         ea->sp->gen1, fcport->rscn_gen,
2200         ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
2201 
2202     if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
2203         (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
2204         ql_dbg(ql_dbg_disc, vha, 0x20ea,
2205             "%s %d %8phC Remote is trying to login\n",
2206             __func__, __LINE__, fcport->port_name);
2207         return;
2208     }
2209 
2210     if ((fcport->disc_state == DSC_DELETE_PEND) ||
2211         (fcport->disc_state == DSC_DELETED)) {
2212         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2213         return;
2214     }
2215 
2216     if (ea->sp->gen2 != fcport->login_gen) {
2217         /* target side must have changed it. */
2218         ql_dbg(ql_dbg_disc, vha, 0x20d3,
2219             "%s %8phC generation changed\n",
2220             __func__, fcport->port_name);
2221         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2222         return;
2223     } else if (ea->sp->gen1 != fcport->rscn_gen) {
2224         ql_dbg(ql_dbg_disc, vha, 0x20d3,
2225             "%s %8phC RSCN generation changed\n",
2226             __func__, fcport->port_name);
2227         qla_rscn_replay(fcport);
2228         qlt_schedule_sess_for_deletion(fcport);
2229         return;
2230     }
2231 
2232     WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
2233           ea->data[0]);
2234 
2235     switch (ea->data[0]) {
2236     case MBS_COMMAND_COMPLETE:
2237         /*
2238          * Driver must validate login state - If PRLI not complete,
2239          * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
2240          * requests.
2241          */
2242         if (vha->hw->flags.edif_enabled) {
2243             set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2244             spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
2245             ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2246             ea->fcport->logout_on_delete = 1;
2247             ea->fcport->send_els_logo = 0;
2248             ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
2249             spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2250 
2251             qla24xx_post_gpdb_work(vha, ea->fcport, 0);
2252         } else {
2253             if (NVME_TARGET(vha->hw, fcport)) {
2254                 ql_dbg(ql_dbg_disc, vha, 0x2117,
2255                     "%s %d %8phC post prli\n",
2256                     __func__, __LINE__, fcport->port_name);
2257                 qla24xx_post_prli_work(vha, fcport);
2258             } else {
2259                 ql_dbg(ql_dbg_disc, vha, 0x20ea,
2260                     "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n",
2261                     __func__, __LINE__, fcport->port_name,
2262                     fcport->loop_id, fcport->d_id.b24);
2263 
2264                 set_bit(fcport->loop_id, vha->hw->loop_id_map);
2265                 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
2266                 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2267                 fcport->logout_on_delete = 1;
2268                 fcport->send_els_logo = 0;
2269                 fcport->fw_login_state = DSC_LS_PRLI_COMP;
2270                 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
2271 
2272                 qla24xx_post_gpdb_work(vha, fcport, 0);
2273             }
2274         }
2275         break;
2276     case MBS_COMMAND_ERROR:
2277         ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
2278             __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
2279 
2280         qlt_schedule_sess_for_deletion(ea->fcport);
2281         break;
2282     case MBS_LOOP_ID_USED:
2283         /* data[1] = IO PARAM 1 = nport ID  */
2284         cid.b.domain = (ea->iop[1] >> 16) & 0xff;
2285         cid.b.area   = (ea->iop[1] >>  8) & 0xff;
2286         cid.b.al_pa  = ea->iop[1] & 0xff;
2287         cid.b.rsvd_1 = 0;
2288 
2289         ql_dbg(ql_dbg_disc, vha, 0x20ec,
2290             "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2291             __func__, __LINE__, ea->fcport->port_name,
2292             ea->fcport->loop_id, cid.b24);
2293 
2294         set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
2295         ea->fcport->loop_id = FC_NO_LOOP_ID;
2296         qla24xx_post_gnl_work(vha, ea->fcport);
2297         break;
2298     case MBS_PORT_ID_USED:
2299         lid = ea->iop[1] & 0xffff;
2300         qlt_find_sess_invalidate_other(vha,
2301             wwn_to_u64(ea->fcport->port_name),
2302             ea->fcport->d_id, lid, &conflict_fcport);
2303 
2304         if (conflict_fcport) {
2305             /*
2306              * Another fcport shares the same loop_id/nport id.
2307              * The conflicting fcport needs to finish cleanup before
2308              * this fcport can proceed to login.
2309              */
2310             conflict_fcport->conflict = ea->fcport;
2311             ea->fcport->login_pause = 1;
2312 
2313             ql_dbg(ql_dbg_disc, vha, 0x20ed,
2314                 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
2315                 __func__, __LINE__, ea->fcport->port_name,
2316                 ea->fcport->d_id.b24, lid);
2317         } else {
2318             ql_dbg(ql_dbg_disc, vha, 0x20ed,
2319                 "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
2320                 __func__, __LINE__, ea->fcport->port_name,
2321                 ea->fcport->d_id.b24, lid);
2322 
2323             qla2x00_clear_loop_id(ea->fcport);
2324             set_bit(lid, vha->hw->loop_id_map);
2325             ea->fcport->loop_id = lid;
2326             ea->fcport->keep_nport_handle = 0;
2327             ea->fcport->logout_on_delete = 1;
2328             qlt_schedule_sess_for_deletion(ea->fcport);
2329         }
2330         break;
2331     }
2332     return;
2333 }
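
/*
 * Editor's note: sketch (hypothetical helper, not in the driver) of how the
 * conflicting N_Port ID is unpacked from mailbox IOP1 in the
 * MBS_LOOP_ID_USED case above: domain, area and AL_PA occupy bytes 2, 1
 * and 0 respectively.
 */
static inline port_id_t qla_iop1_to_port_id_sketch(u32 iop1)
{
	port_id_t cid = {};

	cid.b.domain = (iop1 >> 16) & 0xff;
	cid.b.area   = (iop1 >>  8) & 0xff;
	cid.b.al_pa  = iop1 & 0xff;
	return cid;
}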
2334 
2335 /****************************************************************************/
2336 /*                QLogic ISP2x00 Hardware Support Functions.                */
2337 /****************************************************************************/
2338 
2339 static int
2340 qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
2341 {
2342     int rval = QLA_SUCCESS;
2343     struct qla_hw_data *ha = vha->hw;
2344     uint32_t idc_major_ver, idc_minor_ver;
2345     uint16_t config[4];
2346 
2347     qla83xx_idc_lock(vha, 0);
2348 
2349     /* SV: TODO: Assign initialization timeout from
2350      * flash-info / other param
2351      */
2352     ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
2353     ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
2354 
2355     /* Set our fcoe function presence */
2356     if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
2357         ql_dbg(ql_dbg_p3p, vha, 0xb077,
2358             "Error while setting DRV-Presence.\n");
2359         rval = QLA_FUNCTION_FAILED;
2360         goto exit;
2361     }
2362 
2363     /* Decide the reset ownership */
2364     qla83xx_reset_ownership(vha);
2365 
2366     /*
2367      * On first protocol driver load:
2368      * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
2369      * register.
2370      * Others: Check compatibility with current IDC Major version.
2371      */
2372     qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
2373     if (ha->flags.nic_core_reset_owner) {
2374         /* Set IDC Major version */
2375         idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
2376         qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);
2377 
2378         /* Clearing IDC-Lock-Recovery register */
2379         qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
2380     } else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
2381         /*
2382          * Clear further IDC participation if we are not compatible with
2383          * the current IDC Major Version.
2384          */
2385         ql_log(ql_log_warn, vha, 0xb07d,
2386             "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
2387             idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
2388         __qla83xx_clear_drv_presence(vha);
2389         rval = QLA_FUNCTION_FAILED;
2390         goto exit;
2391     }
2392     /* Each function sets its supported Minor version. */
2393     qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
2394     idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
2395     qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);
2396 
2397     if (ha->flags.nic_core_reset_owner) {
2398         memset(config, 0, sizeof(config));
2399         if (!qla81xx_get_port_config(vha, config))
2400             qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
2401                 QLA8XXX_DEV_READY);
2402     }
2403 
2404     rval = qla83xx_idc_state_handler(vha);
2405 
2406 exit:
2407     qla83xx_idc_unlock(vha, 0);
2408 
2409     return rval;
2410 }
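
/*
 * Editor's note: minimal sketch (hypothetical helper) of the IDC minor
 * version handshake above: each PCI function advertises its supported minor
 * version in its own 2-bit field of the shared register, indexed by port
 * number.
 */
static inline u32 qla83xx_merge_minor_ver_sketch(u32 idc_minor_ver, u8 portnum)
{
	return idc_minor_ver |
	       (QLA83XX_SUPP_IDC_MINOR_VERSION << (portnum * 2));
}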
2411 
2412 /*
2413 * qla2x00_initialize_adapter
2414 *      Initialize board.
2415 *
2416 * Input:
2417 *      vha = adapter block pointer.
2418 *
2419 * Returns:
2420 *      0 = success
2421 */
2422 int
2423 qla2x00_initialize_adapter(scsi_qla_host_t *vha)
2424 {
2425     int rval;
2426     struct qla_hw_data *ha = vha->hw;
2427     struct req_que *req = ha->req_q_map[0];
2428     struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2429 
2430     memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
2431     memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
2432 
2433     /* Clear adapter flags. */
2434     vha->flags.online = 0;
2435     ha->flags.chip_reset_done = 0;
2436     vha->flags.reset_active = 0;
2437     ha->flags.pci_channel_io_perm_failure = 0;
2438     ha->flags.eeh_busy = 0;
2439     vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
2440     atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2441     atomic_set(&vha->loop_state, LOOP_DOWN);
2442     vha->device_flags = DFLG_NO_CABLE;
2443     vha->dpc_flags = 0;
2444     vha->flags.management_server_logged_in = 0;
2445     vha->marker_needed = 0;
2446     ha->isp_abort_cnt = 0;
2447     ha->beacon_blink_led = 0;
2448 
2449     set_bit(0, ha->req_qid_map);
2450     set_bit(0, ha->rsp_qid_map);
2451 
2452     ql_dbg(ql_dbg_init, vha, 0x0040,
2453         "Configuring PCI space...\n");
2454     rval = ha->isp_ops->pci_config(vha);
2455     if (rval) {
2456         ql_log(ql_log_warn, vha, 0x0044,
2457             "Unable to configure PCI space.\n");
2458         return (rval);
2459     }
2460 
2461     ha->isp_ops->reset_chip(vha);
2462 
2463     /* Check for secure flash support */
2464     if (IS_QLA28XX(ha)) {
2465         if (rd_reg_word(&reg->mailbox12) & BIT_0)
2466             ha->flags.secure_adapter = 1;
2467         ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n",
2468             (ha->flags.secure_adapter) ? "Yes" : "No");
2469     }
2470 
2471 
2472     rval = qla2xxx_get_flash_info(vha);
2473     if (rval) {
2474         ql_log(ql_log_fatal, vha, 0x004f,
2475             "Unable to validate FLASH data.\n");
2476         return rval;
2477     }
2478 
2479     if (IS_QLA8044(ha)) {
2480         qla8044_read_reset_template(vha);
2481 
2482         /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
2483          * If DONTRESET_BIT0 is set, drivers should not set dev_state
2484          * to NEED_RESET. But if NEED_RESET is set, drivers should
2485          * honor the reset. */
2486         if (ql2xdontresethba == 1)
2487             qla8044_set_idc_dontreset(vha);
2488     }
2489 
2490     ha->isp_ops->get_flash_version(vha, req->ring);
2491     ql_dbg(ql_dbg_init, vha, 0x0061,
2492         "Configure NVRAM parameters...\n");
2493 
2494     /* Let priority default to FCP, can be overridden by nvram_config */
2495     ha->fc4_type_priority = FC4_PRIORITY_FCP;
2496 
2497     ha->isp_ops->nvram_config(vha);
2498 
2499     if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
2500         ha->fc4_type_priority != FC4_PRIORITY_NVME)
2501         ha->fc4_type_priority = FC4_PRIORITY_FCP;
2502 
2503     ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n",
2504            ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
2505 
2506     if (ha->flags.disable_serdes) {
2507         /* Mask HBA via NVRAM settings? */
2508         ql_log(ql_log_info, vha, 0x0077,
2509             "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
2510         return QLA_FUNCTION_FAILED;
2511     }
2512 
2513     ql_dbg(ql_dbg_init, vha, 0x0078,
2514         "Verifying loaded RISC code...\n");
2515 
2516     /* If smartsan is enabled then fdmi and rdp must also be enabled */
2517     if (ql2xsmartsan) {
2518         ql2xfdmienable = 1;
2519         ql2xrdpenable = 1;
2520     }
2521 
2522     if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
2523         rval = ha->isp_ops->chip_diag(vha);
2524         if (rval)
2525             return (rval);
2526         rval = qla2x00_setup_chip(vha);
2527         if (rval)
2528             return (rval);
2529     }
2530 
2531     if (IS_QLA84XX(ha)) {
2532         ha->cs84xx = qla84xx_get_chip(vha);
2533         if (!ha->cs84xx) {
2534             ql_log(ql_log_warn, vha, 0x00d0,
2535                 "Unable to configure ISP84XX.\n");
2536             return QLA_FUNCTION_FAILED;
2537         }
2538     }
2539 
2540     if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
2541         rval = qla2x00_init_rings(vha);
2542 
2543     /* No point in continuing if firmware initialization failed. */
2544     if (rval != QLA_SUCCESS)
2545         return rval;
2546 
2547     ha->flags.chip_reset_done = 1;
2548 
2549     if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
2550         /* Issue verify 84xx FW IOCB to complete 84xx initialization */
2551         rval = qla84xx_init_chip(vha);
2552         if (rval != QLA_SUCCESS) {
2553             ql_log(ql_log_warn, vha, 0x00d4,
2554                 "Unable to initialize ISP84XX.\n");
2555             qla84xx_put_chip(vha);
2556         }
2557     }
2558 
2559     /* Load the NIC Core f/w if we are the first protocol driver. */
2560     if (IS_QLA8031(ha)) {
2561         rval = qla83xx_nic_core_fw_load(vha);
2562         if (rval)
2563             ql_log(ql_log_warn, vha, 0x0124,
2564                 "Error in initializing NIC Core f/w.\n");
2565     }
2566 
2567     if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
2568         qla24xx_read_fcp_prio_cfg(vha);
2569 
2570     if (IS_P3P_TYPE(ha))
2571         qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
2572     else
2573         qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
2574 
2575     return (rval);
2576 }
2577 
2578 /**
2579  * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
2580  * @vha: HA context
2581  *
2582  * Returns 0 on success.
2583  */
2584 int
2585 qla2100_pci_config(scsi_qla_host_t *vha)
2586 {
2587     uint16_t w;
2588     unsigned long flags;
2589     struct qla_hw_data *ha = vha->hw;
2590     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2591 
2592     pci_set_master(ha->pdev);
2593     pci_try_set_mwi(ha->pdev);
2594 
2595     pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2596     w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2597     pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2598 
2599     pci_disable_rom(ha->pdev);
2600 
2601     /* Get PCI bus information. */
2602     spin_lock_irqsave(&ha->hardware_lock, flags);
2603     ha->pci_attr = rd_reg_word(&reg->ctrl_status);
2604     spin_unlock_irqrestore(&ha->hardware_lock, flags);
2605 
2606     return QLA_SUCCESS;
2607 }
2608 
2609 /**
2610  * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
2611  * @vha: HA context
2612  *
2613  * Returns 0 on success.
2614  */
2615 int
2616 qla2300_pci_config(scsi_qla_host_t *vha)
2617 {
2618     uint16_t    w;
2619     unsigned long   flags = 0;
2620     uint32_t    cnt;
2621     struct qla_hw_data *ha = vha->hw;
2622     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2623 
2624     pci_set_master(ha->pdev);
2625     pci_try_set_mwi(ha->pdev);
2626 
2627     pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2628     w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2629 
2630     if (IS_QLA2322(ha) || IS_QLA6322(ha))
2631         w &= ~PCI_COMMAND_INTX_DISABLE;
2632     pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2633 
2634     /*
2635      * If this is a 2300 card and not 2312, reset the
2636      * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
2637      * the 2310 also reports itself as a 2300 so we need to get the
2638      * fb revision level -- a 6 indicates it really is a 2300 and
2639      * not a 2310.
2640      */
2641     if (IS_QLA2300(ha)) {
2642         spin_lock_irqsave(&ha->hardware_lock, flags);
2643 
2644         /* Pause RISC. */
2645         wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
2646         for (cnt = 0; cnt < 30000; cnt++) {
2647             if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
2648                 break;
2649 
2650             udelay(10);
2651         }
2652 
2653         /* Select FPM registers. */
2654         wrt_reg_word(&reg->ctrl_status, 0x20);
2655         rd_reg_word(&reg->ctrl_status);
2656 
2657         /* Get the fb rev level */
2658         ha->fb_rev = RD_FB_CMD_REG(ha, reg);
2659 
2660         if (ha->fb_rev == FPM_2300)
2661             pci_clear_mwi(ha->pdev);
2662 
2663         /* Deselect FPM registers. */
2664         wrt_reg_word(&reg->ctrl_status, 0x0);
2665         rd_reg_word(&reg->ctrl_status);
2666 
2667         /* Release RISC module. */
2668         wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
2669         for (cnt = 0; cnt < 30000; cnt++) {
2670             if ((rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
2671                 break;
2672 
2673             udelay(10);
2674         }
2675 
2676         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2677     }
2678 
2679     pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2680 
2681     pci_disable_rom(ha->pdev);
2682 
2683     /* Get PCI bus information. */
2684     spin_lock_irqsave(&ha->hardware_lock, flags);
2685     ha->pci_attr = rd_reg_word(&reg->ctrl_status);
2686     spin_unlock_irqrestore(&ha->hardware_lock, flags);
2687 
2688     return QLA_SUCCESS;
2689 }
2690 
2691 /**
2692  * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
2693  * @vha: HA context
2694  *
2695  * Returns 0 on success.
2696  */
2697 int
2698 qla24xx_pci_config(scsi_qla_host_t *vha)
2699 {
2700     uint16_t w;
2701     unsigned long flags = 0;
2702     struct qla_hw_data *ha = vha->hw;
2703     struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2704 
2705     pci_set_master(ha->pdev);
2706     pci_try_set_mwi(ha->pdev);
2707 
2708     pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2709     w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2710     w &= ~PCI_COMMAND_INTX_DISABLE;
2711     pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2712 
2713     pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2714 
2715     /* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
2716     if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
2717         pcix_set_mmrbc(ha->pdev, 2048);
2718 
2719     /* PCIe -- adjust Maximum Read Request Size (4096). */
2720     if (pci_is_pcie(ha->pdev))
2721         pcie_set_readrq(ha->pdev, 4096);
2722 
2723     pci_disable_rom(ha->pdev);
2724 
2725     ha->chip_revision = ha->pdev->revision;
2726 
2727     /* Get PCI bus information. */
2728     spin_lock_irqsave(&ha->hardware_lock, flags);
2729     ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
2730     spin_unlock_irqrestore(&ha->hardware_lock, flags);
2731 
2732     return QLA_SUCCESS;
2733 }
2734 
2735 /**
2736  * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
2737  * @vha: HA context
2738  *
2739  * Returns 0 on success.
2740  */
2741 int
2742 qla25xx_pci_config(scsi_qla_host_t *vha)
2743 {
2744     uint16_t w;
2745     struct qla_hw_data *ha = vha->hw;
2746 
2747     pci_set_master(ha->pdev);
2748     pci_try_set_mwi(ha->pdev);
2749 
2750     pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2751     w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2752     w &= ~PCI_COMMAND_INTX_DISABLE;
2753     pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2754 
2755     /* PCIe -- adjust Maximum Read Request Size (4096). */
2756     if (pci_is_pcie(ha->pdev))
2757         pcie_set_readrq(ha->pdev, 4096);
2758 
2759     pci_disable_rom(ha->pdev);
2760 
2761     ha->chip_revision = ha->pdev->revision;
2762 
2763     return QLA_SUCCESS;
2764 }
2765 
2766 /**
2767  * qla2x00_isp_firmware() - Choose firmware image.
2768  * @vha: HA context
2769  *
2770  * Returns 0 on success.
2771  */
2772 static int
2773 qla2x00_isp_firmware(scsi_qla_host_t *vha)
2774 {
2775     int  rval;
2776     uint16_t loop_id, topo, sw_cap;
2777     uint8_t domain, area, al_pa;
2778     struct qla_hw_data *ha = vha->hw;
2779 
2780     /* Assume loading risc code */
2781     rval = QLA_FUNCTION_FAILED;
2782 
2783     if (ha->flags.disable_risc_code_load) {
2784         ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
2785 
2786         /* Verify checksum of loaded RISC code. */
2787         rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
2788         if (rval == QLA_SUCCESS) {
2789             /* And, verify we are not in ROM code. */
2790             rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2791                 &area, &domain, &topo, &sw_cap);
2792         }
2793     }
2794 
2795     if (rval)
2796         ql_dbg(ql_dbg_init, vha, 0x007a,
2797             "**** Load RISC code ****.\n");
2798 
2799     return (rval);
2800 }
2801 
2802 /**
2803  * qla2x00_reset_chip() - Reset ISP chip.
2804  * @vha: HA context
2805  *
2806  * Returns 0 on success.
2807  */
2808 int
2809 qla2x00_reset_chip(scsi_qla_host_t *vha)
2810 {
2811     unsigned long   flags = 0;
2812     struct qla_hw_data *ha = vha->hw;
2813     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2814     uint32_t    cnt;
2815     uint16_t    cmd;
2816     int rval = QLA_FUNCTION_FAILED;
2817 
2818     if (unlikely(pci_channel_offline(ha->pdev)))
2819         return rval;
2820 
2821     ha->isp_ops->disable_intrs(ha);
2822 
2823     spin_lock_irqsave(&ha->hardware_lock, flags);
2824 
2825     /* Turn off master enable */
2826     cmd = 0;
2827     pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
2828     cmd &= ~PCI_COMMAND_MASTER;
2829     pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2830 
2831     if (!IS_QLA2100(ha)) {
2832         /* Pause RISC. */
2833         wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
2834         if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
2835             for (cnt = 0; cnt < 30000; cnt++) {
2836                 if ((rd_reg_word(&reg->hccr) &
2837                     HCCR_RISC_PAUSE) != 0)
2838                     break;
2839                 udelay(100);
2840             }
2841         } else {
2842             rd_reg_word(&reg->hccr);    /* PCI Posting. */
2843             udelay(10);
2844         }
2845 
2846         /* Select FPM registers. */
2847         wrt_reg_word(&reg->ctrl_status, 0x20);
2848         rd_reg_word(&reg->ctrl_status);     /* PCI Posting. */
2849 
2850         /* FPM Soft Reset. */
2851         wrt_reg_word(&reg->fpm_diag_config, 0x100);
2852         rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
2853 
2854         /* Toggle Fpm Reset. */
2855         if (!IS_QLA2200(ha)) {
2856             wrt_reg_word(&reg->fpm_diag_config, 0x0);
2857             rd_reg_word(&reg->fpm_diag_config); /* PCI Posting. */
2858         }
2859 
2860         /* Select frame buffer registers. */
2861         wrt_reg_word(&reg->ctrl_status, 0x10);
2862         rd_reg_word(&reg->ctrl_status);     /* PCI Posting. */
2863 
2864         /* Reset frame buffer FIFOs. */
2865         if (IS_QLA2200(ha)) {
2866             WRT_FB_CMD_REG(ha, reg, 0xa000);
2867             RD_FB_CMD_REG(ha, reg);     /* PCI Posting. */
2868         } else {
2869             WRT_FB_CMD_REG(ha, reg, 0x00fc);
2870 
2871             /* Read back fb_cmd until zero or 3 seconds max */
2872             for (cnt = 0; cnt < 3000; cnt++) {
2873                 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
2874                     break;
2875                 udelay(100);
2876             }
2877         }
2878 
2879         /* Select RISC module registers. */
2880         wrt_reg_word(&reg->ctrl_status, 0);
2881         rd_reg_word(&reg->ctrl_status);     /* PCI Posting. */
2882 
2883         /* Reset RISC processor. */
2884         wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
2885         rd_reg_word(&reg->hccr);        /* PCI Posting. */
2886 
2887         /* Release RISC processor. */
2888         wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
2889         rd_reg_word(&reg->hccr);        /* PCI Posting. */
2890     }
2891 
2892     wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
2893     wrt_reg_word(&reg->hccr, HCCR_CLR_HOST_INT);
2894 
2895     /* Reset ISP chip. */
2896     wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
2897 
2898     /* Wait for RISC to recover from reset. */
2899     if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2900         /*
2901          * It is necessary to delay here since the card doesn't
2902          * respond to PCI reads during a reset. On some architectures
2903          * this would result in an MCA.
2904          */
2905         udelay(20);
2906         for (cnt = 30000; cnt; cnt--) {
2907             if ((rd_reg_word(&reg->ctrl_status) &
2908                 CSR_ISP_SOFT_RESET) == 0)
2909                 break;
2910             udelay(100);
2911         }
2912     } else
2913         udelay(10);
2914 
2915     /* Reset RISC processor. */
2916     wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
2917 
2918     wrt_reg_word(&reg->semaphore, 0);
2919 
2920     /* Release RISC processor. */
2921     wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
2922     rd_reg_word(&reg->hccr);            /* PCI Posting. */
2923 
2924     if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
2925         for (cnt = 0; cnt < 30000; cnt++) {
2926             if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
2927                 break;
2928 
2929             udelay(100);
2930         }
2931     } else
2932         udelay(100);
2933 
2934     /* Turn on master enable */
2935     cmd |= PCI_COMMAND_MASTER;
2936     pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
2937 
2938     /* Disable RISC pause on FPM parity error. */
2939     if (!IS_QLA2100(ha)) {
2940         wrt_reg_word(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
2941         rd_reg_word(&reg->hccr);        /* PCI Posting. */
2942     }
2943 
2944     spin_unlock_irqrestore(&ha->hardware_lock, flags);
2945 
2946     return QLA_SUCCESS;
2947 }
2948 
2949 /**
2950  * qla81xx_reset_mpi() - Reset MPI FW via the Write MPI Register MBC.
2951  * @vha: HA context
2952  *
2953  * Returns 0 on success.
2954  */
2955 static int
2956 qla81xx_reset_mpi(scsi_qla_host_t *vha)
2957 {
2958     uint16_t mb[4] = {0x1010, 0, 1, 0};
2959 
2960     if (!IS_QLA81XX(vha->hw))
2961         return QLA_SUCCESS;
2962 
2963     return qla81xx_write_mpi_register(vha, mb);
2964 }
2965 
2966 static int
2967 qla_chk_risc_recovery(scsi_qla_host_t *vha)
2968 {
2969     struct qla_hw_data *ha = vha->hw;
2970     struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2971     __le16 __iomem *mbptr = &reg->mailbox0;
2972     int i;
2973     u16 mb[32];
2974     int rc = QLA_SUCCESS;
2975 
2976     if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2977         return rc;
2978 
2979     /* this check is only valid after RISC reset */
2980     mb[0] = rd_reg_word(mbptr);
2981     mbptr++;
2982     if (mb[0] == 0xf) {
2983         rc = QLA_FUNCTION_FAILED;
2984 
2985         for (i = 1; i < 32; i++) {
2986             mb[i] = rd_reg_word(mbptr);
2987             mbptr++;
2988         }
2989 
2990         ql_log(ql_log_warn, vha, 0x1015,
2991                "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
2992                mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]);
2993         ql_log(ql_log_warn, vha, 0x1015,
2994                "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
2995                mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14],
2996                mb[15]);
2997         ql_log(ql_log_warn, vha, 0x1015,
2998                "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
2999                mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22],
3000                mb[23]);
3001         ql_log(ql_log_warn, vha, 0x1015,
3002                "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
3003                mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30],
3004                mb[31]);
3005     }
3006     return rc;
3007 }
3008 
3009 /**
3010  * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
3011  * @vha: HA context
3012  *
3013  * Returns 0 on success.
3014  */
3015 static inline int
3016 qla24xx_reset_risc(scsi_qla_host_t *vha)
3017 {
3018     unsigned long flags = 0;
3019     struct qla_hw_data *ha = vha->hw;
3020     struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3021     uint32_t cnt;
3022     uint16_t wd;
3023     static int abts_cnt; /* ISP abort retry count */
3024     int rval = QLA_SUCCESS;
3025     int print = 1;
3026 
3027     spin_lock_irqsave(&ha->hardware_lock, flags);
3028 
3029     /* Reset RISC. */
3030     wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
3031     for (cnt = 0; cnt < 30000; cnt++) {
3032         if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
3033             break;
3034 
3035         udelay(10);
3036     }
3037 
3038     if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
3039         set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
3040 
3041     ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
3042         "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
3043         rd_reg_dword(&reg->hccr),
3044         rd_reg_dword(&reg->ctrl_status),
3045         (rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE));
3046 
3047     wrt_reg_dword(&reg->ctrl_status,
3048         CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
3049     pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
3050 
3051     udelay(100);
3052 
3053     /* Wait for firmware to complete NVRAM accesses. */
3054     rd_reg_word(&reg->mailbox0);
3055     for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
3056         rval == QLA_SUCCESS; cnt--) {
3057         barrier();
3058         if (cnt)
3059             udelay(5);
3060         else
3061             rval = QLA_FUNCTION_TIMEOUT;
3062     }
3063 
3064     if (rval == QLA_SUCCESS)
3065         set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
3066 
3067     ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
3068         "HCCR: 0x%x, MailBox0 Status 0x%x\n",
3069         rd_reg_dword(&reg->hccr),
3070         rd_reg_word(&reg->mailbox0));
3071 
3072     /* Wait for soft-reset to complete. */
3073     rd_reg_dword(&reg->ctrl_status);
3074     for (cnt = 0; cnt < 60; cnt++) {
3075         barrier();
3076         if ((rd_reg_dword(&reg->ctrl_status) &
3077             CSRX_ISP_SOFT_RESET) == 0)
3078             break;
3079 
3080         udelay(5);
3081     }
3082     if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
3083         set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
3084 
3085     ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
3086         "HCCR: 0x%x, Soft Reset status: 0x%x\n",
3087         rd_reg_dword(&reg->hccr),
3088         rd_reg_dword(&reg->ctrl_status));
3089 
3090     /* If required, do an MPI FW reset now */
3091     if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
3092         if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
3093             if (++abts_cnt < 5) {
3094                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3095                 set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
3096             } else {
3097                 /*
3098                  * We exhausted the ISP abort retries. We have to
3099                  * set the board offline.
3100                  */
3101                 abts_cnt = 0;
3102                 vha->flags.online = 0;
3103             }
3104         }
3105     }
3106 
3107     wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
3108     rd_reg_dword(&reg->hccr);
3109 
3110     wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
3111     rd_reg_dword(&reg->hccr);
3112 
3113     wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
3114     mdelay(10);
3115     rd_reg_dword(&reg->hccr);
3116 
3117     wd = rd_reg_word(&reg->mailbox0);
3118     for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) {
3119         barrier();
3120         if (cnt) {
3121             mdelay(1);
3122             if (print && qla_chk_risc_recovery(vha))
3123                 print = 0;
3124 
3125             wd = rd_reg_word(&reg->mailbox0);
3126         } else {
3127             rval = QLA_FUNCTION_TIMEOUT;
3128 
3129             ql_log(ql_log_warn, vha, 0x015e,
3130                    "RISC reset timeout\n");
3131         }
3132     }
3133 
3134     if (rval == QLA_SUCCESS)
3135         set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
3136 
3137     ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
3138         "Host Risc 0x%x, mailbox0 0x%x\n",
3139         rd_reg_dword(&reg->hccr),
3140          rd_reg_word(&reg->mailbox0));
3141 
3142     spin_unlock_irqrestore(&ha->hardware_lock, flags);
3143 
3144     ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
3145         "Driver in %s mode\n",
3146         IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
3147 
3148     if (IS_NOPOLLING_TYPE(ha))
3149         ha->isp_ops->enable_intrs(ha);
3150 
3151     return rval;
3152 }
3153 
3154 static void
3155 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
3156 {
3157     struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
3158 
3159     wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
3160     *data = rd_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET);
3161 }
3162 
3163 static void
3164 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
3165 {
3166     struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
3167 
3168     wrt_reg_dword(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
3169     wrt_reg_dword(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data);
3170 }
3171 
3172 static void
3173 qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
3174 {
3175     uint32_t wd32 = 0;
3176     uint delta_msec = 100;
3177     uint elapsed_msec = 0;
3178     uint timeout_msec;
3179     ulong n;
3180 
3181     if (vha->hw->pdev->subsystem_device != 0x0175 &&
3182         vha->hw->pdev->subsystem_device != 0x0240)
3183         return;
3184 
3185     wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
3186     udelay(100);
3187 
3188 attempt:
3189     timeout_msec = TIMEOUT_SEMAPHORE;
3190     n = timeout_msec / delta_msec;
3191     while (n--) {
3192         qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
3193         qla25xx_read_risc_sema_reg(vha, &wd32);
3194         if (wd32 & RISC_SEMAPHORE)
3195             break;
3196         msleep(delta_msec);
3197         elapsed_msec += delta_msec;
3198         if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
3199             goto force;
3200     }
3201 
3202     if (!(wd32 & RISC_SEMAPHORE))
3203         goto force;
3204 
3205     if (!(wd32 & RISC_SEMAPHORE_FORCE))
3206         goto acquired;
3207 
3208     qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
3209     timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
3210     n = timeout_msec / delta_msec;
3211     while (n--) {
3212         qla25xx_read_risc_sema_reg(vha, &wd32);
3213         if (!(wd32 & RISC_SEMAPHORE_FORCE))
3214             break;
3215         msleep(delta_msec);
3216         elapsed_msec += delta_msec;
3217         if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
3218             goto force;
3219     }
3220 
3221     if (wd32 & RISC_SEMAPHORE_FORCE)
3222         qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
3223 
3224     goto attempt;
3225 
3226 force:
3227     qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
3228 
3229 acquired:
3230     return;
3231 }
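/*
 * Editorial sketch -- illustrative only, not part of the driver.  The
 * routine above retries the RISC semaphore in delta_msec steps and, once
 * the TIMEOUT_TOTAL_ELAPSED budget is spent, forces the semaphore instead.
 * The same acquire-or-force shape in isolation; try_get() and force_get()
 * are hypothetical stand-ins for the RISC_SEMAPHORE register accesses.
 */
static __maybe_unused void qla_demo_acquire_or_force(bool (*try_get)(void),
    void (*force_get)(void), uint step_msec, uint budget_msec)
{
    uint elapsed_msec = 0;

    while (elapsed_msec < budget_msec) {
        if (try_get())
            return;        /* acquired cleanly */
        msleep(step_msec);
        elapsed_msec += step_msec;
    }

    force_get();        /* budget exhausted -- force it */
}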
3232 
3233 /**
3234  * qla24xx_reset_chip() - Reset ISP24xx chip.
3235  * @vha: HA context
3236  *
3237  * Returns 0 on success.
3238  */
3239 int
3240 qla24xx_reset_chip(scsi_qla_host_t *vha)
3241 {
3242     struct qla_hw_data *ha = vha->hw;
3243     int rval = QLA_FUNCTION_FAILED;
3244 
3245     if (pci_channel_offline(ha->pdev) &&
3246         ha->flags.pci_channel_io_perm_failure) {
3247         return rval;
3248     }
3249 
3250     ha->isp_ops->disable_intrs(ha);
3251 
3252     qla25xx_manipulate_risc_semaphore(vha);
3253 
3254     /* Perform RISC reset. */
3255     rval = qla24xx_reset_risc(vha);
3256 
3257     return rval;
3258 }
3259 
3260 /**
3261  * qla2x00_chip_diag() - Test chip for proper operation.
3262  * @vha: HA context
3263  *
3264  * Returns 0 on success.
3265  */
3266 int
3267 qla2x00_chip_diag(scsi_qla_host_t *vha)
3268 {
3269     int     rval;
3270     struct qla_hw_data *ha = vha->hw;
3271     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3272     unsigned long   flags = 0;
3273     uint16_t    data;
3274     uint32_t    cnt;
3275     uint16_t    mb[5];
3276     struct req_que *req = ha->req_q_map[0];
3277 
3278     /* Assume a failed state */
3279     rval = QLA_FUNCTION_FAILED;
3280 
3281     ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
3282            &reg->flash_address);
3283 
3284     spin_lock_irqsave(&ha->hardware_lock, flags);
3285 
3286     /* Reset ISP chip. */
3287     wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
3288 
3289     /*
3290      * We need to have a delay here since the card will not respond while
3291      * in reset, which can cause an MCA on some architectures.
3292      */
3293     udelay(20);
3294     data = qla2x00_debounce_register(&reg->ctrl_status);
3295     for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
3296         udelay(5);
3297         data = rd_reg_word(&reg->ctrl_status);
3298         barrier();
3299     }
3300 
3301     if (!cnt)
3302         goto chip_diag_failed;
3303 
3304     ql_dbg(ql_dbg_init, vha, 0x007c,
3305         "Reset register cleared by chip reset.\n");
3306 
3307     /* Reset RISC processor. */
3308     wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
3309     wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
3310 
3311     /* Workaround for QLA2312 PCI parity error */
3312     if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
3313         data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
3314         for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
3315             udelay(5);
3316             data = RD_MAILBOX_REG(ha, reg, 0);
3317             barrier();
3318         }
3319     } else
3320         udelay(10);
3321 
3322     if (!cnt)
3323         goto chip_diag_failed;
3324 
3325     /* Check product ID of chip */
3326     ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");
3327 
3328     mb[1] = RD_MAILBOX_REG(ha, reg, 1);
3329     mb[2] = RD_MAILBOX_REG(ha, reg, 2);
3330     mb[3] = RD_MAILBOX_REG(ha, reg, 3);
3331     mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
3332     if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
3333         mb[3] != PROD_ID_3) {
3334         ql_log(ql_log_warn, vha, 0x0062,
3335             "Wrong product ID = 0x%x,0x%x,0x%x.\n",
3336             mb[1], mb[2], mb[3]);
3337 
3338         goto chip_diag_failed;
3339     }
3340     ha->product_id[0] = mb[1];
3341     ha->product_id[1] = mb[2];
3342     ha->product_id[2] = mb[3];
3343     ha->product_id[3] = mb[4];
3344 
3345     /* Adjust fw RISC transfer size */
3346     if (req->length > 1024)
3347         ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
3348     else
3349         ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
3350             req->length;
3351 
3352     if (IS_QLA2200(ha) &&
3353         RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
3354         /* Limit firmware transfer size with a 2200A */
3355         ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");
3356 
3357         ha->device_type |= DT_ISP2200A;
3358         ha->fw_transfer_size = 128;
3359     }
3360 
3361     /* Wrap Incoming Mailboxes Test. */
3362     spin_unlock_irqrestore(&ha->hardware_lock, flags);
3363 
3364     ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
3365     rval = qla2x00_mbx_reg_test(vha);
3366     if (rval)
3367         ql_log(ql_log_warn, vha, 0x0080,
3368             "Failed mailbox send register test.\n");
3369     else
3370         /* Flag a successful rval */
3371         rval = QLA_SUCCESS;
3372     spin_lock_irqsave(&ha->hardware_lock, flags);
3373 
3374 chip_diag_failed:
3375     if (rval)
3376         ql_log(ql_log_info, vha, 0x0081,
3377             "Chip diagnostics **** FAILED ****.\n");
3378 
3379     spin_unlock_irqrestore(&ha->hardware_lock, flags);
3380 
3381     return (rval);
3382 }
3383 
3384 /**
3385  * qla24xx_chip_diag() - Test ISP24xx for proper operation.
3386  * @vha: HA context
3387  *
3388  * Returns 0 on success.
3389  */
3390 int
3391 qla24xx_chip_diag(scsi_qla_host_t *vha)
3392 {
3393     int rval;
3394     struct qla_hw_data *ha = vha->hw;
3395     struct req_que *req = ha->req_q_map[0];
3396 
3397     if (IS_P3P_TYPE(ha))
3398         return QLA_SUCCESS;
3399 
3400     ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
3401 
3402     rval = qla2x00_mbx_reg_test(vha);
3403     if (rval) {
3404         ql_log(ql_log_warn, vha, 0x0082,
3405             "Failed mailbox send register test.\n");
3406     } else {
3407         /* Flag a successful rval */
3408         rval = QLA_SUCCESS;
3409     }
3410 
3411     return rval;
3412 }
3413 
3414 static void
3415 qla2x00_init_fce_trace(scsi_qla_host_t *vha)
3416 {
3417     int rval;
3418     dma_addr_t tc_dma;
3419     void *tc;
3420     struct qla_hw_data *ha = vha->hw;
3421 
3422     if (!IS_FWI2_CAPABLE(ha))
3423         return;
3424 
3425     if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3426         !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3427         return;
3428 
3429     if (ha->fce) {
3430         ql_dbg(ql_dbg_init, vha, 0x00bd,
3431                "%s: FCE Mem is already allocated.\n",
3432                __func__);
3433         return;
3434     }
3435 
3436     /* Allocate memory for Fibre Channel Event Buffer. */
3437     tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3438                 GFP_KERNEL);
3439     if (!tc) {
3440         ql_log(ql_log_warn, vha, 0x00be,
3441                "Unable to allocate (%d KB) for FCE.\n",
3442                FCE_SIZE / 1024);
3443         return;
3444     }
3445 
3446     rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
3447                     ha->fce_mb, &ha->fce_bufs);
3448     if (rval) {
3449         ql_log(ql_log_warn, vha, 0x00bf,
3450                "Unable to initialize FCE (%d).\n", rval);
3451         dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
3452         return;
3453     }
3454 
3455     ql_dbg(ql_dbg_init, vha, 0x00c0,
3456            "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
3457 
3458     ha->flags.fce_enabled = 1;
3459     ha->fce_dma = tc_dma;
3460     ha->fce = tc;
3461 }
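/*
 * Editorial sketch -- illustrative only, not part of the driver.
 * qla2x00_init_fce_trace() above follows a common allocate/enable/rollback
 * idiom: grab a DMA-coherent buffer, hand it to the firmware, and free it
 * again if that enable step fails.  A generic form of the same idiom;
 * enable_in_hw() is a hypothetical stand-in for the mailbox command that
 * registers the buffer with the firmware.
 */
static __maybe_unused void *qla_demo_alloc_and_enable(struct device *dev,
    size_t size, dma_addr_t *dma, int (*enable_in_hw)(dma_addr_t))
{
    void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

    if (!buf)
        return NULL;        /* allocation failed */

    if (enable_in_hw(*dma)) {
        /* Firmware rejected the buffer -- undo the allocation. */
        dma_free_coherent(dev, size, buf, *dma);
        return NULL;
    }

    return buf;        /* caller now owns the enabled buffer */
}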
3462 
3463 static void
3464 qla2x00_init_eft_trace(scsi_qla_host_t *vha)
3465 {
3466     int rval;
3467     dma_addr_t tc_dma;
3468     void *tc;
3469     struct qla_hw_data *ha = vha->hw;
3470 
3471     if (!IS_FWI2_CAPABLE(ha))
3472         return;
3473 
3474     if (ha->eft) {
3475         ql_dbg(ql_dbg_init, vha, 0x00bd,
3476             "%s: EFT Mem is already allocated.\n",
3477             __func__);
3478         return;
3479     }
3480 
3481     /* Allocate memory for Extended Trace Buffer. */
3482     tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3483                 GFP_KERNEL);
3484     if (!tc) {
3485         ql_log(ql_log_warn, vha, 0x00c1,
3486                "Unable to allocate (%d KB) for EFT.\n",
3487                EFT_SIZE / 1024);
3488         return;
3489     }
3490 
3491     rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
3492     if (rval) {
3493         ql_log(ql_log_warn, vha, 0x00c2,
3494                "Unable to initialize EFT (%d).\n", rval);
3495         dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
3496         return;
3497     }
3498 
3499     ql_dbg(ql_dbg_init, vha, 0x00c3,
3500            "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3501 
3502     ha->eft_dma = tc_dma;
3503     ha->eft = tc;
3504 }
3505 
3506 static void
3507 qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
3508 {
3509     qla2x00_init_fce_trace(vha);
3510     qla2x00_init_eft_trace(vha);
3511 }
3512 
3513 void
3514 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
3515 {
3516     uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
3517         eft_size, fce_size, mq_size;
3518     struct qla_hw_data *ha = vha->hw;
3519     struct req_que *req = ha->req_q_map[0];
3520     struct rsp_que *rsp = ha->rsp_q_map[0];
3521     struct qla2xxx_fw_dump *fw_dump;
3522 
3523     if (ha->fw_dump) {
3524         ql_dbg(ql_dbg_init, vha, 0x00bd,
3525             "Firmware dump already allocated.\n");
3526         return;
3527     }
3528 
3529     ha->fw_dumped = 0;
3530     ha->fw_dump_cap_flags = 0;
3531     dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
3532     req_q_size = rsp_q_size = 0;
3533 
3534     if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3535         fixed_size = sizeof(struct qla2100_fw_dump);
3536     } else if (IS_QLA23XX(ha)) {
3537         fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
3538         mem_size = (ha->fw_memory_size - 0x11000 + 1) *
3539             sizeof(uint16_t);
3540     } else if (IS_FWI2_CAPABLE(ha)) {
3541         if (IS_QLA83XX(ha))
3542             fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
3543         else if (IS_QLA81XX(ha))
3544             fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
3545         else if (IS_QLA25XX(ha))
3546             fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
3547         else
3548             fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
3549 
3550         mem_size = (ha->fw_memory_size - 0x100000 + 1) *
3551             sizeof(uint32_t);
3552         if (ha->mqenable) {
3553             if (!IS_QLA83XX(ha))
3554                 mq_size = sizeof(struct qla2xxx_mq_chain);
3555             /*
3556              * Allocate the maximum buffer size for all queues except Q0.
3557              * Resizing must be done at end-of-dump processing.
3558              */
3559             mq_size += (ha->max_req_queues - 1) *
3560                 (req->length * sizeof(request_t));
3561             mq_size += (ha->max_rsp_queues - 1) *
3562                 (rsp->length * sizeof(response_t));
3563         }
3564         if (ha->tgt.atio_ring)
3565             mq_size += ha->tgt.atio_q_length * sizeof(request_t);
3566 
3567         qla2x00_init_fce_trace(vha);
3568         if (ha->fce)
3569             fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
3570         qla2x00_init_eft_trace(vha);
3571         if (ha->eft)
3572             eft_size = EFT_SIZE;
3573     }
3574 
3575     if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3576         struct fwdt *fwdt = ha->fwdt;
3577         uint j;
3578 
3579         for (j = 0; j < 2; j++, fwdt++) {
3580             if (!fwdt->template) {
3581                 ql_dbg(ql_dbg_init, vha, 0x00ba,
3582                     "-> fwdt%u no template\n", j);
3583                 continue;
3584             }
3585             ql_dbg(ql_dbg_init, vha, 0x00fa,
3586                 "-> fwdt%u calculating fwdump size...\n", j);
3587             fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
3588                 vha, fwdt->template);
3589             ql_dbg(ql_dbg_init, vha, 0x00fa,
3590                 "-> fwdt%u calculated fwdump size = %#lx bytes\n",
3591                 j, fwdt->dump_size);
3592             dump_size += fwdt->dump_size;
3593         }
3594         /* Add space for spare MPI fw dump. */
3595         dump_size += ha->fwdt[1].dump_size;
3596     } else {
3597         req_q_size = req->length * sizeof(request_t);
3598         rsp_q_size = rsp->length * sizeof(response_t);
3599         dump_size = offsetof(struct qla2xxx_fw_dump, isp);
3600         dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
3601             + eft_size;
3602         ha->chain_offset = dump_size;
3603         dump_size += mq_size + fce_size;
3604         if (ha->exchoffld_buf)
3605             dump_size += sizeof(struct qla2xxx_offld_chain) +
3606                 ha->exchoffld_size;
3607         if (ha->exlogin_buf)
3608             dump_size += sizeof(struct qla2xxx_offld_chain) +
3609                 ha->exlogin_size;
3610     }
3611 
3612     if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
3613 
3614         ql_dbg(ql_dbg_init, vha, 0x00c5,
3615             "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
3616             __func__, dump_size, ha->fw_dump_len,
3617             ha->fw_dump_alloc_len);
3618 
3619         fw_dump = vmalloc(dump_size);
3620         if (!fw_dump) {
3621             ql_log(ql_log_warn, vha, 0x00c4,
3622                 "Unable to allocate (%d KB) for firmware dump.\n",
3623                 dump_size / 1024);
3624         } else {
3625             mutex_lock(&ha->optrom_mutex);
3626             if (ha->fw_dumped) {
3627                 memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
3628                 vfree(ha->fw_dump);
3629                 ha->fw_dump = fw_dump;
3630                 ha->fw_dump_alloc_len =  dump_size;
3631                 ql_dbg(ql_dbg_init, vha, 0x00c5,
3632                     "Re-Allocated (%d KB) and save firmware dump.\n",
3633                     dump_size / 1024);
3634             } else {
3635                 vfree(ha->fw_dump);
3636                 ha->fw_dump = fw_dump;
3637 
3638                 ha->fw_dump_len = ha->fw_dump_alloc_len =
3639                     dump_size;
3640                 ql_dbg(ql_dbg_init, vha, 0x00c5,
3641                     "Allocated (%d KB) for firmware dump.\n",
3642                     dump_size / 1024);
3643 
3644                 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3645                     ha->mpi_fw_dump = (char *)fw_dump +
3646                         ha->fwdt[1].dump_size;
3647                     mutex_unlock(&ha->optrom_mutex);
3648                     return;
3649                 }
3650 
3651                 ha->fw_dump->signature[0] = 'Q';
3652                 ha->fw_dump->signature[1] = 'L';
3653                 ha->fw_dump->signature[2] = 'G';
3654                 ha->fw_dump->signature[3] = 'C';
3655                 ha->fw_dump->version = htonl(1);
3656 
3657                 ha->fw_dump->fixed_size = htonl(fixed_size);
3658                 ha->fw_dump->mem_size = htonl(mem_size);
3659                 ha->fw_dump->req_q_size = htonl(req_q_size);
3660                 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3661 
3662                 ha->fw_dump->eft_size = htonl(eft_size);
3663                 ha->fw_dump->eft_addr_l =
3664                     htonl(LSD(ha->eft_dma));
3665                 ha->fw_dump->eft_addr_h =
3666                     htonl(MSD(ha->eft_dma));
3667 
3668                 ha->fw_dump->header_size =
3669                     htonl(offsetof
3670                         (struct qla2xxx_fw_dump, isp));
3671             }
3672             mutex_unlock(&ha->optrom_mutex);
3673         }
3674     }
3675 }
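/*
 * Editorial note -- worked example with hypothetical sizes, not driver
 * code.  On the non-27xx/28xx path the dump above is laid out as
 *
 *    offsetof(struct qla2xxx_fw_dump, isp)
 *    + fixed_size + mem_size + req_q_size + rsp_q_size + eft_size
 *      (chain_offset records the running total up to this point)
 *    + mq_size + fce_size
 *    + optional exchange-offload and exchange-login chains
 *
 * so, for instance, a request ring of 2048 entries contributes
 * 2048 * sizeof(request_t) bytes and a response ring of 512 entries
 * contributes 512 * sizeof(response_t) bytes to req_q_size/rsp_q_size.
 */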
3676 
3677 static int
3678 qla81xx_mpi_sync(scsi_qla_host_t *vha)
3679 {
3680 #define MPS_MASK    0xe0
3681     int rval;
3682     uint16_t dc;
3683     uint32_t dw;
3684 
3685     if (!IS_QLA81XX(vha->hw))
3686         return QLA_SUCCESS;
3687 
3688     rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
3689     if (rval != QLA_SUCCESS) {
3690         ql_log(ql_log_warn, vha, 0x0105,
3691             "Unable to acquire semaphore.\n");
3692         goto done;
3693     }
3694 
3695     pci_read_config_word(vha->hw->pdev, 0x54, &dc);
3696     rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
3697     if (rval != QLA_SUCCESS) {
3698         ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
3699         goto done_release;
3700     }
3701 
3702     dc &= MPS_MASK;
3703     if (dc == (dw & MPS_MASK))
3704         goto done_release;
3705 
3706     dw &= ~MPS_MASK;
3707     dw |= dc;
3708     rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
3709     if (rval != QLA_SUCCESS) {
3710         ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
3711     }
3712 
3713 done_release:
3714     rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
3715     if (rval != QLA_SUCCESS) {
3716         ql_log(ql_log_warn, vha, 0x006d,
3717             "Unable to release semaphore.\n");
3718     }
3719 
3720 done:
3721     return rval;
3722 }
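/*
 * Editorial note -- worked example with hypothetical values, not driver
 * code.  MPS_MASK is 0xe0 (bits 7:5); qla81xx_mpi_sync() copies just those
 * bits from the PCI config word at offset 0x54 into the RAM word at 0x7a15:
 *
 *    dc = 0x4a  ->  dc & MPS_MASK  = 0x40
 *    dw = 0x7f  ->  dw & ~MPS_MASK = 0x1f
 *    new dw     =  0x1f | 0x40     = 0x5f
 *
 * If the masked bits already agree (dc == (dw & MPS_MASK)), no RAM write
 * is issued and only the semaphore at 0x7c00 is released.
 */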
3723 
3724 int
3725 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3726 {
3727     /* Don't try to reallocate the array */
3728     if (req->outstanding_cmds)
3729         return QLA_SUCCESS;
3730 
3731     if (!IS_FWI2_CAPABLE(ha))
3732         req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3733     else {
3734         if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3735             req->num_outstanding_cmds = ha->cur_fw_xcb_count;
3736         else
3737             req->num_outstanding_cmds = ha->cur_fw_iocb_count;
3738     }
3739 
3740     req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3741                     sizeof(srb_t *),
3742                     GFP_KERNEL);
3743 
3744     if (!req->outstanding_cmds) {
3745         /*
3746          * Try to allocate a minimal size just so we can get through
3747          * initialization.
3748          */
3749         req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
3750         req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3751                         sizeof(srb_t *),
3752                         GFP_KERNEL);
3753 
3754         if (!req->outstanding_cmds) {
3755             ql_log(ql_log_fatal, NULL, 0x0126,
3756                 "Failed to allocate memory for "
3757                 "outstanding_cmds for req_que %p.\n", req);
3758             req->num_outstanding_cmds = 0;
3759             return QLA_FUNCTION_FAILED;
3760         }
3761     }
3762 
3763     return QLA_SUCCESS;
3764 }
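/*
 * Editorial sketch -- illustrative only, not part of the driver.  For
 * FWI2-capable adapters the sizing rule above is "use the smaller of the
 * firmware's exchange and IOCB counts, and fall back to a minimal array
 * if that allocation fails".  The same rule in isolation, with
 * hypothetical parameters:
 */
static __maybe_unused srb_t **qla_demo_alloc_cmd_array(u16 fw_xcb_count,
    u16 fw_iocb_count, u16 minimum, u16 *chosen)
{
    u16 want = min(fw_xcb_count, fw_iocb_count);
    srb_t **array = kcalloc(want, sizeof(srb_t *), GFP_KERNEL);

    if (!array) {
        /* Retry with the smallest array that still lets init proceed. */
        want = minimum;
        array = kcalloc(want, sizeof(srb_t *), GFP_KERNEL);
    }

    *chosen = array ? want : 0;
    return array;        /* NULL here is a hard failure */
}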
3765 
3766 #define PRINT_FIELD(_field, _flag, _str) {      \
3767     if (a0->_field & _flag) {\
3768         if (p) {\
3769             strcat(ptr, "|");\
3770             ptr++;\
3771             leftover--;\
3772         } \
3773         len = snprintf(ptr, leftover, "%s", _str);  \
3774         p = 1;\
3775         leftover -= len;\
3776         ptr += len; \
3777     } \
3778 }
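/*
 * Editorial note -- worked example, not driver code.  PRINT_FIELD()
 * appends _str to the buffer whenever the corresponding bit of a0->_field
 * is set, inserting "|" between consecutive matches (p remembers whether
 * anything has been emitted yet, leftover guards the remaining space).
 * For a hypothetical SFP with FC_MED_M5 and FC_MED_SM set in fc_med_cc9,
 * the media line below would read:
 *
 *    SFP Media: MultiMode 50um|SingleMode
 */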
3779 
3780 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
3781 {
3782 #define STR_LEN 64
3783     struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
3784     u8 str[STR_LEN], *ptr, p;
3785     int leftover, len;
3786 
3787     memset(str, 0, STR_LEN);
3788     snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
3789     ql_dbg(ql_dbg_init, vha, 0x015a,
3790         "SFP MFG Name: %s\n", str);
3791 
3792     memset(str, 0, STR_LEN);
3793     snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
3794     ql_dbg(ql_dbg_init, vha, 0x015c,
3795         "SFP Part Name: %s\n", str);
3796 
3797     /* media */
3798     memset(str, 0, STR_LEN);
3799     ptr = str;
3800     leftover = STR_LEN;
3801     p = len = 0;
3802     PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
3803     PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
3804     PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
3805     PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
3806     PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
3807     PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
3808     PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
3809     ql_dbg(ql_dbg_init, vha, 0x0160,
3810         "SFP Media: %s\n", str);
3811 
3812     /* link length */
3813     memset(str, 0, STR_LEN);
3814     ptr = str;
3815     leftover = STR_LEN;
3816     p = len = 0;
3817     PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
3818     PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
3819     PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
3820     PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
3821     PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
3822     ql_dbg(ql_dbg_init, vha, 0x0196,
3823         "SFP Link Length: %s\n", str);
3824 
3825     memset(str, 0, STR_LEN);
3826     ptr = str;
3827     leftover = STR_LEN;
3828     p = len = 0;
3829     PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
3830     PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave (LC)");
3831     PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
3832     PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
3833     PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
3834     ql_dbg(ql_dbg_init, vha, 0x016e,
3835         "SFP FC Link Tech: %s\n", str);
3836 
3837     if (a0->length_km)
3838         ql_dbg(ql_dbg_init, vha, 0x016f,
3839             "SFP Distance: %d km\n", a0->length_km);
3840     if (a0->length_100m)
3841         ql_dbg(ql_dbg_init, vha, 0x0170,
3842             "SFP Distance: %d m\n", a0->length_100m*100);
3843     if (a0->length_50um_10m)
3844         ql_dbg(ql_dbg_init, vha, 0x0189,
3845             "SFP Distance (WL=50um): %d m\n", a0->length_50um_10m * 10);
3846     if (a0->length_62um_10m)
3847         ql_dbg(ql_dbg_init, vha, 0x018a,
3848           "SFP Distance (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
3849     if (a0->length_om4_10m)
3850         ql_dbg(ql_dbg_init, vha, 0x0194,
3851             "SFP Distance (OM4): %d m\n", a0->length_om4_10m * 10);
3852     if (a0->length_om3_10m)
3853         ql_dbg(ql_dbg_init, vha, 0x0195,
3854             "SFP Distance (OM3): %d m\n", a0->length_om3_10m * 10);
3855 }
3856 
3857 
3858 /**
3859  * qla24xx_detect_sfp() - Detect the range capabilities of the attached SFP.
3860  *
3861  * @vha: adapter state pointer.
3862  *
3863  * Return:
3864  *  0 -- Configure firmware to use short-range settings -- normal
3865  *       buffer-to-buffer credits.
3866  *
3867  *  1 -- Configure firmware to use long-range settings -- extra
3868  *       buffer-to-buffer credits should be allocated with
3869  *       ha->lr_distance containing distance settings from NVRAM or SFP
3870  *       (if supported).
3871  */
3872 int
3873 qla24xx_detect_sfp(scsi_qla_host_t *vha)
3874 {
3875     int rc, used_nvram;
3876     struct sff_8247_a0 *a;
3877     struct qla_hw_data *ha = vha->hw;
3878     struct nvram_81xx *nv = ha->nvram;
3879 #define LR_DISTANCE_UNKNOWN 2
3880     static const char * const types[] = { "Short", "Long" };
3881     static const char * const lengths[] = { "(10km)", "(5km)", "" };
3882     u8 ll = 0;
3883 
3884     /* Seed with NVRAM settings. */
3885     used_nvram = 0;
3886     ha->flags.lr_detected = 0;
3887     if (IS_BPM_RANGE_CAPABLE(ha) &&
3888         (nv->enhanced_features & NEF_LR_DIST_ENABLE)) {
3889         used_nvram = 1;
3890         ha->flags.lr_detected = 1;
3891         ha->lr_distance =
3892             (nv->enhanced_features >> LR_DIST_NV_POS)
3893              & LR_DIST_NV_MASK;
3894     }
3895 
3896     if (!IS_BPM_ENABLED(vha))
3897         goto out;
3898     /* Determine SR/LR capabilities of SFP/Transceiver. */
3899     rc = qla2x00_read_sfp_dev(vha, NULL, 0);
3900     if (rc)
3901         goto out;
3902 
3903     used_nvram = 0;
3904     a = (struct sff_8247_a0 *)vha->hw->sfp_data;
3905     qla2xxx_print_sfp_info(vha);
3906 
3907     ha->flags.lr_detected = 0;
3908     ll = a->fc_ll_cc7;
3909     if (ll & FC_LL_VL || ll & FC_LL_L) {
3910         /* Long range, track length. */
3911         ha->flags.lr_detected = 1;
3912 
3913         if (a->length_km > 5 || a->length_100m > 50)
3914             ha->lr_distance = LR_DISTANCE_10K;
3915         else
3916             ha->lr_distance = LR_DISTANCE_5K;
3917     }
3918 
3919 out:
3920     ql_dbg(ql_dbg_async, vha, 0x507b,
3921         "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n",
3922         types[ha->flags.lr_detected],
3923         ha->flags.lr_detected ? lengths[ha->lr_distance] :
3924            lengths[LR_DISTANCE_UNKNOWN],
3925         used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
3926     return ha->flags.lr_detected;
3927 }
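/*
 * Editorial note -- worked example with hypothetical SFP data, not driver
 * code.  With BPM enabled and an SFP whose fc_ll_cc7 advertises FC_LL_L,
 * qla24xx_detect_sfp() reports long range and buckets the distance from
 * the supported-length fields:
 *
 *    length_km > 5 (or length_100m > 50)  ->  ha->lr_distance = LR_DISTANCE_10K
 *    otherwise                            ->  ha->lr_distance = LR_DISTANCE_5K
 *
 * If the SFP cannot be read, the NVRAM seed is kept, where the distance is
 * (nv->enhanced_features >> LR_DIST_NV_POS) & LR_DIST_NV_MASK.  The return
 * value (0 = short range, 1 = long range) is what qla2x00_setup_chip()
 * uses to decide whether to restart the firmware with long-range
 * buffer-to-buffer credit settings.
 */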
3928 
3929 void qla_init_iocb_limit(scsi_qla_host_t *vha)
3930 {
3931     u16 i, num_qps;
3932     u32 limit;
3933     struct qla_hw_data *ha = vha->hw;
3934 
3935     num_qps = ha->num_qpairs + 1;
3936     limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
3937 
3938     ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
3939     ha->base_qpair->fwres.iocbs_limit = limit;
3940     ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
3941     ha->base_qpair->fwres.iocbs_used = 0;
3942     for (i = 0; i < ha->max_qpairs; i++) {
3943         if (ha->queue_pair_map[i])  {
3944             ha->queue_pair_map[i]->fwres.iocbs_total =
3945                 ha->orig_fw_iocb_count;
3946             ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
3947             ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
3948                 limit / num_qps;
3949             ha->queue_pair_map[i]->fwres.iocbs_used = 0;
3950         }
3951     }
3952 }
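/*
 * Editorial sketch -- illustrative only, not part of the driver.  With
 * hypothetical numbers (orig_fw_iocb_count = 2048, an 85% cap standing in
 * for QLA_IOCB_PCT_LIMIT, and 4 configured queue pairs):
 *
 *    limit          = 2048 * 85 / 100 = 1740
 *    num_qps        = 4 + 1 (base qpair) = 5
 *    iocbs_qp_limit = 1740 / 5 = 348
 *
 * The per-queue-pair budget in isolation:
 */
static __maybe_unused u32 qla_demo_iocb_qp_limit(u16 orig_fw_iocb_count,
    u16 num_qpairs, u8 pct)
{
    u32 limit = ((u32)orig_fw_iocb_count * pct) / 100;

    return limit / (num_qpairs + 1);    /* +1 for the base qpair */
}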
3953 
3954 /**
3955  * qla2x00_setup_chip() - Load and start RISC firmware.
3956  * @vha: HA context
3957  *
3958  * Returns 0 on success.
3959  */
3960 static int
3961 qla2x00_setup_chip(scsi_qla_host_t *vha)
3962 {
3963     int rval;
3964     uint32_t srisc_address = 0;
3965     struct qla_hw_data *ha = vha->hw;
3966     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3967     unsigned long flags;
3968     uint16_t fw_major_version;
3969     int done_once = 0;
3970 
3971     if (IS_P3P_TYPE(ha)) {
3972         rval = ha->isp_ops->load_risc(vha, &srisc_address);
3973         if (rval == QLA_SUCCESS) {
3974             qla2x00_stop_firmware(vha);
3975             goto enable_82xx_npiv;
3976         } else
3977             goto failed;
3978     }
3979 
3980     if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
3981         /* Disable SRAM, Instruction RAM and GP RAM parity.  */
3982         spin_lock_irqsave(&ha->hardware_lock, flags);
3983         wrt_reg_word(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
3984         rd_reg_word(&reg->hccr);
3985         spin_unlock_irqrestore(&ha->hardware_lock, flags);
3986     }
3987 
3988     qla81xx_mpi_sync(vha);
3989 
3990 execute_fw_with_lr:
3991     /* Load firmware sequences */
3992     rval = ha->isp_ops->load_risc(vha, &srisc_address);
3993     if (rval == QLA_SUCCESS) {
3994         ql_dbg(ql_dbg_init, vha, 0x00c9,
3995             "Verifying Checksum of loaded RISC code.\n");
3996 
3997         rval = qla2x00_verify_checksum(vha, srisc_address);
3998         if (rval == QLA_SUCCESS) {
3999             /* Start firmware execution. */
4000             ql_dbg(ql_dbg_init, vha, 0x00ca,
4001                 "Starting firmware.\n");
4002 
4003             if (ql2xexlogins)
4004                 ha->flags.exlogins_enabled = 1;
4005 
4006             if (qla_is_exch_offld_enabled(vha))
4007                 ha->flags.exchoffld_enabled = 1;
4008 
4009             rval = qla2x00_execute_fw(vha, srisc_address);
4010             /* Retrieve firmware information. */
4011             if (rval == QLA_SUCCESS) {
4012                 /* Enable BPM support? */
4013                 if (!done_once++ && qla24xx_detect_sfp(vha)) {
4014                     ql_dbg(ql_dbg_init, vha, 0x00ca,
4015                         "Re-starting firmware -- BPM.\n");
4016                     /* Best-effort - re-init. */
4017                     ha->isp_ops->reset_chip(vha);
4018                     ha->isp_ops->chip_diag(vha);
4019                     goto execute_fw_with_lr;
4020                 }
4021 
4022                 if (IS_ZIO_THRESHOLD_CAPABLE(ha))
4023                     qla27xx_set_zio_threshold(vha,
4024                         ha->last_zio_threshold);
4025 
4026                 rval = qla2x00_set_exlogins_buffer(vha);
4027                 if (rval != QLA_SUCCESS)
4028                     goto failed;
4029 
4030                 rval = qla2x00_set_exchoffld_buffer(vha);
4031                 if (rval != QLA_SUCCESS)
4032                     goto failed;
4033 
4034 enable_82xx_npiv:
4035                 fw_major_version = ha->fw_major_version;
4036                 if (IS_P3P_TYPE(ha))
4037                     qla82xx_check_md_needed(vha);
4038                 else
4039                     rval = qla2x00_get_fw_version(vha);
4040                 if (rval != QLA_SUCCESS)
4041                     goto failed;
4042                 ha->flags.npiv_supported = 0;
4043                 if (IS_QLA2XXX_MIDTYPE(ha) &&
4044                      (ha->fw_attributes & BIT_2)) {
4045                     ha->flags.npiv_supported = 1;
4046                     if ((!ha->max_npiv_vports) ||
4047                         ((ha->max_npiv_vports + 1) %
4048                         MIN_MULTI_ID_FABRIC))
4049                         ha->max_npiv_vports =
4050                             MIN_MULTI_ID_FABRIC - 1;
4051                 }
4052                 qla2x00_get_resource_cnts(vha);
4053                 qla_init_iocb_limit(vha);
4054 
4055                 /*
4056                  * Allocate the array of outstanding commands
4057                  * now that we know the firmware resources.
4058                  */
4059                 rval = qla2x00_alloc_outstanding_cmds(ha,
4060                     vha->req);
4061                 if (rval != QLA_SUCCESS)
4062                     goto failed;
4063 
4064                 if (!fw_major_version && !(IS_P3P_TYPE(ha)))
4065                     qla2x00_alloc_offload_mem(vha);
4066 
4067                 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
4068                     qla2x00_alloc_fw_dump(vha);
4069 
4070             } else {
4071                 goto failed;
4072             }
4073         } else {
4074             ql_log(ql_log_fatal, vha, 0x00cd,
4075                 "ISP Firmware failed checksum.\n");
4076             goto failed;
4077         }
4078 
4079         /* Enable PUREX PASSTHRU */
4080         if (ql2xrdpenable || ha->flags.scm_supported_f ||
4081             ha->flags.edif_enabled)
4082             qla25xx_set_els_cmds_supported(vha);
4083     } else
4084         goto failed;
4085 
4086     if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
4087         /* Enable proper parity. */
4088         spin_lock_irqsave(&ha->hardware_lock, flags);
4089         if (IS_QLA2300(ha))
4090             /* SRAM parity */
4091             wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
4092         else
4093             /* SRAM, Instruction RAM and GP RAM parity */
4094             wrt_reg_word(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
4095         rd_reg_word(&reg->hccr);
4096         spin_unlock_irqrestore(&ha->hardware_lock, flags);
4097     }
4098 
4099     if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
4100         ha->flags.fac_supported = 1;
4101     else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
4102         uint32_t size;
4103 
4104         rval = qla81xx_fac_get_sector_size(vha, &size);
4105         if (rval == QLA_SUCCESS) {
4106             ha->flags.fac_supported = 1;
4107             ha->fdt_block_size = size << 2;
4108         } else {
4109             ql_log(ql_log_warn, vha, 0x00ce,
4110                 "Unsupported FAC firmware (%d.%02d.%02d).\n",
4111                 ha->fw_major_version, ha->fw_minor_version,
4112                 ha->fw_subminor_version);
4113 
4114             if (IS_QLA83XX(ha)) {
4115                 ha->flags.fac_supported = 0;
4116                 rval = QLA_SUCCESS;
4117             }
4118         }
4119     }
4120 failed:
4121     if (rval) {
4122         ql_log(ql_log_fatal, vha, 0x00cf,
4123             "Setup chip ****FAILED****.\n");
4124     }
4125 
4126     return (rval);
4127 }
4128 
4129 /**
4130  * qla2x00_init_response_q_entries() - Initializes response queue entries.
4131  * @rsp: response queue
4132  *
4133  * Stamps every entry in the response ring with the RESPONSE_PROCESSED
4134  * signature and resets the ring index and pointers, so that stale
4135  * ring contents are not mistaken for new completions.  This function
4136  * does not return a value.
4137  */
4138 void
4139 qla2x00_init_response_q_entries(struct rsp_que *rsp)
4140 {
4141     uint16_t cnt;
4142     response_t *pkt;
4143 
4144     rsp->ring_ptr = rsp->ring;
4145     rsp->ring_index    = 0;
4146     rsp->status_srb = NULL;
4147     pkt = rsp->ring_ptr;
4148     for (cnt = 0; cnt < rsp->length; cnt++) {
4149         pkt->signature = RESPONSE_PROCESSED;
4150         pkt++;
4151     }
4152 }
4153 
4154 /**
4155  * qla2x00_update_fw_options() - Read and process firmware options.
4156  * @vha: HA context
4157  *
4158  * The adjusted options are written back to the firmware; nothing is returned.
4159  */
4160 void
4161 qla2x00_update_fw_options(scsi_qla_host_t *vha)
4162 {
4163     uint16_t swing, emphasis, tx_sens, rx_sens;
4164     struct qla_hw_data *ha = vha->hw;
4165 
4166     memset(ha->fw_options, 0, sizeof(ha->fw_options));
4167     qla2x00_get_fw_options(vha, ha->fw_options);
4168 
4169     if (IS_QLA2100(ha) || IS_QLA2200(ha))
4170         return;
4171 
4172     /* Serial Link options. */
4173     ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
4174         "Serial link options.\n");
4175     ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
4176         ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));
4177 
4178     ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
4179     if (ha->fw_seriallink_options[3] & BIT_2) {
4180         ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
4181 
4182         /*  1G settings */
4183         swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
4184         emphasis = (ha->fw_seriallink_options[2] &
4185             (BIT_4 | BIT_3)) >> 3;
4186         tx_sens = ha->fw_seriallink_options[0] &
4187             (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4188         rx_sens = (ha->fw_seriallink_options[0] &
4189             (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
4190         ha->fw_options[10] = (emphasis << 14) | (swing << 8);
4191         if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4192             if (rx_sens == 0x0)
4193                 rx_sens = 0x3;
4194             ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
4195         } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4196             ha->fw_options[10] |= BIT_5 |
4197                 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
4198                 (tx_sens & (BIT_1 | BIT_0));
4199 
4200         /*  2G settings */
4201         swing = (ha->fw_seriallink_options[2] &
4202             (BIT_7 | BIT_6 | BIT_5)) >> 5;
4203         emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
4204         tx_sens = ha->fw_seriallink_options[1] &
4205             (BIT_3 | BIT_2 | BIT_1 | BIT_0);
4206         rx_sens = (ha->fw_seriallink_options[1] &
4207             (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
4208         ha->fw_options[11] = (emphasis << 14) | (swing << 8);
4209         if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4210             if (rx_sens == 0x0)
4211                 rx_sens = 0x3;
4212             ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
4213         } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4214             ha->fw_options[11] |= BIT_5 |
4215                 ((rx_sens & (BIT_1 | BIT_0)) << 2) |
4216                 (tx_sens & (BIT_1 | BIT_0));
4217     }
4218 
4219     /* FCP2 options. */
4220     /*  Return command IOCBs without waiting for an ABTS to complete. */
4221     ha->fw_options[3] |= BIT_13;
4222 
4223     /* LED scheme. */
4224     if (ha->flags.enable_led_scheme)
4225         ha->fw_options[2] |= BIT_12;
4226 
4227     /* Detect ISP6312. */
4228     if (IS_QLA6312(ha))
4229         ha->fw_options[2] |= BIT_13;
4230 
4231     /* Set Retry FLOGI in case of P2P connection */
4232     if (ha->operating_mode == P2P) {
4233         ha->fw_options[2] |= BIT_3;
4234         ql_dbg(ql_dbg_disc, vha, 0x2100,
4235             "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
4236             __func__, ha->fw_options[2]);
4237     }
4238 
4239     /* Update firmware options. */
4240     qla2x00_set_fw_options(vha, ha->fw_options);
4241 }
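/*
 * Editorial sketch -- illustrative only, not part of the driver.  On the
 * ISP2300/2312/6312 branch above, the 1G serial-link word is packed as
 * (emphasis << 14) | (swing << 8) | (tx_sens << 4) | rx_sens, with an
 * rx_sens of 0 bumped to 3 first.  With hypothetical NVRAM values
 * swing = 5, emphasis = 2, tx_sens = 6, rx_sens = 3 this yields
 * fw_options[10] = 0x8563.  The packing in isolation:
 */
static __maybe_unused u16 qla_demo_pack_1g_seriallink(u16 emphasis, u16 swing,
    u16 tx_sens, u16 rx_sens)
{
    return (emphasis << 14) | (swing << 8) | (tx_sens << 4) | rx_sens;
}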
4242 
4243 void
4244 qla24xx_update_fw_options(scsi_qla_host_t *vha)
4245 {
4246     int rval;
4247     struct qla_hw_data *ha = vha->hw;
4248 
4249     if (IS_P3P_TYPE(ha))
4250         return;
4251 
4252     /*  Hold status IOCBs until ABTS response received. */
4253     if (ql2xfwholdabts)
4254         ha->fw_options[3] |= BIT_12;
4255 
4256     /* Set Retry FLOGI in case of P2P connection */
4257     if (ha->operating_mode == P2P) {
4258         ha->fw_options[2] |= BIT_3;
4259         ql_dbg(ql_dbg_disc, vha, 0x2101,
4260             "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
4261             __func__, ha->fw_options[2]);
4262     }
4263 
4264     /* Move PUREX, ABTS RX & RIDA to ATIOQ */
4265     if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
4266         (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
4267         if (qla_tgt_mode_enabled(vha) ||
4268             qla_dual_mode_enabled(vha))
4269             ha->fw_options[2] |= BIT_11;
4270         else
4271             ha->fw_options[2] &= ~BIT_11;
4272     }
4273 
4274     if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4275         IS_QLA28XX(ha)) {
4276         /*
4277          * Tell FW to track each exchange to prevent the
4278          * driver from using a stale exchange.
4279          */
4280         if (qla_tgt_mode_enabled(vha) ||
4281             qla_dual_mode_enabled(vha))
4282             ha->fw_options[2] |= BIT_4;
4283         else
4284             ha->fw_options[2] &= ~(BIT_4);
4285 
4286         /* Reserve 1/2 of emergency exchanges for ELS. */
4287         if (qla2xuseresexchforels)
4288             ha->fw_options[2] |= BIT_8;
4289         else
4290             ha->fw_options[2] &= ~BIT_8;
4291 
4292         /*
4293          * N2N: set Secure=1 for the PLOGI ACC so that the
4294          * firmware shall not send PRLI after the PLOGI ACC.
4295          */
4296         if (ha->flags.edif_enabled &&
4297             DBELL_ACTIVE(vha)) {
4298             ha->fw_options[3] |= BIT_15;
4299             ha->flags.n2n_fw_acc_sec = 1;
4300         } else {
4301             ha->fw_options[3] &= ~BIT_15;
4302             ha->flags.n2n_fw_acc_sec = 0;
4303         }
4304     }
4305 
4306     if (ql2xrdpenable || ha->flags.scm_supported_f ||
4307         ha->flags.edif_enabled)
4308         ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;
4309 
4310     /* Enable Async 8130/8131 events -- transceiver insertion/removal */
4311     if (IS_BPM_RANGE_CAPABLE(ha))
4312         ha->fw_options[3] |= BIT_10;
4313 
4314     ql_dbg(ql_dbg_init, vha, 0x00e8,
4315         "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
4316         __func__, ha->fw_options[1], ha->fw_options[2],
4317         ha->fw_options[3], vha->host->active_mode);
4318 
4319     if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
4320         qla2x00_set_fw_options(vha, ha->fw_options);
4321 
4322     /* Update Serial Link options. */
4323     if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
4324         return;
4325 
4326     rval = qla2x00_set_serdes_params(vha,
4327         le16_to_cpu(ha->fw_seriallink_options24[1]),
4328         le16_to_cpu(ha->fw_seriallink_options24[2]),
4329         le16_to_cpu(ha->fw_seriallink_options24[3]));
4330     if (rval != QLA_SUCCESS) {
4331         ql_log(ql_log_warn, vha, 0x0104,
4332             "Unable to update Serial Link options (%x).\n", rval);
4333     }
4334 }
4335 
4336 void
4337 qla2x00_config_rings(struct scsi_qla_host *vha)
4338 {
4339     struct qla_hw_data *ha = vha->hw;
4340     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4341     struct req_que *req = ha->req_q_map[0];
4342     struct rsp_que *rsp = ha->rsp_q_map[0];
4343 
4344     /* Setup ring parameters in initialization control block. */
4345     ha->init_cb->request_q_outpointer = cpu_to_le16(0);
4346     ha->init_cb->response_q_inpointer = cpu_to_le16(0);
4347     ha->init_cb->request_q_length = cpu_to_le16(req->length);
4348     ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
4349     put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
4350     put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
4351 
4352     wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
4353     wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
4354     wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
4355     wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
4356     rd_reg_word(ISP_RSP_Q_OUT(ha, reg));        /* PCI Posting. */
4357 }
4358 
4359 void
4360 qla24xx_config_rings(struct scsi_qla_host *vha)
4361 {
4362     struct qla_hw_data *ha = vha->hw;
4363     device_reg_t *reg = ISP_QUE_REG(ha, 0);
4364     struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
4365     struct qla_msix_entry *msix;
4366     struct init_cb_24xx *icb;
4367     uint16_t rid = 0;
4368     struct req_que *req = ha->req_q_map[0];
4369     struct rsp_que *rsp = ha->rsp_q_map[0];
4370 
4371     /* Setup ring parameters in initialization control block. */
4372     icb = (struct init_cb_24xx *)ha->init_cb;
4373     icb->request_q_outpointer = cpu_to_le16(0);
4374     icb->response_q_inpointer = cpu_to_le16(0);
4375     icb->request_q_length = cpu_to_le16(req->length);
4376     icb->response_q_length = cpu_to_le16(rsp->length);
4377     put_unaligned_le64(req->dma, &icb->request_q_address);
4378     put_unaligned_le64(rsp->dma, &icb->response_q_address);
4379 
4380     /* Setup ATIO queue dma pointers for target mode */
4381     icb->atio_q_inpointer = cpu_to_le16(0);
4382     icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
4383     put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
4384 
4385     if (IS_SHADOW_REG_CAPABLE(ha))
4386         icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
4387 
4388     if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4389         IS_QLA28XX(ha)) {
4390         icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
4391         icb->rid = cpu_to_le16(rid);
4392         if (ha->flags.msix_enabled) {
4393             msix = &ha->msix_entries[1];
4394             ql_dbg(ql_dbg_init, vha, 0x0019,
4395                 "Registering vector 0x%x for base que.\n",
4396                 msix->entry);
4397             icb->msix = cpu_to_le16(msix->entry);
4398         }
4399         /* Use alternate PCI bus number */
4400         if (MSB(rid))
4401             icb->firmware_options_2 |= cpu_to_le32(BIT_19);
4402         /* Use alternate PCI devfn */
4403         if (LSB(rid))
4404             icb->firmware_options_2 |= cpu_to_le32(BIT_18);
4405 
4406         /* Use Disable MSIX Handshake mode for capable adapters */
4407         if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
4408             (ha->flags.msix_enabled)) {
4409             icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
4410             ha->flags.disable_msix_handshake = 1;
4411             ql_dbg(ql_dbg_init, vha, 0x00fe,
4412                 "MSIX Handshake Disable Mode turned on.\n");
4413         } else {
4414             icb->firmware_options_2 |= cpu_to_le32(BIT_22);
4415         }
4416         icb->firmware_options_2 |= cpu_to_le32(BIT_23);
4417 
4418         wrt_reg_dword(&reg->isp25mq.req_q_in, 0);
4419         wrt_reg_dword(&reg->isp25mq.req_q_out, 0);
4420         wrt_reg_dword(&reg->isp25mq.rsp_q_in, 0);
4421         wrt_reg_dword(&reg->isp25mq.rsp_q_out, 0);
4422     } else {
4423         wrt_reg_dword(&reg->isp24.req_q_in, 0);
4424         wrt_reg_dword(&reg->isp24.req_q_out, 0);
4425         wrt_reg_dword(&reg->isp24.rsp_q_in, 0);
4426         wrt_reg_dword(&reg->isp24.rsp_q_out, 0);
4427     }
4428 
4429     qlt_24xx_config_rings(vha);
4430 
4431     /* If the user has configured the speed, set it here */
4432     if (ha->set_data_rate) {
4433         ql_dbg(ql_dbg_init, vha, 0x00fd,
4434             "Speed set by user : %s Gbps \n",
4435             qla2x00_get_link_speed_str(ha, ha->set_data_rate));
4436         icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
4437     }
4438 
4439     /* PCI posting */
4440     rd_reg_word(&ioreg->hccr);
4441 }
4442 
4443 /**
4444  * qla2x00_init_rings() - Initializes firmware.
4445  * @vha: HA context
4446  *
4447  * Beginning of request ring has initialization control block already built
4448  * by nvram config routine.
4449  *
4450  * Returns 0 on success.
4451  */
4452 int
4453 qla2x00_init_rings(scsi_qla_host_t *vha)
4454 {
4455     int rval;
4456     unsigned long flags = 0;
4457     int cnt, que;
4458     struct qla_hw_data *ha = vha->hw;
4459     struct req_que *req;
4460     struct rsp_que *rsp;
4461     struct mid_init_cb_24xx *mid_init_cb =
4462         (struct mid_init_cb_24xx *) ha->init_cb;
4463 
4464     spin_lock_irqsave(&ha->hardware_lock, flags);
4465 
4466     /* Clear outstanding commands array. */
4467     for (que = 0; que < ha->max_req_queues; que++) {
4468         req = ha->req_q_map[que];
4469         if (!req || !test_bit(que, ha->req_qid_map))
4470             continue;
4471         req->out_ptr = (uint16_t *)(req->ring + req->length);
4472         *req->out_ptr = 0;
4473         for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
4474             req->outstanding_cmds[cnt] = NULL;
4475 
4476         req->current_outstanding_cmd = 1;
4477 
4478         /* Initialize firmware. */
4479         req->ring_ptr  = req->ring;
4480         req->ring_index    = 0;
4481         req->cnt      = req->length;
4482     }
4483 
4484     for (que = 0; que < ha->max_rsp_queues; que++) {
4485         rsp = ha->rsp_q_map[que];
4486         if (!rsp || !test_bit(que, ha->rsp_qid_map))
4487             continue;
4488         rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
4489         *rsp->in_ptr = 0;
4490         /* Initialize response queue entries */
4491         if (IS_QLAFX00(ha))
4492             qlafx00_init_response_q_entries(rsp);
4493         else
4494             qla2x00_init_response_q_entries(rsp);
4495     }
4496 
4497     ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
4498     ha->tgt.atio_ring_index = 0;
4499     /* Initialize ATIO queue entries */
4500     qlt_init_atio_q_entries(vha);
4501 
4502     ha->isp_ops->config_rings(vha);
4503 
4504     spin_unlock_irqrestore(&ha->hardware_lock, flags);
4505 
4506     if (IS_QLAFX00(ha)) {
4507         rval = qlafx00_init_firmware(vha, ha->init_cb_size);
4508         goto next_check;
4509     }
4510 
4511     /* Update any ISP specific firmware options before initialization. */
4512     ha->isp_ops->update_fw_options(vha);
4513 
4514     ql_dbg(ql_dbg_init, vha, 0x00d1,
4515            "Issue init firmware FW opt 1-3= %08x %08x %08x.\n",
4516            le32_to_cpu(mid_init_cb->init_cb.firmware_options_1),
4517            le32_to_cpu(mid_init_cb->init_cb.firmware_options_2),
4518            le32_to_cpu(mid_init_cb->init_cb.firmware_options_3));
4519 
4520     if (ha->flags.npiv_supported) {
4521         if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
4522             ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
4523         mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
4524     }
4525 
4526     if (IS_FWI2_CAPABLE(ha)) {
4527         mid_init_cb->options = cpu_to_le16(BIT_1);
4528         mid_init_cb->init_cb.execution_throttle =
4529             cpu_to_le16(ha->cur_fw_xcb_count);
4530         ha->flags.dport_enabled =
4531             (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
4532              BIT_7) != 0;
4533         ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
4534             (ha->flags.dport_enabled) ? "enabled" : "disabled");
4535         /* FA-WWPN Status */
4536         ha->flags.fawwpn_enabled =
4537             (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) &
4538              BIT_6) != 0;
4539         ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
4540             (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
4541         /* Init_cb will be reused for other command(s).  Save a backup copy of port_name */
4542         memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
4543     }
4544 
4545     /* ELS pass-through payload is limited by the frame size. */
4546     if (ha->flags.edif_enabled)
4547         mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
4548 
4549     rval = qla2x00_init_firmware(vha, ha->init_cb_size);
4550 next_check:
4551     if (rval) {
4552         ql_log(ql_log_fatal, vha, 0x00d2,
4553             "Init Firmware **** FAILED ****.\n");
4554     } else {
4555         ql_dbg(ql_dbg_init, vha, 0x00d3,
4556             "Init Firmware -- success.\n");
4557         QLA_FW_STARTED(ha);
4558         vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
4559     }
4560 
4561     return (rval);
4562 }
4563 
4564 /**
4565  * qla2x00_fw_ready() - Waits for firmware ready.
4566  * @vha: HA context
4567  *
4568  * Returns 0 on success.
4569  */
4570 static int
4571 qla2x00_fw_ready(scsi_qla_host_t *vha)
4572 {
4573     int     rval;
4574     unsigned long   wtime, mtime, cs84xx_time;
4575     uint16_t    min_wait;   /* Minimum wait time if loop is down */
4576     uint16_t    wait_time;  /* Wait time if loop is coming ready */
4577     uint16_t    state[6];
4578     struct qla_hw_data *ha = vha->hw;
4579 
4580     if (IS_QLAFX00(vha->hw))
4581         return qlafx00_fw_ready(vha);
4582 
4583     /* Time to wait for loop down */
4584     if (IS_P3P_TYPE(ha))
4585         min_wait = 30;
4586     else
4587         min_wait = 20;
4588 
4589     /*
4590      * Firmware should take at most one RATOV to login, plus 5 seconds for
4591      * our own processing.
4592      */
4593     if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
4594         wait_time = min_wait;
4595     }
4596 
4597     /* Min wait time if loop down */
4598     mtime = jiffies + (min_wait * HZ);
4599 
4600     /* wait time before firmware ready */
4601     wtime = jiffies + (wait_time * HZ);
4602 
4603     /* Wait for ISP to finish LIP */
4604     if (!vha->flags.init_done)
4605         ql_log(ql_log_info, vha, 0x801e,
4606             "Waiting for LIP to complete.\n");
4607 
4608     do {
4609         memset(state, -1, sizeof(state));
4610         rval = qla2x00_get_firmware_state(vha, state);
4611         if (rval == QLA_SUCCESS) {
4612             if (state[0] < FSTATE_LOSS_OF_SYNC) {
4613                 vha->device_flags &= ~DFLG_NO_CABLE;
4614             }
4615             if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
4616                 ql_dbg(ql_dbg_taskm, vha, 0x801f,
4617                     "fw_state=%x 84xx=%x.\n", state[0],
4618                     state[2]);
4619                 if ((state[2] & FSTATE_LOGGED_IN) &&
4620                      (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
4621                     ql_dbg(ql_dbg_taskm, vha, 0x8028,
4622                         "Sending verify iocb.\n");
4623 
4624                     cs84xx_time = jiffies;
4625                     rval = qla84xx_init_chip(vha);
4626                     if (rval != QLA_SUCCESS) {
4627                         ql_log(ql_log_warn,
4628                             vha, 0x8007,
4629                             "Init chip failed.\n");
4630                         break;
4631                     }
4632 
4633                     /* Add time taken to initialize. */
4634                     cs84xx_time = jiffies - cs84xx_time;
4635                     wtime += cs84xx_time;
4636                     mtime += cs84xx_time;
4637                     ql_dbg(ql_dbg_taskm, vha, 0x8008,
4638                         "Increasing wait time by %ld. "
4639                         "New time %ld.\n", cs84xx_time,
4640                         wtime);
4641                 }
4642             } else if (state[0] == FSTATE_READY) {
4643                 ql_dbg(ql_dbg_taskm, vha, 0x8037,
4644                     "F/W Ready - OK.\n");
4645 
4646                 qla2x00_get_retry_cnt(vha, &ha->retry_count,
4647                     &ha->login_timeout, &ha->r_a_tov);
4648 
4649                 rval = QLA_SUCCESS;
4650                 break;
4651             }
4652 
4653             rval = QLA_FUNCTION_FAILED;
4654 
4655             if (atomic_read(&vha->loop_down_timer) &&
4656                 state[0] != FSTATE_READY) {
4657                 /* Loop down. Timeout on min_wait for states
4658                  * other than Wait for Login.
4659                  */
4660                 if (time_after_eq(jiffies, mtime)) {
4661                     ql_log(ql_log_info, vha, 0x8038,
4662                         "Cable is unplugged...\n");
4663 
4664                     vha->device_flags |= DFLG_NO_CABLE;
4665                     break;
4666                 }
4667             }
4668         } else {
4669             /* Mailbox cmd failed. Timeout on min_wait. */
4670             if (time_after_eq(jiffies, mtime) ||
4671                 ha->flags.isp82xx_fw_hung)
4672                 break;
4673         }
4674 
4675         if (time_after_eq(jiffies, wtime))
4676             break;
4677 
4678         /* Delay for a while */
4679         msleep(500);
4680     } while (1);
4681 
4682     ql_dbg(ql_dbg_taskm, vha, 0x803a,
4683         "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
4684         state[1], state[2], state[3], state[4], state[5], jiffies);
4685 
4686     if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
4687         ql_log(ql_log_warn, vha, 0x803b,
4688             "Firmware ready **** FAILED ****.\n");
4689     }
4690 
4691     return (rval);
4692 }
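/*
 * Editorial note -- worked example with hypothetical values, not driver
 * code.  With retry_count = 8, login_timeout = 4 s and a non-P3P adapter
 * (min_wait = 20 s), the deadlines above work out to
 *
 *    wait_time = 8 * 4 + 5 = 37 s    (>= min_wait, so it is kept)
 *    mtime     = jiffies + 20 * HZ   (loop-down / mailbox-failure cutoff)
 *    wtime     = jiffies + 37 * HZ   (overall firmware-ready cutoff)
 *
 * and the loop polls the firmware state every 500 ms until one of those
 * deadlines passes or the firmware reports FSTATE_READY.
 */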
4693 
4694 /*
4695 *  qla2x00_configure_hba
4696 *      Setup adapter context.
4697 *
4698 * Input:
4699 *      ha = adapter state pointer.
4700 *
4701 * Returns:
4702 *      0 = success
4703 *
4704 * Context:
4705 *      Kernel context.
4706 */
4707 static int
4708 qla2x00_configure_hba(scsi_qla_host_t *vha)
4709 {
4710     int       rval;
4711     uint16_t      loop_id;
4712     uint16_t      topo;
4713     uint16_t      sw_cap;
4714     uint8_t       al_pa;
4715     uint8_t       area;
4716     uint8_t       domain;
4717     char        connect_type[22];
4718     struct qla_hw_data *ha = vha->hw;
4719     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
4720     port_id_t id;
4721     unsigned long flags;
4722 
4723     /* Get host addresses. */
4724     rval = qla2x00_get_adapter_id(vha,
4725         &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
4726     if (rval != QLA_SUCCESS) {
4727         if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
4728             IS_CNA_CAPABLE(ha) ||
4729             (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
4730             ql_dbg(ql_dbg_disc, vha, 0x2008,
4731                 "Loop is in a transition state.\n");
4732         } else {
4733             ql_log(ql_log_warn, vha, 0x2009,
4734                 "Unable to get host loop ID.\n");
4735             if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
4736                 (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
4737                 ql_log(ql_log_warn, vha, 0x1151,
4738                     "Doing link init.\n");
4739                 if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
4740                     return rval;
4741             }
4742             set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4743         }
4744         return (rval);
4745     }
4746 
4747     if (topo == 4) {
4748         ql_log(ql_log_info, vha, 0x200a,
4749             "Cannot get topology - retrying.\n");
4750         return (QLA_FUNCTION_FAILED);
4751     }
4752 
4753     vha->loop_id = loop_id;
4754 
4755     /* initialize */
4756     ha->min_external_loopid = SNS_FIRST_LOOP_ID;
4757     ha->operating_mode = LOOP;
4758 
4759     switch (topo) {
4760     case 0:
4761         ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
4762         ha->switch_cap = 0;
4763         ha->current_topology = ISP_CFG_NL;
4764         strcpy(connect_type, "(Loop)");
4765         break;
4766 
4767     case 1:
4768         ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
4769         ha->switch_cap = sw_cap;
4770         ha->current_topology = ISP_CFG_FL;
4771         strcpy(connect_type, "(FL_Port)");
4772         break;
4773 
4774     case 2:
4775         ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
4776         ha->switch_cap = 0;
4777         ha->operating_mode = P2P;
4778         ha->current_topology = ISP_CFG_N;
4779         strcpy(connect_type, "(N_Port-to-N_Port)");
4780         break;
4781 
4782     case 3:
4783         ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
4784         ha->switch_cap = sw_cap;
4785         ha->operating_mode = P2P;
4786         ha->current_topology = ISP_CFG_F;
4787         strcpy(connect_type, "(F_Port)");
4788         break;
4789 
4790     default:
4791         ql_dbg(ql_dbg_disc, vha, 0x200f,
4792             "HBA in unknown topology %x, using NL.\n", topo);
4793         ha->switch_cap = 0;
4794         ha->current_topology = ISP_CFG_NL;
4795         strcpy(connect_type, "(Loop)");
4796         break;
4797     }
4798 
4799     /* Save Host port and loop ID. */
4800     /* byte order - Big Endian */
4801     id.b.domain = domain;
4802     id.b.area = area;
4803     id.b.al_pa = al_pa;
4804     id.b.rsvd_1 = 0;
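         /* Publish the assigned port ID to the host map (skipped for some N2N configurations below). */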
4805     spin_lock_irqsave(&ha->hardware_lock, flags);
4806     if (vha->hw->flags.edif_enabled) {
4807         if (topo != 2)
4808             qlt_update_host_map(vha, id);
4809     } else if (!(topo == 2 && ha->flags.n2n_bigger))
4810         qlt_update_host_map(vha, id);
4811     spin_unlock_irqrestore(&ha->hardware_lock, flags);
4812 
4813     if (!vha->flags.init_done)
4814         ql_log(ql_log_info, vha, 0x2010,
4815             "Topology - %s, Host Loop address 0x%x.\n",
4816             connect_type, vha->loop_id);
4817 
4818     return(rval);
4819 }
4820 
4821 inline void
4822 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
4823                const char *def)
4824 {
4825     char *st, *en;
4826     uint16_t index;
4827     uint64_t zero[2] = { 0 };
4828     struct qla_hw_data *ha = vha->hw;
4829     int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
4830         !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
4831 
4832     if (len > sizeof(zero))
4833         len = sizeof(zero);
4834     if (memcmp(model, &zero, len) != 0) {
4835         memcpy(ha->model_number, model, len);
4836         st = en = ha->model_number;
4837         en += len - 1;
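             /* Trim trailing spaces and NULs from the copied model string. */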
4838         while (en > st) {
4839             if (*en != 0x20 && *en != 0x00)
4840                 break;
4841             *en-- = '\0';
4842         }
4843 
4844         index = (ha->pdev->subsystem_device & 0xff);
4845         if (use_tbl &&
4846             ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4847             index < QLA_MODEL_NAMES)
4848             strlcpy(ha->model_desc,
4849                 qla2x00_model_name[index * 2 + 1],
4850                 sizeof(ha->model_desc));
4851     } else {
4852         index = (ha->pdev->subsystem_device & 0xff);
4853         if (use_tbl &&
4854             ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4855             index < QLA_MODEL_NAMES) {
4856             strlcpy(ha->model_number,
4857                 qla2x00_model_name[index * 2],
4858                 sizeof(ha->model_number));
4859             strlcpy(ha->model_desc,
4860                 qla2x00_model_name[index * 2 + 1],
4861                 sizeof(ha->model_desc));
4862         } else {
4863             strlcpy(ha->model_number, def,
4864                 sizeof(ha->model_number));
4865         }
4866     }
4867     if (IS_FWI2_CAPABLE(ha))
4868         qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
4869             sizeof(ha->model_desc));
4870 }
4871 
4872 /* On sparc systems, obtain port and node WWN from firmware
4873  * properties.
4874  */
4875 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4876 {
4877 #ifdef CONFIG_SPARC
4878     struct qla_hw_data *ha = vha->hw;
4879     struct pci_dev *pdev = ha->pdev;
4880     struct device_node *dp = pci_device_to_OF_node(pdev);
4881     const u8 *val;
4882     int len;
4883 
4884     val = of_get_property(dp, "port-wwn", &len);
4885     if (val && len >= WWN_SIZE)
4886         memcpy(nv->port_name, val, WWN_SIZE);
4887 
4888     val = of_get_property(dp, "node-wwn", &len);
4889     if (val && len >= WWN_SIZE)
4890         memcpy(nv->node_name, val, WWN_SIZE);
4891 #endif
4892 }
4893 
4894 /*
4895 * NVRAM configuration for ISP 2xxx
4896 *
4897 * Input:
4898 *      ha                = adapter block pointer.
4899 *
4900 * Output:
4901 *      initialization control block in response_ring
4902 *      host adapters parameters in host adapter block
4903 *
4904 * Returns:
4905 *      0 = success.
4906 */
4907 int
4908 qla2x00_nvram_config(scsi_qla_host_t *vha)
4909 {
4910     int             rval;
4911     uint8_t         chksum = 0;
4912     uint16_t        cnt;
4913     uint8_t         *dptr1, *dptr2;
4914     struct qla_hw_data *ha = vha->hw;
4915     init_cb_t       *icb = ha->init_cb;
4916     nvram_t         *nv = ha->nvram;
4917     uint8_t         *ptr = ha->nvram;
4918     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4919 
4920     rval = QLA_SUCCESS;
4921 
4922     /* Determine NVRAM starting address. */
4923     ha->nvram_size = sizeof(*nv);
4924     ha->nvram_base = 0;
4925     if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
4926         if ((rd_reg_word(&reg->ctrl_status) >> 14) == 1)
4927             ha->nvram_base = 0x80;
4928 
4929     /* Get NVRAM data and calculate checksum. */
4930     ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
4931     for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
4932         chksum += *ptr++;
4933 
4934     ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
4935         "Contents of NVRAM.\n");
4936     ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
4937         nv, ha->nvram_size);
4938 
4939     /* Bad NVRAM data, set default parameters. */
4940     if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
4941         nv->nvram_version < 1) {
4942         /* Reset NVRAM data. */
4943         ql_log(ql_log_warn, vha, 0x0064,
4944             "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
4945             chksum, nv->id, nv->nvram_version);
4946         ql_log(ql_log_warn, vha, 0x0065,
4947             "Falling back to "
4948             "functioning (yet invalid -- WWPN) defaults.\n");
4949 
4950         /*
4951          * Set default initialization control block.
4952          */
4953         memset(nv, 0, ha->nvram_size);
4954         nv->parameter_block_version = ICB_VERSION;
4955 
4956         if (IS_QLA23XX(ha)) {
4957             nv->firmware_options[0] = BIT_2 | BIT_1;
4958             nv->firmware_options[1] = BIT_7 | BIT_5;
4959             nv->add_firmware_options[0] = BIT_5;
4960             nv->add_firmware_options[1] = BIT_5 | BIT_4;
4961             nv->frame_payload_size = cpu_to_le16(2048);
4962             nv->special_options[1] = BIT_7;
4963         } else if (IS_QLA2200(ha)) {
4964             nv->firmware_options[0] = BIT_2 | BIT_1;
4965             nv->firmware_options[1] = BIT_7 | BIT_5;
4966             nv->add_firmware_options[0] = BIT_5;
4967             nv->add_firmware_options[1] = BIT_5 | BIT_4;
4968             nv->frame_payload_size = cpu_to_le16(1024);
4969         } else if (IS_QLA2100(ha)) {
4970             nv->firmware_options[0] = BIT_3 | BIT_1;
4971             nv->firmware_options[1] = BIT_5;
4972             nv->frame_payload_size = cpu_to_le16(1024);
4973         }
4974 
4975         nv->max_iocb_allocation = cpu_to_le16(256);
4976         nv->execution_throttle = cpu_to_le16(16);
4977         nv->retry_count = 8;
4978         nv->retry_delay = 1;
4979 
4980         nv->port_name[0] = 33;
4981         nv->port_name[3] = 224;
4982         nv->port_name[4] = 139;
4983 
4984         qla2xxx_nvram_wwn_from_ofw(vha, nv);
4985 
4986         nv->login_timeout = 4;
4987 
4988         /*
4989          * Set default host adapter parameters
4990          */
4991         nv->host_p[1] = BIT_2;
4992         nv->reset_delay = 5;
4993         nv->port_down_retry_count = 8;
4994         nv->max_luns_per_target = cpu_to_le16(8);
4995         nv->link_down_timeout = 60;
4996 
4997         rval = 1;
4998     }
4999 
5000     /* Reset Initialization control block */
5001     memset(icb, 0, ha->init_cb_size);
5002 
5003     /*
5004      * Setup driver NVRAM options.
5005      */
5006     nv->firmware_options[0] |= (BIT_6 | BIT_1);
5007     nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
5008     nv->firmware_options[1] |= (BIT_5 | BIT_0);
5009     nv->firmware_options[1] &= ~BIT_4;
5010 
5011     if (IS_QLA23XX(ha)) {
5012         nv->firmware_options[0] |= BIT_2;
5013         nv->firmware_options[0] &= ~BIT_3;
5014         nv->special_options[0] &= ~BIT_6;
5015         nv->add_firmware_options[1] |= BIT_5 | BIT_4;
5016 
5017         if (IS_QLA2300(ha)) {
5018             if (ha->fb_rev == FPM_2310) {
5019                 strcpy(ha->model_number, "QLA2310");
5020             } else {
5021                 strcpy(ha->model_number, "QLA2300");
5022             }
5023         } else {
5024             qla2x00_set_model_info(vha, nv->model_number,
5025                 sizeof(nv->model_number), "QLA23xx");
5026         }
5027     } else if (IS_QLA2200(ha)) {
5028         nv->firmware_options[0] |= BIT_2;
5029         /*
5030          * 'Point-to-point preferred, else loop' is not a safe
5031          * connection mode setting.
5032          */
5033         if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
5034             (BIT_5 | BIT_4)) {
5035             /* Force 'loop preferred, else point-to-point'. */
5036             nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
5037             nv->add_firmware_options[0] |= BIT_5;
5038         }
5039         strcpy(ha->model_number, "QLA22xx");
5040     } else /*if (IS_QLA2100(ha))*/ {
5041         strcpy(ha->model_number, "QLA2100");
5042     }
5043 
5044     /*
5045      * Copy over NVRAM RISC parameter block to initialization control block.
5046      */
5047     dptr1 = (uint8_t *)icb;
5048     dptr2 = (uint8_t *)&nv->parameter_block_version;
5049     cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
5050     while (cnt--)
5051         *dptr1++ = *dptr2++;
5052 
5053     /* Copy 2nd half. */
5054     dptr1 = (uint8_t *)icb->add_firmware_options;
5055     cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
5056     while (cnt--)
5057         *dptr1++ = *dptr2++;
5058     ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
5059     /* Use alternate WWN? */
5060     if (nv->host_p[1] & BIT_7) {
5061         memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5062         memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5063     }
5064 
5065     /* Prepare nodename */
5066     if ((icb->firmware_options[1] & BIT_6) == 0) {
5067         /*
5068          * Firmware will apply the following mask if the nodename was
5069          * not provided.
5070          */
5071         memcpy(icb->node_name, icb->port_name, WWN_SIZE);
5072         icb->node_name[0] &= 0xF0;
5073     }
5074 
5075     /*
5076      * Set host adapter parameters.
5077      */
5078 
5079     /*
5080      * BIT_7 in the host-parameters section allows for modification to
5081      * internal driver logging.
5082      */
5083     if (nv->host_p[0] & BIT_7)
5084         ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
5085     ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
5086     /* Always load RISC code on non ISP2[12]00 chips. */
5087     if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
5088         ha->flags.disable_risc_code_load = 0;
5089     ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
5090     ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
5091     ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
5092     ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
5093     ha->flags.disable_serdes = 0;
5094 
5095     ha->operating_mode =
5096         (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;
5097 
5098     memcpy(ha->fw_seriallink_options, nv->seriallink_options,
5099         sizeof(ha->fw_seriallink_options));
5100 
5101     /* save HBA serial number */
5102     ha->serial0 = icb->port_name[5];
5103     ha->serial1 = icb->port_name[6];
5104     ha->serial2 = icb->port_name[7];
5105     memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5106     memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5107 
5108     icb->execution_throttle = cpu_to_le16(0xFFFF);
5109 
5110     ha->retry_count = nv->retry_count;
5111 
5112     /* Set minimum login_timeout to 4 seconds. */
5113     if (nv->login_timeout != ql2xlogintimeout)
5114         nv->login_timeout = ql2xlogintimeout;
5115     if (nv->login_timeout < 4)
5116         nv->login_timeout = 4;
5117     ha->login_timeout = nv->login_timeout;
5118 
5119     /* Set minimum RATOV to 100 tenths of a second. */
5120     ha->r_a_tov = 100;
5121 
5122     ha->loop_reset_delay = nv->reset_delay;
5123 
5124     /* Link Down Timeout = 0:
5125      *
5126      *  When the Port Down timer expires, we will start returning
5127      *  I/Os to the OS with "DID_NO_CONNECT".
5128      *
5129      * Link Down Timeout != 0:
5130      *
5131      *   The driver waits for the link to come back up after a link
5132      *   down before returning I/Os to the OS with "DID_NO_CONNECT".
5133      */
5134     if (nv->link_down_timeout == 0) {
5135         ha->loop_down_abort_time =
5136             (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
5137     } else {
5138         ha->link_down_timeout =  nv->link_down_timeout;
5139         ha->loop_down_abort_time =
5140             (LOOP_DOWN_TIME - ha->link_down_timeout);
5141     }
5142 
5143     /*
5144      * Need enough time to try and get the port back.
5145      */
5146     ha->port_down_retry_count = nv->port_down_retry_count;
5147     if (qlport_down_retry)
5148         ha->port_down_retry_count = qlport_down_retry;
5149     /* Set login_retry_count */
5150     ha->login_retry_count  = nv->retry_count;
5151     if (ha->port_down_retry_count == nv->port_down_retry_count &&
5152         ha->port_down_retry_count > 3)
5153         ha->login_retry_count = ha->port_down_retry_count;
5154     else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5155         ha->login_retry_count = ha->port_down_retry_count;
5156     if (ql2xloginretrycount)
5157         ha->login_retry_count = ql2xloginretrycount;
5158 
5159     icb->lun_enables = cpu_to_le16(0);
5160     icb->command_resource_count = 0;
5161     icb->immediate_notify_resource_count = 0;
5162     icb->timeout = cpu_to_le16(0);
5163 
5164     if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5165         /* Enable RIO */
5166         icb->firmware_options[0] &= ~BIT_3;
5167         icb->add_firmware_options[0] &=
5168             ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
5169         icb->add_firmware_options[0] |= BIT_2;
5170         icb->response_accumulation_timer = 3;
5171         icb->interrupt_delay_timer = 5;
5172 
5173         vha->flags.process_response_queue = 1;
5174     } else {
5175         /* Enable ZIO. */
5176         if (!vha->flags.init_done) {
5177             ha->zio_mode = icb->add_firmware_options[0] &
5178                 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
5179             ha->zio_timer = icb->interrupt_delay_timer ?
5180                 icb->interrupt_delay_timer : 2;
5181         }
5182         icb->add_firmware_options[0] &=
5183             ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
5184         vha->flags.process_response_queue = 0;
5185         if (ha->zio_mode != QLA_ZIO_DISABLED) {
5186             ha->zio_mode = QLA_ZIO_MODE_6;
5187 
5188             ql_log(ql_log_info, vha, 0x0068,
5189                 "ZIO mode %d enabled; timer delay (%d us).\n",
5190                 ha->zio_mode, ha->zio_timer * 100);
5191 
5192             icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
5193             icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
5194             vha->flags.process_response_queue = 1;
5195         }
5196     }
5197 
5198     if (rval) {
5199         ql_log(ql_log_warn, vha, 0x0069,
5200             "NVRAM configuration failed.\n");
5201     }
5202     return (rval);
5203 }
5204 
5205 static void
5206 qla2x00_rport_del(void *data)
5207 {
5208     fc_port_t *fcport = data;
5209     struct fc_rport *rport;
5210     unsigned long flags;
5211 
5212     spin_lock_irqsave(fcport->vha->host->host_lock, flags);
5213     rport = fcport->drport ? fcport->drport : fcport->rport;
5214     fcport->drport = NULL;
5215     spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
5216     if (rport) {
5217         ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
5218             "%s %8phN. rport %p roles %x\n",
5219             __func__, fcport->port_name, rport,
5220             rport->roles);
5221 
5222         fc_remote_port_delete(rport);
5223     }
5224 }
5225 
5226 void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
5227 {
5228     int old_state;
5229 
5230     old_state = atomic_read(&fcport->state);
5231     atomic_set(&fcport->state, state);
5232 
5233     /* Don't print state transitions during initial allocation of fcport */
5234     if (old_state && old_state != state) {
5235         ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
5236                "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
5237                fcport->port_name, port_state_str[old_state],
5238                port_state_str[state], fcport->d_id.b.domain,
5239                fcport->d_id.b.area, fcport->d_id.b.al_pa);
5240     }
5241 }
5242 
5243 /**
5244  * qla2x00_alloc_fcport() - Allocate a generic fcport.
5245  * @vha: HA context
5246  * @flags: allocation flags
5247  *
5248  * Returns a pointer to the allocated fcport, or NULL, if none available.
5249  */
5250 fc_port_t *
5251 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
5252 {
5253     fc_port_t *fcport;
5254 
5255     fcport = kzalloc(sizeof(fc_port_t), flags);
5256     if (!fcport)
5257         return NULL;
5258 
5259     fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
5260         sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
5261         flags);
5262     if (!fcport->ct_desc.ct_sns) {
5263         ql_log(ql_log_warn, vha, 0xd049,
5264             "Failed to allocate ct_sns request.\n");
5265         kfree(fcport);
5266         return NULL;
5267     }
5268 
5269     /* Setup fcport template structure. */
5270     fcport->vha = vha;
5271     fcport->port_type = FCT_UNKNOWN;
5272     fcport->loop_id = FC_NO_LOOP_ID;
5273     qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
5274     fcport->supported_classes = FC_COS_UNSPECIFIED;
5275     fcport->fp_speed = PORT_SPEED_UNKNOWN;
5276 
5277     fcport->disc_state = DSC_DELETED;
5278     fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
5279     fcport->deleted = QLA_SESS_DELETED;
5280     fcport->login_retry = vha->hw->login_retry_count;
5281     fcport->chip_reset = vha->hw->base_qpair->chip_reset;
5282     fcport->logout_on_delete = 1;
5283     fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
5284     fcport->tgt_short_link_down_cnt = 0;
5285     fcport->dev_loss_tmo = 0;
5286 
5287     if (!fcport->ct_desc.ct_sns) {
5288         ql_log(ql_log_warn, vha, 0xd049,
5289             "Failed to allocate ct_sns request.\n");
5290         kfree(fcport);
5291         return NULL;
5292     }
5293 
5294     INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
5295     INIT_WORK(&fcport->free_work, qlt_free_session_done);
5296     INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
5297     INIT_LIST_HEAD(&fcport->gnl_entry);
5298     INIT_LIST_HEAD(&fcport->list);
5299 
5300     INIT_LIST_HEAD(&fcport->sess_cmd_list);
5301     spin_lock_init(&fcport->sess_cmd_lock);
5302 
5303     spin_lock_init(&fcport->edif.sa_list_lock);
5304     INIT_LIST_HEAD(&fcport->edif.tx_sa_list);
5305     INIT_LIST_HEAD(&fcport->edif.rx_sa_list);
5306 
5307     spin_lock_init(&fcport->edif.indx_list_lock);
5308     INIT_LIST_HEAD(&fcport->edif.edif_indx_list);
5309 
5310     return fcport;
5311 }
5312 
5313 void
5314 qla2x00_free_fcport(fc_port_t *fcport)
5315 {
5316     if (fcport->ct_desc.ct_sns) {
5317         dma_free_coherent(&fcport->vha->hw->pdev->dev,
5318             sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
5319             fcport->ct_desc.ct_sns_dma);
5320 
5321         fcport->ct_desc.ct_sns = NULL;
5322     }
5323 
5324     qla_edif_flush_sa_ctl_lists(fcport);
5325     list_del(&fcport->list);
5326     qla2x00_clear_loop_id(fcport);
5327 
5328     qla_edif_list_del(fcport);
5329 
5330     kfree(fcport);
5331 }
5332 
5333 static void qla_get_login_template(scsi_qla_host_t *vha)
5334 {
5335     struct qla_hw_data *ha = vha->hw;
5336     int rval;
5337     u32 *bp, sz;
5338     __be32 *q;
5339 
5340     memset(ha->init_cb, 0, ha->init_cb_size);
5341     sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
5342     rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
5343                         ha->init_cb, sz);
5344     if (rval != QLA_SUCCESS) {
5345         ql_dbg(ql_dbg_init, vha, 0x00d1,
5346                "PLOGI ELS param read fail.\n");
5347         return;
5348     }
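         /* Cache the template in big-endian (wire) order for later PLOGI ELS payload use. */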
5349     q = (__be32 *)&ha->plogi_els_payld.fl_csp;
5350 
5351     bp = (uint32_t *)ha->init_cb;
5352     cpu_to_be32_array(q, bp, sz / 4);
5353     ha->flags.plogi_template_valid = 1;
5354 }
5355 
5356 /*
5357  * qla2x00_configure_loop
5358  *      Updates Fibre Channel Device Database with what is actually on loop.
5359  *
5360  * Input:
5361  *      ha                = adapter block pointer.
5362  *
5363  * Returns:
5364  *      0 = success.
5365  *      1 = error.
5366  *      2 = database was full and device was not configured.
5367  */
5368 static int
5369 qla2x00_configure_loop(scsi_qla_host_t *vha)
5370 {
5371     int  rval;
5372     unsigned long flags, save_flags;
5373     struct qla_hw_data *ha = vha->hw;
5374 
5375     rval = QLA_SUCCESS;
5376 
5377     /* Get Initiator ID */
5378     if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
5379         rval = qla2x00_configure_hba(vha);
5380         if (rval != QLA_SUCCESS) {
5381             ql_dbg(ql_dbg_disc, vha, 0x2013,
5382                 "Unable to configure HBA.\n");
5383             return (rval);
5384         }
5385     }
5386 
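         /* Snapshot dpc_flags; save_flags restores pending update bits at the end if a loop resync interrupts processing. */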
5387     save_flags = flags = vha->dpc_flags;
5388     ql_dbg(ql_dbg_disc, vha, 0x2014,
5389         "Configure loop -- dpc flags = 0x%lx.\n", flags);
5390 
5391     /*
5392      * If we have both an RSCN and PORT UPDATE pending then handle them
5393      * both at the same time.
5394      */
5395     clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5396     clear_bit(RSCN_UPDATE, &vha->dpc_flags);
5397 
5398     qla2x00_get_data_rate(vha);
5399     qla_get_login_template(vha);
5400 
5401     /* Determine what we need to do */
5402     if ((ha->current_topology == ISP_CFG_FL ||
5403         ha->current_topology == ISP_CFG_F) &&
5404         (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
5405 
5406         set_bit(RSCN_UPDATE, &flags);
5407         clear_bit(LOCAL_LOOP_UPDATE, &flags);
5408 
5409     } else if (ha->current_topology == ISP_CFG_NL ||
5410            ha->current_topology == ISP_CFG_N) {
5411         clear_bit(RSCN_UPDATE, &flags);
5412         set_bit(LOCAL_LOOP_UPDATE, &flags);
5413     } else if (!vha->flags.online ||
5414         (test_bit(ABORT_ISP_ACTIVE, &flags))) {
5415         set_bit(RSCN_UPDATE, &flags);
5416         set_bit(LOCAL_LOOP_UPDATE, &flags);
5417     }
5418 
5419     if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
5420         if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
5421             ql_dbg(ql_dbg_disc, vha, 0x2015,
5422                 "Loop resync needed, failing.\n");
5423             rval = QLA_FUNCTION_FAILED;
5424         } else
5425             rval = qla2x00_configure_local_loop(vha);
5426     }
5427 
5428     if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
5429         if (LOOP_TRANSITION(vha)) {
5430             ql_dbg(ql_dbg_disc, vha, 0x2099,
5431                 "Needs RSCN update and loop transition.\n");
5432             rval = QLA_FUNCTION_FAILED;
5433         } else
5435             rval = qla2x00_configure_fabric(vha);
5436     }
5437 
5438     if (rval == QLA_SUCCESS) {
5439         if (atomic_read(&vha->loop_down_timer) ||
5440             test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
5441             rval = QLA_FUNCTION_FAILED;
5442         } else {
5443             atomic_set(&vha->loop_state, LOOP_READY);
5444             ql_dbg(ql_dbg_disc, vha, 0x2069,
5445                 "LOOP READY.\n");
5446             ha->flags.fw_init_done = 1;
5447 
5448             /*
5449              * Use a link-up event to wake the application so it can
5450              * prepare for authentication.
5451              */
5452             if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
5453                 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP,
5454                               ha->link_data_rate);
5455 
5456             /*
5457              * Process any ATIO queue entries that came in
5458              * while we weren't online.
5459              */
5460             if (qla_tgt_mode_enabled(vha) ||
5461                 qla_dual_mode_enabled(vha)) {
5462                 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
5463                 qlt_24xx_process_atio_queue(vha, 0);
5464                 spin_unlock_irqrestore(&ha->tgt.atio_lock,
5465                     flags);
5466             }
5467         }
5468     }
5469 
5470     if (rval) {
5471         ql_dbg(ql_dbg_disc, vha, 0x206a,
5472             "%s *** FAILED ***.\n", __func__);
5473     } else {
5474         ql_dbg(ql_dbg_disc, vha, 0x206b,
5475             "%s: exiting normally. local port wwpn %8phN id %06x\n",
5476             __func__, vha->port_name, vha->d_id.b24);
5477     }
5478 
5479     /* Restore state if a resync event occurred during processing */
5480     if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
5481         if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
5482             set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5483         if (test_bit(RSCN_UPDATE, &save_flags)) {
5484             set_bit(RSCN_UPDATE, &vha->dpc_flags);
5485         }
5486     }
5487 
5488     return (rval);
5489 }
5490 
5491 static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
5492 {
5493     unsigned long flags;
5494     fc_port_t *fcport;
5495 
5496     ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__);
5497 
5498     if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
5499         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5500 
5501     list_for_each_entry(fcport, &vha->vp_fcports, list) {
5502         if (fcport->n2n_flag) {
5503             qla24xx_fcport_handle_login(vha, fcport);
5504             return QLA_SUCCESS;
5505         }
5506     }
5507 
5508     spin_lock_irqsave(&vha->work_lock, flags);
5509     vha->scan.scan_retry++;
5510     spin_unlock_irqrestore(&vha->work_lock, flags);
5511 
5512     if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5513         set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5514         set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5515     }
5516     return QLA_FUNCTION_FAILED;
5517 }
5518 
5519 static void
5520 qla_reinitialize_link(scsi_qla_host_t *vha)
5521 {
5522     int rval;
5523 
5524     atomic_set(&vha->loop_state, LOOP_DOWN);
5525     atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
5526     rval = qla2x00_full_login_lip(vha);
5527     if (rval == QLA_SUCCESS) {
5528         ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n");
5529     } else {
5530         ql_dbg(ql_dbg_disc, vha, 0xd051,
5531             "Link reinitialization failed (%d)\n", rval);
5532     }
5533 }
5534 
5535 /*
5536  * qla2x00_configure_local_loop
5537  *  Updates Fibre Channel Device Database with local loop devices.
5538  *
5539  * Input:
5540  *  ha = adapter block pointer.
5541  *
5542  * Returns:
5543  *  0 = success.
5544  */
5545 static int
5546 qla2x00_configure_local_loop(scsi_qla_host_t *vha)
5547 {
5548     int     rval, rval2;
5549     int     found_devs;
5550     int     found;
5551     fc_port_t   *fcport, *new_fcport;
5552     uint16_t    index;
5553     uint16_t    entries;
5554     struct gid_list_info *gid;
5555     uint16_t    loop_id;
5556     uint8_t     domain, area, al_pa;
5557     struct qla_hw_data *ha = vha->hw;
5558     unsigned long flags;
5559 
5560     /* Initiate N2N login. */
5561     if (N2N_TOPO(ha))
5562         return qla2x00_configure_n2n_loop(vha);
5563 
5564     found_devs = 0;
5565     new_fcport = NULL;
5566     entries = MAX_FIBRE_DEVICES_LOOP;
5567 
5568     /* Get list of logged in devices. */
5569     memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
5570     rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
5571         &entries);
5572     if (rval != QLA_SUCCESS)
5573         goto err;
5574 
5575     ql_dbg(ql_dbg_disc, vha, 0x2011,
5576         "Entries in ID list (%d).\n", entries);
5577     ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
5578         ha->gid_list, entries * sizeof(*ha->gid_list));
5579 
5580     if (entries == 0) {
5581         spin_lock_irqsave(&vha->work_lock, flags);
5582         vha->scan.scan_retry++;
5583         spin_unlock_irqrestore(&vha->work_lock, flags);
5584 
5585         if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
5586             u8 loop_map_entries = 0;
5587             int rc;
5588 
5589             rc = qla2x00_get_fcal_position_map(vha, NULL,
5590                         &loop_map_entries);
5591             if (rc == QLA_SUCCESS && loop_map_entries > 1) {
5592                 /*
5593                  * There are devices that are still not logged
5594                  * in. Reinitialize to give them a chance.
5595                  */
5596                 qla_reinitialize_link(vha);
5597                 return QLA_FUNCTION_FAILED;
5598             }
5599             set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5600             set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5601         }
5602     } else {
5603         vha->scan.scan_retry = 0;
5604     }
5605 
5606     list_for_each_entry(fcport, &vha->vp_fcports, list) {
5607         fcport->scan_state = QLA_FCPORT_SCAN;
5608     }
5609 
5610     /* Allocate temporary fcport for any new fcports discovered. */
5611     new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5612     if (new_fcport == NULL) {
5613         ql_log(ql_log_warn, vha, 0x2012,
5614             "Memory allocation failed for fcport.\n");
5615         rval = QLA_MEMORY_ALLOC_FAILED;
5616         goto err;
5617     }
5618     new_fcport->flags &= ~FCF_FABRIC_DEVICE;
5619 
5620     /* Add devices to port list. */
5621     gid = ha->gid_list;
5622     for (index = 0; index < entries; index++) {
5623         domain = gid->domain;
5624         area = gid->area;
5625         al_pa = gid->al_pa;
5626         if (IS_QLA2100(ha) || IS_QLA2200(ha))
5627             loop_id = gid->loop_id_2100;
5628         else
5629             loop_id = le16_to_cpu(gid->loop_id);
5630         gid = (void *)gid + ha->gid_list_info_size;
5631 
5632         /* Bypass reserved domain fields. */
5633         if ((domain & 0xf0) == 0xf0)
5634             continue;
5635 
5636         /* Bypass if not same domain and area of adapter. */
5637         if (area && domain && ((area != vha->d_id.b.area) ||
5638             (domain != vha->d_id.b.domain)) &&
5639             (ha->current_topology == ISP_CFG_NL))
5640             continue;
5641 
5643         /* Bypass invalid local loop ID. */
5644         if (loop_id > LAST_LOCAL_LOOP_ID)
5645             continue;
5646 
5647         memset(new_fcport->port_name, 0, WWN_SIZE);
5648 
5649         /* Fill in member data. */
5650         new_fcport->d_id.b.domain = domain;
5651         new_fcport->d_id.b.area = area;
5652         new_fcport->d_id.b.al_pa = al_pa;
5653         new_fcport->loop_id = loop_id;
5654         new_fcport->scan_state = QLA_FCPORT_FOUND;
5655 
5656         rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
5657         if (rval2 != QLA_SUCCESS) {
5658             ql_dbg(ql_dbg_disc, vha, 0x2097,
5659                 "Failed to retrieve fcport information "
5660                 "-- get_port_database=%x, loop_id=0x%04x.\n",
5661                 rval2, new_fcport->loop_id);
5662             /* Skip retry if N2N */
5663             if (ha->current_topology != ISP_CFG_N) {
5664                 ql_dbg(ql_dbg_disc, vha, 0x2105,
5665                     "Scheduling resync.\n");
5666                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5667                 continue;
5668             }
5669         }
5670 
5671         spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5672         /* Check for matching device in port list. */
5673         found = 0;
5674         fcport = NULL;
5675         list_for_each_entry(fcport, &vha->vp_fcports, list) {
5676             if (memcmp(new_fcport->port_name, fcport->port_name,
5677                 WWN_SIZE))
5678                 continue;
5679 
5680             fcport->flags &= ~FCF_FABRIC_DEVICE;
5681             fcport->loop_id = new_fcport->loop_id;
5682             fcport->port_type = new_fcport->port_type;
5683             fcport->d_id.b24 = new_fcport->d_id.b24;
5684             memcpy(fcport->node_name, new_fcport->node_name,
5685                 WWN_SIZE);
5686             fcport->scan_state = QLA_FCPORT_FOUND;
5687             if (fcport->login_retry == 0) {
5688                 fcport->login_retry = vha->hw->login_retry_count;
5689                 ql_dbg(ql_dbg_disc, vha, 0x2135,
5690                     "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
5691                     fcport->port_name, fcport->loop_id,
5692                     fcport->login_retry);
5693             }
5694             found++;
5695             break;
5696         }
5697 
5698         if (!found) {
5699             /* New device, add to fcports list. */
5700             list_add_tail(&new_fcport->list, &vha->vp_fcports);
5701 
5702             /* Allocate a new replacement fcport. */
5703             fcport = new_fcport;
5704 
5705             spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5706 
5707             new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5708 
5709             if (new_fcport == NULL) {
5710                 ql_log(ql_log_warn, vha, 0xd031,
5711                     "Failed to allocate memory for fcport.\n");
5712                 rval = QLA_MEMORY_ALLOC_FAILED;
5713                 goto err;
5714             }
5715             spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5716             new_fcport->flags &= ~FCF_FABRIC_DEVICE;
5717         }
5718 
5719         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5720 
5721         /* Base iIDMA settings on HBA port speed. */
5722         fcport->fp_speed = ha->link_data_rate;
5723 
5724         found_devs++;
5725     }
5726 
5727     list_for_each_entry(fcport, &vha->vp_fcports, list) {
5728         if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5729             break;
5730 
5731         if (fcport->scan_state == QLA_FCPORT_SCAN) {
5732             if ((qla_dual_mode_enabled(vha) ||
5733                 qla_ini_mode_enabled(vha)) &&
5734                 atomic_read(&fcport->state) == FCS_ONLINE) {
5735                 qla2x00_mark_device_lost(vha, fcport,
5736                     ql2xplogiabsentdevice);
5737                 if (fcport->loop_id != FC_NO_LOOP_ID &&
5738                     (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
5739                     fcport->port_type != FCT_INITIATOR &&
5740                     fcport->port_type != FCT_BROADCAST) {
5741                     ql_dbg(ql_dbg_disc, vha, 0x20f0,
5742                         "%s %d %8phC post del sess\n",
5743                         __func__, __LINE__,
5744                         fcport->port_name);
5745 
5746                     qlt_schedule_sess_for_deletion(fcport);
5747                     continue;
5748                 }
5749             }
5750         }
5751 
5752         if (fcport->scan_state == QLA_FCPORT_FOUND)
5753             qla24xx_fcport_handle_login(vha, fcport);
5754     }
5755 
5756     qla2x00_free_fcport(new_fcport);
5757 
5758     return rval;
5759 
5760 err:
5761     ql_dbg(ql_dbg_disc, vha, 0x2098,
5762            "Configure local loop error exit: rval=%x.\n", rval);
5763     return rval;
5764 }
5765 
5766 static void
5767 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5768 {
5769     int rval;
5770     uint16_t mb[MAILBOX_REGISTER_COUNT];
5771     struct qla_hw_data *ha = vha->hw;
5772 
5773     if (!IS_IIDMA_CAPABLE(ha))
5774         return;
5775 
5776     if (atomic_read(&fcport->state) != FCS_ONLINE)
5777         return;
5778 
5779     if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
5780         fcport->fp_speed > ha->link_data_rate ||
5781         !ha->flags.gpsc_supported)
5782         return;
5783 
5784     rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
5785         mb);
5786     if (rval != QLA_SUCCESS) {
5787         ql_dbg(ql_dbg_disc, vha, 0x2004,
5788             "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
5789             fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
5790     } else {
5791         ql_dbg(ql_dbg_disc, vha, 0x2005,
5792             "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
5793             qla2x00_get_link_speed_str(ha, fcport->fp_speed),
5794             fcport->fp_speed, fcport->port_name);
5795     }
5796 }
5797 
5798 void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
5799 {
5800     qla2x00_iidma_fcport(vha, fcport);
5801     qla24xx_update_fcport_fcp_prio(vha, fcport);
5802 }
5803 
5804 int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
5805 {
5806     struct qla_work_evt *e;
5807 
5808     e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);
5809     if (!e)
5810         return QLA_FUNCTION_FAILED;
5811 
5812     e->u.fcport.fcport = fcport;
5813     return qla2x00_post_work(vha, e);
5814 }
5815 
5816 /* qla2x00_reg_remote_port is reserved for Initiator Mode only. */
5817 static void
5818 qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
5819 {
5820     struct fc_rport_identifiers rport_ids;
5821     struct fc_rport *rport;
5822     unsigned long flags;
5823 
5824     if (atomic_read(&fcport->state) == FCS_ONLINE)
5825         return;
5826 
5827     rport_ids.node_name = wwn_to_u64(fcport->node_name);
5828     rport_ids.port_name = wwn_to_u64(fcport->port_name);
5829     rport_ids.port_id = fcport->d_id.b.domain << 16 |
5830         fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
5831     rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
5832     fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
5833     if (!rport) {
5834         ql_log(ql_log_warn, vha, 0x2006,
5835             "Unable to allocate fc remote port.\n");
5836         return;
5837     }
5838 
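         /* Stash the fcport pointer in the rport's private data so transport callbacks can map back to it. */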
5839     spin_lock_irqsave(fcport->vha->host->host_lock, flags);
5840     *((fc_port_t **)rport->dd_data) = fcport;
5841     spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
5842     fcport->dev_loss_tmo = rport->dev_loss_tmo;
5843 
5844     rport->supported_classes = fcport->supported_classes;
5845 
5846     rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
5847     if (fcport->port_type == FCT_INITIATOR)
5848         rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
5849     if (fcport->port_type == FCT_TARGET)
5850         rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
5851     if (fcport->port_type & FCT_NVME_INITIATOR)
5852         rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
5853     if (fcport->port_type & FCT_NVME_TARGET)
5854         rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
5855     if (fcport->port_type & FCT_NVME_DISCOVERY)
5856         rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
5857 
5858     fc_remote_port_rolechg(rport, rport_ids.roles);
5859 
5860     ql_dbg(ql_dbg_disc, vha, 0x20ee,
5861         "%s: %8phN. rport %ld:0:%d (%p) is %s mode\n",
5862         __func__, fcport->port_name, vha->host_no,
5863         rport->scsi_target_id, rport,
5864         (fcport->port_type == FCT_TARGET) ? "tgt" :
5865         ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));
5866 }
5867 
5868 /*
5869  * qla2x00_update_fcport
5870  *  Updates device on list.
5871  *
5872  * Input:
5873  *  ha = adapter block pointer.
5874  *  fcport = port structure pointer.
5875  *
5876  * Return:
5877  *  None.
5879  *
5880  * Context:
5881  *  Kernel context.
5882  */
5883 void
5884 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5885 {
5886     if (IS_SW_RESV_ADDR(fcport->d_id))
5887         return;
5888 
5889     ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
5890         __func__, fcport->port_name);
5891 
5892     qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
5893     fcport->login_retry = vha->hw->login_retry_count;
5894     fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
5895     fcport->deleted = 0;
5896     if (vha->hw->current_topology == ISP_CFG_NL)
5897         fcport->logout_on_delete = 0;
5898     else
5899         fcport->logout_on_delete = 1;
5900     fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
5901 
5902     if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
5903         fcport->tgt_short_link_down_cnt++;
5904         fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
5905     }
5906 
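         /* Keep the existing N_Port handle in N2N and local loop topologies. */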
5907     switch (vha->hw->current_topology) {
5908     case ISP_CFG_N:
5909     case ISP_CFG_NL:
5910         fcport->keep_nport_handle = 1;
5911         break;
5912     default:
5913         break;
5914     }
5915 
5916     qla2x00_iidma_fcport(vha, fcport);
5917 
5918     qla2x00_dfs_create_rport(vha, fcport);
5919 
5920     qla24xx_update_fcport_fcp_prio(vha, fcport);
5921 
5922     switch (vha->host->active_mode) {
5923     case MODE_INITIATOR:
5924         qla2x00_reg_remote_port(vha, fcport);
5925         break;
5926     case MODE_TARGET:
5927         if (!vha->vha_tgt.qla_tgt->tgt_stop &&
5928             !vha->vha_tgt.qla_tgt->tgt_stopped)
5929             qlt_fc_port_added(vha, fcport);
5930         break;
5931     case MODE_DUAL:
5932         qla2x00_reg_remote_port(vha, fcport);
5933         if (!vha->vha_tgt.qla_tgt->tgt_stop &&
5934             !vha->vha_tgt.qla_tgt->tgt_stopped)
5935             qlt_fc_port_added(vha, fcport);
5936         break;
5937     default:
5938         break;
5939     }
5940 
5941     if (NVME_TARGET(vha->hw, fcport))
5942         qla_nvme_register_remote(vha, fcport);
5943 
5944     qla2x00_set_fcport_state(fcport, FCS_ONLINE);
5945 
5946     if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
5947         if (fcport->id_changed) {
5948             fcport->id_changed = 0;
5949             ql_dbg(ql_dbg_disc, vha, 0x20d7,
5950                 "%s %d %8phC post gfpnid fcp_cnt %d\n",
5951                 __func__, __LINE__, fcport->port_name,
5952                 vha->fcport_count);
5953             qla24xx_post_gfpnid_work(vha, fcport);
5954         } else {
5955             ql_dbg(ql_dbg_disc, vha, 0x20d7,
5956                 "%s %d %8phC post gpsc fcp_cnt %d\n",
5957                 __func__, __LINE__, fcport->port_name,
5958                 vha->fcport_count);
5959             qla24xx_post_gpsc_work(vha, fcport);
5960         }
5961     }
5962 
5963     qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
5964 }
5965 
5966 void qla_register_fcport_fn(struct work_struct *work)
5967 {
5968     fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
5969     u32 rscn_gen = fcport->rscn_gen;
5970     u16 data[2];
5971 
5972     if (IS_SW_RESV_ADDR(fcport->d_id))
5973         return;
5974 
5975     qla2x00_update_fcport(fcport->vha, fcport);
5976 
5977     ql_dbg(ql_dbg_disc, fcport->vha, 0x911e,
5978            "%s rscn gen %d/%d next DS %d\n", __func__,
5979            rscn_gen, fcport->rscn_gen, fcport->next_disc_state);
5980 
5981     if (rscn_gen != fcport->rscn_gen) {
5982         /* RSCN(s) came in while registration was in progress */
5983         switch (fcport->next_disc_state) {
5984         case DSC_DELETE_PEND:
5985             qlt_schedule_sess_for_deletion(fcport);
5986             break;
5987         case DSC_ADISC:
5988             data[0] = data[1] = 0;
5989             qla2x00_post_async_adisc_work(fcport->vha, fcport,
5990                 data);
5991             break;
5992         default:
5993             break;
5994         }
5995     }
5996 }
5997 
5998 /*
5999  * qla2x00_configure_fabric
6000  *      Setup SNS devices with loop ID's.
6001  *
6002  * Input:
6003  *      ha = adapter block pointer.
6004  *
6005  * Returns:
6006  *      0 = success.
6007  *      BIT_0 = error
6008  */
6009 static int
6010 qla2x00_configure_fabric(scsi_qla_host_t *vha)
6011 {
6012     int rval;
6013     fc_port_t   *fcport;
6014     uint16_t    mb[MAILBOX_REGISTER_COUNT];
6015     uint16_t    loop_id;
6016     LIST_HEAD(new_fcports);
6017     struct qla_hw_data *ha = vha->hw;
6018     int     discovery_gen;
6019 
6020     /* If FL port exists, then SNS is present */
6021     if (IS_FWI2_CAPABLE(ha))
6022         loop_id = NPH_F_PORT;
6023     else
6024         loop_id = SNS_FL_PORT;
6025     rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
6026     if (rval != QLA_SUCCESS) {
6027         ql_dbg(ql_dbg_disc, vha, 0x20a0,
6028             "MBX_GET_PORT_NAME failed, No FL Port.\n");
6029 
6030         vha->device_flags &= ~SWITCH_FOUND;
6031         return (QLA_SUCCESS);
6032     }
6033     vha->device_flags |= SWITCH_FOUND;
6034 
6035     rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_port_name, 0);
6036     if (rval != QLA_SUCCESS)
6037         ql_dbg(ql_dbg_disc, vha, 0x20ff,
6038             "Failed to get Fabric Port Name\n");
6039 
6040     if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6041         rval = qla2x00_send_change_request(vha, 0x3, 0);
6042         if (rval != QLA_SUCCESS)
6043             ql_log(ql_log_warn, vha, 0x121,
6044                 "Failed to enable receiving of RSCN requests: 0x%x.\n",
6045                 rval);
6046     }
6047 
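         /* Single-pass do/while(0): error paths break out of the fabric login/registration/scan sequence. */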
6048     do {
6049         qla2x00_mgmt_svr_login(vha);
6050 
6051         /* Ensure we are logged into the SNS. */
6052         loop_id = NPH_SNS_LID(ha);
6053         rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
6054             0xfc, mb, BIT_1|BIT_0);
6055         if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
6056             ql_dbg(ql_dbg_disc, vha, 0x20a1,
6057                 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
6058                 loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
6059             set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6060             return rval;
6061         }
6062 
6063         /* FDMI support. */
6064         if (ql2xfdmienable &&
6065             test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
6066             qla2x00_fdmi_register(vha);
6067 
6068         if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
6069             if (qla2x00_rft_id(vha)) {
6070                 /* EMPTY */
6071                 ql_dbg(ql_dbg_disc, vha, 0x20a2,
6072                     "Register FC-4 TYPE failed.\n");
6073                 if (test_bit(LOOP_RESYNC_NEEDED,
6074                     &vha->dpc_flags))
6075                     break;
6076             }
6077             if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
6078                 /* EMPTY */
6079                 ql_dbg(ql_dbg_disc, vha, 0x209a,
6080                     "Register FC-4 Features failed.\n");
6081                 if (test_bit(LOOP_RESYNC_NEEDED,
6082                     &vha->dpc_flags))
6083                     break;
6084             }
6085             if (vha->flags.nvme_enabled) {
6086                 if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
6087                     ql_dbg(ql_dbg_disc, vha, 0x2049,
6088                         "Register NVME FC Type Features failed.\n");
6089                 }
6090             }
6091             if (qla2x00_rnn_id(vha)) {
6092                 /* EMPTY */
6093                 ql_dbg(ql_dbg_disc, vha, 0x2104,
6094                     "Register Node Name failed.\n");
6095                 if (test_bit(LOOP_RESYNC_NEEDED,
6096                     &vha->dpc_flags))
6097                     break;
6098             } else if (qla2x00_rsnn_nn(vha)) {
6099                 /* EMPTY */
6100                 ql_dbg(ql_dbg_disc, vha, 0x209b,
6101                     "Register Symbolic Node Name failed.\n");
6102                 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6103                     break;
6104             }
6105         }
6106 
6108         /* Mark the time right before querying FW for connected ports.
6109          * This process is long and asynchronous, and by the time it is
6110          * done the collected information might not be accurate anymore;
6111          * e.g. a disconnected port might have re-connected and a brand
6112          * new session has been created. In that case the session's
6113          * generation will be newer than discovery_gen. */
6114         qlt_do_generation_tick(vha, &discovery_gen);
6115 
6116         if (USE_ASYNC_SCAN(ha)) {
6117             rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
6118                 NULL);
6119             if (rval)
6120                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6121         } else  {
6122             list_for_each_entry(fcport, &vha->vp_fcports, list)
6123                 fcport->scan_state = QLA_FCPORT_SCAN;
6124 
6125             rval = qla2x00_find_all_fabric_devs(vha);
6126         }
6127         if (rval != QLA_SUCCESS)
6128             break;
6129     } while (0);
6130 
6131     if (!vha->nvme_local_port && vha->flags.nvme_enabled)
6132         qla_nvme_register_hba(vha);
6133 
6134     if (rval)
6135         ql_dbg(ql_dbg_disc, vha, 0x2068,
6136             "Configure fabric error exit rval=%d.\n", rval);
6137 
6138     return (rval);
6139 }
6140 
6141 /*
6142  * qla2x00_find_all_fabric_devs
6143  *
6144  * Input:
6145  *  ha = adapter block pointer.
6147  *
6148  * Returns:
6149  *  0 = success.
6150  *
6151  * Context:
6152  *  Kernel context.
6153  */
6154 static int
6155 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
6156 {
6157     int     rval;
6158     uint16_t    loop_id;
6159     fc_port_t   *fcport, *new_fcport;
6160     int     found;
6161 
6162     sw_info_t   *swl;
6163     int     swl_idx;
6164     int     first_dev, last_dev;
6165     port_id_t   wrap = {}, nxt_d_id;
6166     struct qla_hw_data *ha = vha->hw;
6167     struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6168     unsigned long flags;
6169 
6170     rval = QLA_SUCCESS;
6171 
6172     /* Try GID_PT to get device list, else GAN. */
6173     if (!ha->swl)
6174         ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
6175             GFP_KERNEL);
6176     swl = ha->swl;
6177     if (!swl) {
6178         /*EMPTY*/
6179         ql_dbg(ql_dbg_disc, vha, 0x209c,
6180             "GID_PT allocations failed, fallback on GA_NXT.\n");
6181     } else {
6182         memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
6183         if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
6184             swl = NULL;
6185             if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6186                 return rval;
6187         } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
6188             swl = NULL;
6189             if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6190                 return rval;
6191         } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
6192             swl = NULL;
6193             if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6194                 return rval;
6195         } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
6196             swl = NULL;
6197             if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6198                 return rval;
6199         }
6200 
6201         /* If the other queries succeeded, probe for FC-4 type */
6202         if (swl) {
6203             qla2x00_gff_id(vha, swl);
6204             if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6205                 return rval;
6206         }
6207     }
6208     swl_idx = 0;
6209 
6210     /* Allocate temporary fcport for any new fcports discovered. */
6211     new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6212     if (new_fcport == NULL) {
6213         ql_log(ql_log_warn, vha, 0x209d,
6214             "Failed to allocate memory for fcport.\n");
6215         return (QLA_MEMORY_ALLOC_FAILED);
6216     }
6217     new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
6218     /* Set start port ID scan at adapter ID. */
6219     first_dev = 1;
6220     last_dev = 0;
6221 
6222     /* Starting free loop ID. */
6223     loop_id = ha->min_external_loopid;
6224     for (; loop_id <= ha->max_loop_id; loop_id++) {
6225         if (qla2x00_is_reserved_id(vha, loop_id))
6226             continue;
6227 
6228         if (ha->current_topology == ISP_CFG_FL &&
6229             (atomic_read(&vha->loop_down_timer) ||
6230              LOOP_TRANSITION(vha))) {
6231             atomic_set(&vha->loop_down_timer, 0);
6232             set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6233             set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
6234             break;
6235         }
6236 
6237         if (swl != NULL) {
6238             if (last_dev) {
6239                 wrap.b24 = new_fcport->d_id.b24;
6240             } else {
6241                 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
6242                 memcpy(new_fcport->node_name,
6243                     swl[swl_idx].node_name, WWN_SIZE);
6244                 memcpy(new_fcport->port_name,
6245                     swl[swl_idx].port_name, WWN_SIZE);
6246                 memcpy(new_fcport->fabric_port_name,
6247                     swl[swl_idx].fabric_port_name, WWN_SIZE);
6248                 new_fcport->fp_speed = swl[swl_idx].fp_speed;
6249                 new_fcport->fc4_type = swl[swl_idx].fc4_type;
6250 
6251                 new_fcport->nvme_flag = 0;
6252                 if (vha->flags.nvme_enabled &&
6253                     swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) {
6254                     ql_log(ql_log_info, vha, 0x2131,
6255                         "FOUND: NVME port %8phC as FC Type 28h\n",
6256                         new_fcport->port_name);
6257                 }
6258 
6259                 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
6260                     last_dev = 1;
6261                 }
6262                 swl_idx++;
6263             }
6264         } else {
6265             /* Send GA_NXT to the switch */
6266             rval = qla2x00_ga_nxt(vha, new_fcport);
6267             if (rval != QLA_SUCCESS) {
6268                 ql_log(ql_log_warn, vha, 0x209e,
6269                     "SNS scan failed -- assuming "
6270                     "zero-entry result.\n");
6271                 rval = QLA_SUCCESS;
6272                 break;
6273             }
6274         }
6275 
6276         /* If wrap on switch device list, exit. */
6277         if (first_dev) {
6278             wrap.b24 = new_fcport->d_id.b24;
6279             first_dev = 0;
6280         } else if (new_fcport->d_id.b24 == wrap.b24) {
6281             ql_dbg(ql_dbg_disc, vha, 0x209f,
6282                 "Device wrap (%02x%02x%02x).\n",
6283                 new_fcport->d_id.b.domain,
6284                 new_fcport->d_id.b.area,
6285                 new_fcport->d_id.b.al_pa);
6286             break;
6287         }
6288 
6289         /* Bypass if same physical adapter. */
6290         if (new_fcport->d_id.b24 == base_vha->d_id.b24)
6291             continue;
6292 
6293         /* Bypass virtual ports of the same host. */
6294         if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
6295             continue;
6296 
6297         /* Bypass if same domain and area of adapter. */
6298         if (((new_fcport->d_id.b24 & 0xffff00) ==
6299             (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
6300             ISP_CFG_FL)
6301                 continue;
6302 
6303         /* Bypass reserved domain fields. */
6304         if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
6305             continue;
6306 
6307         /* Bypass ports whose FC-4 type is not FCP (SCSI) */
6308         if (ql2xgffidenable &&
6309             (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) &&
6310             new_fcport->fc4_type != 0))
6311             continue;
6312 
6313         spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6314 
6315         /* Locate matching device in database. */
6316         found = 0;
6317         list_for_each_entry(fcport, &vha->vp_fcports, list) {
6318             if (memcmp(new_fcport->port_name, fcport->port_name,
6319                 WWN_SIZE))
6320                 continue;
6321 
6322             fcport->scan_state = QLA_FCPORT_FOUND;
6323 
6324             found++;
6325 
6326             /* Update port state. */
6327             memcpy(fcport->fabric_port_name,
6328                 new_fcport->fabric_port_name, WWN_SIZE);
6329             fcport->fp_speed = new_fcport->fp_speed;
6330 
6331             /*
6332              * If the address is the same and the state is FCS_ONLINE
6333              * (or we are in target mode), nothing has changed.
6334              */
6335             if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
6336                 (atomic_read(&fcport->state) == FCS_ONLINE ||
6337                  (vha->host->active_mode == MODE_TARGET))) {
6338                 break;
6339             }
6340 
6341             if (fcport->login_retry == 0)
6342                 fcport->login_retry =
6343                     vha->hw->login_retry_count;
6344             /*
6345              * If the device was not a fabric device before, mark it for login.
6346              */
6347             if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
6348                 fcport->d_id.b24 = new_fcport->d_id.b24;
6349                 qla2x00_clear_loop_id(fcport);
6350                 fcport->flags |= (FCF_FABRIC_DEVICE |
6351                     FCF_LOGIN_NEEDED);
6352                 break;
6353             }
6354 
6355             /*
6356              * Port ID changed or device was marked to be updated;
6357              * Log it out if still logged in and mark it for
6358              * relogin later.
6359              */
6360             if (qla_tgt_mode_enabled(base_vha)) {
6361                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
6362                      "port changed FC ID, %8phC"
6363                      " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
6364                      fcport->port_name,
6365                      fcport->d_id.b.domain,
6366                      fcport->d_id.b.area,
6367                      fcport->d_id.b.al_pa,
6368                      fcport->loop_id,
6369                      new_fcport->d_id.b.domain,
6370                      new_fcport->d_id.b.area,
6371                      new_fcport->d_id.b.al_pa);
6372                 fcport->d_id.b24 = new_fcport->d_id.b24;
6373                 break;
6374             }
6375 
6376             fcport->d_id.b24 = new_fcport->d_id.b24;
6377             fcport->flags |= FCF_LOGIN_NEEDED;
6378             break;
6379         }
6380 
6381         if (found && NVME_TARGET(vha->hw, fcport)) {
6382             if (fcport->disc_state == DSC_DELETE_PEND) {
6383                 qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
6384                 vha->fcport_count--;
6385                 fcport->login_succ = 0;
6386             }
6387         }
6388 
6389         if (found) {
6390             spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6391             continue;
6392         }
6393         /* If device was not in our fcports list, then add it. */
6394         new_fcport->scan_state = QLA_FCPORT_FOUND;
6395         list_add_tail(&new_fcport->list, &vha->vp_fcports);
6396 
6397         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6398 
6399 
6400         /* Allocate a new replacement fcport. */
6401         nxt_d_id.b24 = new_fcport->d_id.b24;
6402         new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6403         if (new_fcport == NULL) {
6404             ql_log(ql_log_warn, vha, 0xd032,
6405                 "Memory allocation failed for fcport.\n");
6406             return (QLA_MEMORY_ALLOC_FAILED);
6407         }
6408         new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
6409         new_fcport->d_id.b24 = nxt_d_id.b24;
6410     }
6411 
6412     qla2x00_free_fcport(new_fcport);
6413 
6414     /*
6415      * Log out all fabric devices previously marked lost, except FCP2 devices.
6416      */
6417     list_for_each_entry(fcport, &vha->vp_fcports, list) {
6418         if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
6419             break;
6420 
6421         if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
6422             continue;
6423 
6424         if (fcport->scan_state == QLA_FCPORT_SCAN) {
6425             if ((qla_dual_mode_enabled(vha) ||
6426                 qla_ini_mode_enabled(vha)) &&
6427                 atomic_read(&fcport->state) == FCS_ONLINE) {
6428                 qla2x00_mark_device_lost(vha, fcport,
6429                     ql2xplogiabsentdevice);
6430                 if (fcport->loop_id != FC_NO_LOOP_ID &&
6431                     (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
6432                     fcport->port_type != FCT_INITIATOR &&
6433                     fcport->port_type != FCT_BROADCAST) {
6434                     ql_dbg(ql_dbg_disc, vha, 0x20f0,
6435                         "%s %d %8phC post del sess\n",
6436                         __func__, __LINE__,
6437                         fcport->port_name);
6438                     qlt_schedule_sess_for_deletion(fcport);
6439                     continue;
6440                 }
6441             }
6442         }
6443 
6444         if (fcport->scan_state == QLA_FCPORT_FOUND &&
6445             (fcport->flags & FCF_LOGIN_NEEDED) != 0)
6446             qla24xx_fcport_handle_login(vha, fcport);
6447     }
6448     return (rval);
6449 }
6450 
6451 /* FW does not set aside Loop id for MGMT Server/FFFFFAh */
6452 int
6453 qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
6454 {
6455     int loop_id = FC_NO_LOOP_ID;
6456     int lid = NPH_MGMT_SERVER - vha->vp_idx;
6457     unsigned long flags;
6458     struct qla_hw_data *ha = vha->hw;
6459 
6460     if (vha->vp_idx == 0) {
6461         set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
6462         return NPH_MGMT_SERVER;
6463     }
6464 
6465     /* pick id from high and work down to low */
6466     spin_lock_irqsave(&ha->vport_slock, flags);
6467     for (; lid > 0; lid--) {
6468         if (!test_bit(lid, vha->hw->loop_id_map)) {
6469             set_bit(lid, vha->hw->loop_id_map);
6470             loop_id = lid;
6471             break;
6472         }
6473     }
6474     spin_unlock_irqrestore(&ha->vport_slock, flags);
6475 
6476     return loop_id;
6477 }
6478 
6479 /*
6480  * qla2x00_fabric_login
6481  *  Issue fabric login command.
6482  *
6483  * Input:
6484  *  ha = adapter block pointer.
6485  *  device = pointer to FC device type structure.
6486  *
6487  * Returns:
6488  *      0 - Login successful
6489  *      1 - Login failed
6490  *      2 - Initiator device
6491  *      3 - Fatal error
6492  */
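/*
 * Mailbox status handling below (from the fabric_login mailbox command):
 *   MBS_PORT_ID_USED     - firmware already knows this port under loop ID
 *                          mb[1]; retry the login with that ID.
 *   MBS_LOOP_ID_USED     - loop ID taken; find a new free loop ID and retry.
 *   MBS_COMMAND_COMPLETE - success; record port type (mb[1]) and class of
 *                          service / confirmation support (mb[10]).
 *   MBS_COMMAND_ERROR    - log the port out, mark the device lost, return 1.
 *   anything else        - treated as a fatal, unhandled error (return 3).
 */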
6493 int
6494 qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
6495     uint16_t *next_loopid)
6496 {
6497     int rval;
6498     int retry;
6499     uint16_t tmp_loopid;
6500     uint16_t mb[MAILBOX_REGISTER_COUNT];
6501     struct qla_hw_data *ha = vha->hw;
6502 
6503     retry = 0;
6504     tmp_loopid = 0;
6505 
6506     for (;;) {
6507         ql_dbg(ql_dbg_disc, vha, 0x2000,
6508             "Trying Fabric Login w/loop id 0x%04x for port "
6509             "%02x%02x%02x.\n",
6510             fcport->loop_id, fcport->d_id.b.domain,
6511             fcport->d_id.b.area, fcport->d_id.b.al_pa);
6512 
6513         /* Login fcport on switch. */
6514         rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
6515             fcport->d_id.b.domain, fcport->d_id.b.area,
6516             fcport->d_id.b.al_pa, mb, BIT_0);
6517         if (rval != QLA_SUCCESS) {
6518             return rval;
6519         }
6520         if (mb[0] == MBS_PORT_ID_USED) {
6521             /*
6522              * Device has another loop ID.  The firmware team
6523              * recommends the driver perform an implicit login with
6524              * the specified ID again. The ID we just used is saved
6525              * here so we return with an ID that can be tried by
6526              * the next login.
6527              */
6528             retry++;
6529             tmp_loopid = fcport->loop_id;
6530             fcport->loop_id = mb[1];
6531 
6532             ql_dbg(ql_dbg_disc, vha, 0x2001,
6533                 "Fabric Login: port in use - next loop "
6534                 "id=0x%04x, port id= %02x%02x%02x.\n",
6535                 fcport->loop_id, fcport->d_id.b.domain,
6536                 fcport->d_id.b.area, fcport->d_id.b.al_pa);
6537 
6538         } else if (mb[0] == MBS_COMMAND_COMPLETE) {
6539             /*
6540              * Login succeeded.
6541              */
6542             if (retry) {
6543                 /* A retry occurred before. */
6544                 *next_loopid = tmp_loopid;
6545             } else {
6546                 /*
6547                  * No retry occurred before. Just increment the
6548                  * ID value for next login.
6549                  */
6550                 *next_loopid = (fcport->loop_id + 1);
6551             }
6552 
6553             if (mb[1] & BIT_0) {
6554                 fcport->port_type = FCT_INITIATOR;
6555             } else {
6556                 fcport->port_type = FCT_TARGET;
6557                 if (mb[1] & BIT_1) {
6558                     fcport->flags |= FCF_FCP2_DEVICE;
6559                 }
6560             }
6561 
6562             if (mb[10] & BIT_0)
6563                 fcport->supported_classes |= FC_COS_CLASS2;
6564             if (mb[10] & BIT_1)
6565                 fcport->supported_classes |= FC_COS_CLASS3;
6566 
6567             if (IS_FWI2_CAPABLE(ha)) {
6568                 if (mb[10] & BIT_7)
6569                     fcport->flags |=
6570                         FCF_CONF_COMP_SUPPORTED;
6571             }
6572 
6573             rval = QLA_SUCCESS;
6574             break;
6575         } else if (mb[0] == MBS_LOOP_ID_USED) {
6576             /*
6577              * Loop ID already used, try next loop ID.
6578              */
6579             fcport->loop_id++;
6580             rval = qla2x00_find_new_loop_id(vha, fcport);
6581             if (rval != QLA_SUCCESS) {
6582                 /* Ran out of loop IDs to use */
6583                 break;
6584             }
6585         } else if (mb[0] == MBS_COMMAND_ERROR) {
6586             /*
6587              * Firmware possibly timed out during login. If NO
6588              * retries are left to do then the device is declared
6589              * dead.
6590              */
6591             *next_loopid = fcport->loop_id;
6592             ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6593                 fcport->d_id.b.domain, fcport->d_id.b.area,
6594                 fcport->d_id.b.al_pa);
6595             qla2x00_mark_device_lost(vha, fcport, 1);
6596 
6597             rval = 1;
6598             break;
6599         } else {
6600             /*
6601              * unrecoverable / not handled error
6602              */
6603             ql_dbg(ql_dbg_disc, vha, 0x2002,
6604                 "Failed=%x port_id=%02x%02x%02x loop_id=%x "
6605                 "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
6606                 fcport->d_id.b.area, fcport->d_id.b.al_pa,
6607                 fcport->loop_id, jiffies);
6608 
6609             *next_loopid = fcport->loop_id;
6610             ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6611                 fcport->d_id.b.domain, fcport->d_id.b.area,
6612                 fcport->d_id.b.al_pa);
6613             qla2x00_clear_loop_id(fcport);
6614             fcport->login_retry = 0;
6615 
6616             rval = 3;
6617             break;
6618         }
6619     }
6620 
6621     return (rval);
6622 }
6623 
6624 /*
6625  * qla2x00_local_device_login
6626  *  Issue local device login command.
6627  *
6628  * Input:
6629  *  ha = adapter block pointer.
6630  *  loop_id = loop id of device to login to.
6631  *
6632  * Returns (no #define exists for these values):
6633  *      0 - Login successful
6634  *      1 - Login failed
6635  *      3 - Fatal error
6636  */
6637 int
6638 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
6639 {
6640     int     rval;
6641     uint16_t    mb[MAILBOX_REGISTER_COUNT];
6642 
6643     memset(mb, 0, sizeof(mb));
6644     rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
6645     if (rval == QLA_SUCCESS) {
6646         /* Interrogate mailbox registers for any errors */
6647         if (mb[0] == MBS_COMMAND_ERROR)
6648             rval = 1;
6649         else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
6650             /* device not in PCB table */
6651             rval = 3;
6652     }
6653 
6654     return (rval);
6655 }
6656 
6657 /*
6658  *  qla2x00_loop_resync
6659  *      Resync with fibre channel devices.
6660  *
6661  * Input:
6662  *      ha = adapter block pointer.
6663  *
6664  * Returns:
6665  *      0 = success
6666  */
6667 int
6668 qla2x00_loop_resync(scsi_qla_host_t *vha)
6669 {
6670     int rval = QLA_SUCCESS;
6671     uint32_t wait_time;
6672 
6673     clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6674     if (vha->flags.online) {
6675         if (!(rval = qla2x00_fw_ready(vha))) {
6676             /* Wait at most MAX_TARGET RSCNs for a stable link. */
6677             wait_time = 256;
6678             do {
6679                 if (!IS_QLAFX00(vha->hw)) {
6680                     /*
6681                      * Issue a marker after FW becomes
6682                      * ready.
6683                      */
6684                     qla2x00_marker(vha, vha->hw->base_qpair,
6685                         0, 0, MK_SYNC_ALL);
6686                     vha->marker_needed = 0;
6687                 }
6688 
6689                 /* Remap devices on Loop. */
6690                 clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6691 
6692                 if (IS_QLAFX00(vha->hw))
6693                     qlafx00_configure_devices(vha);
6694                 else
6695                     qla2x00_configure_loop(vha);
6696 
6697                 wait_time--;
6698             } while (!atomic_read(&vha->loop_down_timer) &&
6699                 !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6700                 && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
6701                 &vha->dpc_flags)));
6702         }
6703     }
6704 
6705     if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
6706         return (QLA_FUNCTION_FAILED);
6707 
6708     if (rval)
6709         ql_dbg(ql_dbg_disc, vha, 0x206c,
6710             "%s *** FAILED ***.\n", __func__);
6711 
6712     return (rval);
6713 }
6714 
6715 /*
6716 * qla2x00_perform_loop_resync
6717 * Description: This function will set the appropriate flags and call
6718 *              qla2x00_loop_resync. If successful, the loop will be resynced.
6719 * Arguments : scsi_qla_host_t pointer
6720 * return    : Success or Failure
6721 */
6722 
6723 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
6724 {
6725     int32_t rval = 0;
6726 
6727     if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
6728         /* Configure the flags so that resync happens properly */
6729         atomic_set(&ha->loop_down_timer, 0);
6730         if (!(ha->device_flags & DFLG_NO_CABLE)) {
6731             atomic_set(&ha->loop_state, LOOP_UP);
6732             set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
6733             set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
6734             set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
6735 
6736             rval = qla2x00_loop_resync(ha);
6737         } else
6738             atomic_set(&ha->loop_state, LOOP_DEAD);
6739 
6740         clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
6741     }
6742 
6743     return rval;
6744 }
6745 
6746 void
6747 qla2x00_update_fcports(scsi_qla_host_t *base_vha)
6748 {
6749     fc_port_t *fcport;
6750     struct scsi_qla_host *vha, *tvp;
6751     struct qla_hw_data *ha = base_vha->hw;
6752     unsigned long flags;
6753 
6754     spin_lock_irqsave(&ha->vport_slock, flags);
6755     /* Go with deferred removal of rport references. */
6756     list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
6757         atomic_inc(&vha->vref_count);
6758         list_for_each_entry(fcport, &vha->vp_fcports, list) {
6759             if (fcport->drport &&
6760                 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
6761                 spin_unlock_irqrestore(&ha->vport_slock, flags);
6762                 qla2x00_rport_del(fcport);
6763 
6764                 spin_lock_irqsave(&ha->vport_slock, flags);
6765             }
6766         }
6767         atomic_dec(&vha->vref_count);
6768         wake_up(&vha->vref_waitq);
6769     }
6770     spin_unlock_irqrestore(&ha->vport_slock, flags);
6771 }
6772 
6773 /* Assumes idc_lock always held on entry */
6774 void
6775 qla83xx_reset_ownership(scsi_qla_host_t *vha)
6776 {
6777     struct qla_hw_data *ha = vha->hw;
6778     uint32_t drv_presence, drv_presence_mask;
6779     uint32_t dev_part_info1, dev_part_info2, class_type;
6780     uint32_t class_type_mask = 0x3;
6781     uint16_t fcoe_other_function = 0xffff, i;
6782 
6783     if (IS_QLA8044(ha)) {
6784         drv_presence = qla8044_rd_direct(vha,
6785             QLA8044_CRB_DRV_ACTIVE_INDEX);
6786         dev_part_info1 = qla8044_rd_direct(vha,
6787             QLA8044_CRB_DEV_PART_INFO_INDEX);
6788         dev_part_info2 = qla8044_rd_direct(vha,
6789             QLA8044_CRB_DEV_PART_INFO2);
6790     } else {
6791         qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6792         qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
6793         qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
6794     }
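    /*
     * dev_part_info1/2 carry a 4-bit class type per function:
     * functions 0-7 in info1, functions 8-15 in info2. Locate the
     * other FCoE function, if any, to build the presence mask below.
     */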
6795     for (i = 0; i < 8; i++) {
6796         class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
6797         if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6798             (i != ha->portnum)) {
6799             fcoe_other_function = i;
6800             break;
6801         }
6802     }
6803     if (fcoe_other_function == 0xffff) {
6804         for (i = 0; i < 8; i++) {
6805             class_type = ((dev_part_info2 >> (i * 4)) &
6806                 class_type_mask);
6807             if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6808                 ((i + 8) != ha->portnum)) {
6809                 fcoe_other_function = i + 8;
6810                 break;
6811             }
6812         }
6813     }
6814     /*
6815      * Prepare drv-presence mask based on fcoe functions present.
6816      * However consider only valid physical fcoe function numbers (0-15).
6817      */
6818     drv_presence_mask = ~((1 << (ha->portnum)) |
6819             ((fcoe_other_function == 0xffff) ?
6820              0 : (1 << (fcoe_other_function))));
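    /*
     * For example, with portnum == 2 and fcoe_other_function == 6 the
     * mask is ~((1 << 2) | (1 << 6)) = ~0x44, i.e. "any driver present
     * other than the two FCoE functions".
     */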
6821 
6822     /* We are the reset owner iff:
6823      *    - No other protocol drivers present.
6824      *    - This is the lowest among fcoe functions. */
6825     if (!(drv_presence & drv_presence_mask) &&
6826             (ha->portnum < fcoe_other_function)) {
6827         ql_dbg(ql_dbg_p3p, vha, 0xb07f,
6828             "This host is Reset owner.\n");
6829         ha->flags.nic_core_reset_owner = 1;
6830     }
6831 }
6832 
6833 static int
6834 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
6835 {
6836     int rval = QLA_SUCCESS;
6837     struct qla_hw_data *ha = vha->hw;
6838     uint32_t drv_ack;
6839 
6840     rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6841     if (rval == QLA_SUCCESS) {
6842         drv_ack |= (1 << ha->portnum);
6843         rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6844     }
6845 
6846     return rval;
6847 }
6848 
6849 static int
6850 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
6851 {
6852     int rval = QLA_SUCCESS;
6853     struct qla_hw_data *ha = vha->hw;
6854     uint32_t drv_ack;
6855 
6856     rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6857     if (rval == QLA_SUCCESS) {
6858         drv_ack &= ~(1 << ha->portnum);
6859         rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6860     }
6861 
6862     return rval;
6863 }
6864 
6865 /* Assumes idc-lock always held on entry */
6866 void
6867 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
6868 {
6869     struct qla_hw_data *ha = vha->hw;
6870     uint32_t idc_audit_reg = 0, duration_secs = 0;
6871 
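    /*
     * The audit register packs our port number in the low bits, the
     * audit type shifted left by 7, and a timestamp or duration (in
     * seconds) shifted left by 8, matching the encodings below.
     */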
6872     switch (audit_type) {
6873     case IDC_AUDIT_TIMESTAMP:
6874         ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
6875         idc_audit_reg = (ha->portnum) |
6876             (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
6877         qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6878         break;
6879 
6880     case IDC_AUDIT_COMPLETION:
6881         duration_secs = ((jiffies_to_msecs(jiffies) -
6882             jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
6883         idc_audit_reg = (ha->portnum) |
6884             (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
6885         qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6886         break;
6887 
6888     default:
6889         ql_log(ql_log_warn, vha, 0xb078,
6890             "Invalid audit type specified.\n");
6891         break;
6892     }
6893 }
6894 
6895 /* Assumes idc_lock always held on entry */
6896 static int
6897 qla83xx_initiating_reset(scsi_qla_host_t *vha)
6898 {
6899     struct qla_hw_data *ha = vha->hw;
6900     uint32_t  idc_control, dev_state;
6901 
6902     __qla83xx_get_idc_control(vha, &idc_control);
6903     if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
6904         ql_log(ql_log_info, vha, 0xb080,
6905             "NIC Core reset has been disabled. idc-control=0x%x\n",
6906             idc_control);
6907         return QLA_FUNCTION_FAILED;
6908     }
6909 
6910     /* Set NEED-RESET iff in READY state and we are the reset-owner */
6911     qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6912     if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
6913         qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
6914             QLA8XXX_DEV_NEED_RESET);
6915         ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
6916         qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
6917     } else {
6918         ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n",
6919                 qdev_state(dev_state));
6920 
6921         /* SV: XXX: Is timeout required here? */
6922         /* Wait for IDC state change READY -> NEED_RESET */
6923         while (dev_state == QLA8XXX_DEV_READY) {
6924             qla83xx_idc_unlock(vha, 0);
6925             msleep(200);
6926             qla83xx_idc_lock(vha, 0);
6927             qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6928         }
6929     }
6930 
6931     /* Send IDC ack by writing to drv-ack register */
6932     __qla83xx_set_drv_ack(vha);
6933 
6934     return QLA_SUCCESS;
6935 }
6936 
6937 int
6938 __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
6939 {
6940     return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
6941 }
6942 
6943 int
6944 __qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
6945 {
6946     return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
6947 }
6948 
6949 static int
6950 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
6951 {
6952     uint32_t drv_presence = 0;
6953     struct qla_hw_data *ha = vha->hw;
6954 
6955     qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6956     if (drv_presence & (1 << ha->portnum))
6957         return QLA_SUCCESS;
6958     else
6959         return QLA_TEST_FAILED;
6960 }
6961 
6962 int
6963 qla83xx_nic_core_reset(scsi_qla_host_t *vha)
6964 {
6965     int rval = QLA_SUCCESS;
6966     struct qla_hw_data *ha = vha->hw;
6967 
6968     ql_dbg(ql_dbg_p3p, vha, 0xb058,
6969         "Entered  %s().\n", __func__);
6970 
6971     if (vha->device_flags & DFLG_DEV_FAILED) {
6972         ql_log(ql_log_warn, vha, 0xb059,
6973             "Device in unrecoverable FAILED state.\n");
6974         return QLA_FUNCTION_FAILED;
6975     }
6976 
6977     qla83xx_idc_lock(vha, 0);
6978 
6979     if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
6980         ql_log(ql_log_warn, vha, 0xb05a,
6981             "Function=0x%x has been removed from IDC participation.\n",
6982             ha->portnum);
6983         rval = QLA_FUNCTION_FAILED;
6984         goto exit;
6985     }
6986 
6987     qla83xx_reset_ownership(vha);
6988 
6989     rval = qla83xx_initiating_reset(vha);
6990 
6991     /*
6992      * Perform reset if we are the reset-owner,
6993      * else wait till IDC state changes to READY/FAILED.
6994      */
6995     if (rval == QLA_SUCCESS) {
6996         rval = qla83xx_idc_state_handler(vha);
6997 
6998         if (rval == QLA_SUCCESS)
6999             ha->flags.nic_core_hung = 0;
7000         __qla83xx_clear_drv_ack(vha);
7001     }
7002 
7003 exit:
7004     qla83xx_idc_unlock(vha, 0);
7005 
7006     ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);
7007 
7008     return rval;
7009 }
7010 
7011 int
7012 qla2xxx_mctp_dump(scsi_qla_host_t *vha)
7013 {
7014     struct qla_hw_data *ha = vha->hw;
7015     int rval = QLA_FUNCTION_FAILED;
7016 
7017     if (!IS_MCTP_CAPABLE(ha)) {
7018         /* This message can be removed from the final version */
7019         ql_log(ql_log_info, vha, 0x506d,
7020             "This board is not MCTP capable\n");
7021         return rval;
7022     }
7023 
7024     if (!ha->mctp_dump) {
7025         ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
7026             MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
7027 
7028         if (!ha->mctp_dump) {
7029             ql_log(ql_log_warn, vha, 0x506e,
7030                 "Failed to allocate memory for mctp dump\n");
7031             return rval;
7032         }
7033     }
7034 
7035 #define MCTP_DUMP_STR_ADDR  0x00000000
7036     rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
7037         MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
7038     if (rval != QLA_SUCCESS) {
7039         ql_log(ql_log_warn, vha, 0x506f,
7040             "Failed to capture mctp dump\n");
7041     } else {
7042         ql_log(ql_log_info, vha, 0x5070,
7043             "Mctp dump capture for host (%ld/%p).\n",
7044             vha->host_no, ha->mctp_dump);
7045         ha->mctp_dumped = 1;
7046     }
7047 
7048     if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
7049         ha->flags.nic_core_reset_hdlr_active = 1;
7050         rval = qla83xx_restart_nic_firmware(vha);
7051         if (rval)
7052             /* NIC Core reset failed. */
7053             ql_log(ql_log_warn, vha, 0x5071,
7054                 "Failed to restart nic firmware\n");
7055         else
7056             ql_dbg(ql_dbg_p3p, vha, 0xb084,
7057                 "Restarted NIC firmware successfully.\n");
7058         ha->flags.nic_core_reset_hdlr_active = 0;
7059     }
7060 
7061     return rval;
7062 
7063 }
7064 
7065 /*
7066 * qla2x00_quiesce_io
7067 * Description: This function will block new I/Os.
7068 *              It does not abort any I/Os, as the context
7069 *              is not destroyed during quiescence.
7070 * Arguments: scsi_qla_host_t
7071 * return   : void
7072 */
7073 void
7074 qla2x00_quiesce_io(scsi_qla_host_t *vha)
7075 {
7076     struct qla_hw_data *ha = vha->hw;
7077     struct scsi_qla_host *vp, *tvp;
7078     unsigned long flags;
7079 
7080     ql_dbg(ql_dbg_dpc, vha, 0x401d,
7081         "Quiescing I/O - ha=%p.\n", ha);
7082 
7083     atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
7084     if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
7085         atomic_set(&vha->loop_state, LOOP_DOWN);
7086         qla2x00_mark_all_devices_lost(vha);
7087 
7088         spin_lock_irqsave(&ha->vport_slock, flags);
7089         list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7090             atomic_inc(&vp->vref_count);
7091             spin_unlock_irqrestore(&ha->vport_slock, flags);
7092 
7093             qla2x00_mark_all_devices_lost(vp);
7094 
7095             spin_lock_irqsave(&ha->vport_slock, flags);
7096             atomic_dec(&vp->vref_count);
7097         }
7098         spin_unlock_irqrestore(&ha->vport_slock, flags);
7099     } else {
7100         if (!atomic_read(&vha->loop_down_timer))
7101             atomic_set(&vha->loop_down_timer,
7102                     LOOP_DOWN_TIME);
7103     }
7104     /* Wait for pending cmds to complete */
7105     WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
7106              != QLA_SUCCESS);
7107 }
7108 
7109 void
7110 qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
7111 {
7112     struct qla_hw_data *ha = vha->hw;
7113     struct scsi_qla_host *vp, *tvp;
7114     unsigned long flags;
7115     fc_port_t *fcport;
7116     u16 i;
7117 
7118     /* For ISP82XX, the driver waits for completion of the commands,
7119      * so the online flag should remain set.
7120      */
7121     if (!(IS_P3P_TYPE(ha)))
7122         vha->flags.online = 0;
7123     ha->flags.chip_reset_done = 0;
7124     clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
7125     vha->qla_stats.total_isp_aborts++;
7126 
7127     ql_log(ql_log_info, vha, 0x00af,
7128         "Performing ISP error recovery - ha=%p.\n", ha);
7129 
7130     ha->flags.purge_mbox = 1;
7131     /* For ISP82XX, reset_chip would just disable interrupts.
7132      * The driver waits for the completion of the commands, so
7133      * the interrupts need to stay enabled; skip the chip reset.
7134      */
7135     if (!(IS_P3P_TYPE(ha)))
7136         ha->isp_ops->reset_chip(vha);
7137 
7138     ha->link_data_rate = PORT_SPEED_UNKNOWN;
7139     SAVE_TOPO(ha);
7140     ha->flags.rida_fmt2 = 0;
7141     ha->flags.n2n_ae = 0;
7142     ha->flags.lip_ae = 0;
7143     ha->current_topology = 0;
7144     QLA_FW_STOPPED(ha);
7145     ha->flags.fw_init_done = 0;
7146     ha->chip_reset++;
7147     ha->base_qpair->chip_reset = ha->chip_reset;
7148     ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
7149     ha->base_qpair->prev_completion_cnt = 0;
7150     for (i = 0; i < ha->max_qpairs; i++) {
7151         if (ha->queue_pair_map[i]) {
7152             ha->queue_pair_map[i]->chip_reset =
7153                 ha->base_qpair->chip_reset;
7154             ha->queue_pair_map[i]->cmd_cnt =
7155                 ha->queue_pair_map[i]->cmd_completion_cnt = 0;
7156             ha->base_qpair->prev_completion_cnt = 0;
7157         }
7158     }
7159 
7160     /* purge MBox commands */
7161     if (atomic_read(&ha->num_pend_mbx_stage3)) {
7162         clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
7163         complete(&ha->mbx_intr_comp);
7164     }
7165 
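    /*
     * Wait up to ~1 second (50 x 20ms) for in-flight mailbox commands
     * to drain before clearing purge_mbox.
     */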
7166     i = 0;
7167     while (atomic_read(&ha->num_pend_mbx_stage3) ||
7168         atomic_read(&ha->num_pend_mbx_stage2) ||
7169         atomic_read(&ha->num_pend_mbx_stage1)) {
7170         msleep(20);
7171         i++;
7172         if (i > 50)
7173             break;
7174     }
7175     ha->flags.purge_mbox = 0;
7176 
7177     atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
7178     if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
7179         atomic_set(&vha->loop_state, LOOP_DOWN);
7180         qla2x00_mark_all_devices_lost(vha);
7181 
7182         spin_lock_irqsave(&ha->vport_slock, flags);
7183         list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7184             atomic_inc(&vp->vref_count);
7185             spin_unlock_irqrestore(&ha->vport_slock, flags);
7186 
7187             qla2x00_mark_all_devices_lost(vp);
7188 
7189             spin_lock_irqsave(&ha->vport_slock, flags);
7190             atomic_dec(&vp->vref_count);
7191         }
7192         spin_unlock_irqrestore(&ha->vport_slock, flags);
7193     } else {
7194         if (!atomic_read(&vha->loop_down_timer))
7195             atomic_set(&vha->loop_down_timer,
7196                 LOOP_DOWN_TIME);
7197     }
7198 
7199     /* Clear all async request states across all VPs. */
7200     list_for_each_entry(fcport, &vha->vp_fcports, list) {
7201         fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7202         fcport->scan_state = 0;
7203     }
7204     spin_lock_irqsave(&ha->vport_slock, flags);
7205     list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7206         atomic_inc(&vp->vref_count);
7207         spin_unlock_irqrestore(&ha->vport_slock, flags);
7208 
7209         list_for_each_entry(fcport, &vp->vp_fcports, list)
7210             fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7211 
7212         spin_lock_irqsave(&ha->vport_slock, flags);
7213         atomic_dec(&vp->vref_count);
7214     }
7215     spin_unlock_irqrestore(&ha->vport_slock, flags);
7216 
7217     /* For ISP 82XX, make sure IO DMA is complete */
7218     if (IS_P3P_TYPE(ha)) {
7219         qla82xx_chip_reset_cleanup(vha);
7220         ql_log(ql_log_info, vha, 0x00b4,
7221                "Done chip reset cleanup.\n");
7222 
7223         /* Done waiting for pending commands. Reset online flag */
7224         vha->flags.online = 0;
7225     }
7226 
7227     /* Requeue all commands in outstanding command list. */
7228     qla2x00_abort_all_cmds(vha, DID_RESET << 16);
7229     /* memory barrier */
7230     wmb();
7231 }
7232 
7233 /*
7234 *  qla2x00_abort_isp
7235 *      Resets ISP and aborts all outstanding commands.
7236 *
7237 * Input:
7238 *      ha           = adapter block pointer.
7239 *
7240 * Returns:
7241 *      0 = success
7242 */
7243 int
7244 qla2x00_abort_isp(scsi_qla_host_t *vha)
7245 {
7246     int rval;
7247     uint8_t        status = 0;
7248     struct qla_hw_data *ha = vha->hw;
7249     struct scsi_qla_host *vp, *tvp;
7250     struct req_que *req = ha->req_q_map[0];
7251     unsigned long flags;
7252 
7253     if (vha->flags.online) {
7254         qla2x00_abort_isp_cleanup(vha);
7255 
7256         vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
7257         vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
7258 
7259         if (vha->hw->flags.port_isolated)
7260             return status;
7261 
7262         if (qla2x00_isp_reg_stat(ha)) {
7263             ql_log(ql_log_info, vha, 0x803f,
7264                    "ISP Abort - ISP reg disconnect, exiting.\n");
7265             return status;
7266         }
7267 
7268         if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
7269             ha->flags.chip_reset_done = 1;
7270             vha->flags.online = 1;
7271             status = 0;
7272             clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7273             return status;
7274         }
7275 
7276         if (IS_QLA8031(ha)) {
7277             ql_dbg(ql_dbg_p3p, vha, 0xb05c,
7278                 "Clearing fcoe driver presence.\n");
7279             if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
7280                 ql_dbg(ql_dbg_p3p, vha, 0xb073,
7281                     "Error while clearing DRV-Presence.\n");
7282         }
7283 
7284         if (unlikely(pci_channel_offline(ha->pdev) &&
7285             ha->flags.pci_channel_io_perm_failure)) {
7286             clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7287             status = 0;
7288             return status;
7289         }
7290 
7291         switch (vha->qlini_mode) {
7292         case QLA2XXX_INI_MODE_DISABLED:
7293             if (!qla_tgt_mode_enabled(vha))
7294                 return 0;
7295             break;
7296         case QLA2XXX_INI_MODE_DUAL:
7297             if (!qla_dual_mode_enabled(vha) &&
7298                 !qla_ini_mode_enabled(vha))
7299                 return 0;
7300             break;
7301         case QLA2XXX_INI_MODE_ENABLED:
7302         default:
7303             break;
7304         }
7305 
7306         ha->isp_ops->get_flash_version(vha, req->ring);
7307 
7308         if (qla2x00_isp_reg_stat(ha)) {
7309             ql_log(ql_log_info, vha, 0x803f,
7310                    "ISP Abort - ISP reg disconnect pre nvram config, exiting.\n");
7311             return status;
7312         }
7313         ha->isp_ops->nvram_config(vha);
7314 
7315         if (qla2x00_isp_reg_stat(ha)) {
7316             ql_log(ql_log_info, vha, 0x803f,
7317                    "ISP Abort - ISP reg disconnect post nvram config, exiting.\n");
7318             return status;
7319         }
7320         if (!qla2x00_restart_isp(vha)) {
7321             clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7322 
7323             if (!atomic_read(&vha->loop_down_timer)) {
7324                 /*
7325                  * Issue marker command only when we are going
7326                  * to start the I/O.
7327                  */
7328                 vha->marker_needed = 1;
7329             }
7330 
7331             vha->flags.online = 1;
7332 
7333             ha->isp_ops->enable_intrs(ha);
7334 
7335             ha->isp_abort_cnt = 0;
7336             clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7337 
7338             if (IS_QLA81XX(ha) || IS_QLA8031(ha))
7339                 qla2x00_get_fw_version(vha);
7340             if (ha->fce) {
7341                 ha->flags.fce_enabled = 1;
7342                 memset(ha->fce, 0,
7343                     fce_calc_size(ha->fce_bufs));
7344                 rval = qla2x00_enable_fce_trace(vha,
7345                     ha->fce_dma, ha->fce_bufs, ha->fce_mb,
7346                     &ha->fce_bufs);
7347                 if (rval) {
7348                     ql_log(ql_log_warn, vha, 0x8033,
7349                         "Unable to reinitialize FCE "
7350                         "(%d).\n", rval);
7351                     ha->flags.fce_enabled = 0;
7352                 }
7353             }
7354 
7355             if (ha->eft) {
7356                 memset(ha->eft, 0, EFT_SIZE);
7357                 rval = qla2x00_enable_eft_trace(vha,
7358                     ha->eft_dma, EFT_NUM_BUFFERS);
7359                 if (rval) {
7360                     ql_log(ql_log_warn, vha, 0x8034,
7361                         "Unable to reinitialize EFT "
7362                         "(%d).\n", rval);
7363                 }
7364             }
7365         } else {    /* failed the ISP abort */
7366             vha->flags.online = 1;
7367             if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
7368                 if (ha->isp_abort_cnt == 0) {
7369                     ql_log(ql_log_fatal, vha, 0x8035,
7370                         "ISP error recover failed - "
7371                         "board disabled.\n");
7372                     /*
7373                      * The next call disables the board
7374                      * completely.
7375                      */
7376                     qla2x00_abort_isp_cleanup(vha);
7377                     vha->flags.online = 0;
7378                     clear_bit(ISP_ABORT_RETRY,
7379                         &vha->dpc_flags);
7380                     status = 0;
7381                 } else { /* schedule another ISP abort */
7382                     ha->isp_abort_cnt--;
7383                     ql_dbg(ql_dbg_taskm, vha, 0x8020,
7384                         "ISP abort - retry remaining %d.\n",
7385                         ha->isp_abort_cnt);
7386                     status = 1;
7387                 }
7388             } else {
7389                 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7390                 ql_dbg(ql_dbg_taskm, vha, 0x8021,
7391                     "ISP error recovery - retrying (%d) "
7392                     "more times.\n", ha->isp_abort_cnt);
7393                 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
7394                 status = 1;
7395             }
7396         }
7397 
7398     }
7399 
7400     if (vha->hw->flags.port_isolated) {
7401         qla2x00_abort_isp_cleanup(vha);
7402         return status;
7403     }
7404 
7405     if (!status) {
7406         ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
7407         qla2x00_configure_hba(vha);
7408         spin_lock_irqsave(&ha->vport_slock, flags);
7409         list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7410             if (vp->vp_idx) {
7411                 atomic_inc(&vp->vref_count);
7412                 spin_unlock_irqrestore(&ha->vport_slock, flags);
7413 
7414                 qla2x00_vp_abort_isp(vp);
7415 
7416                 spin_lock_irqsave(&ha->vport_slock, flags);
7417                 atomic_dec(&vp->vref_count);
7418             }
7419         }
7420         spin_unlock_irqrestore(&ha->vport_slock, flags);
7421 
7422         if (IS_QLA8031(ha)) {
7423             ql_dbg(ql_dbg_p3p, vha, 0xb05d,
7424                 "Setting back fcoe driver presence.\n");
7425             if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
7426                 ql_dbg(ql_dbg_p3p, vha, 0xb074,
7427                     "Error while setting DRV-Presence.\n");
7428         }
7429     } else {
7430         ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
7431                __func__);
7432     }
7433 
7434     return(status);
7435 }
7436 
7437 /*
7438 *  qla2x00_restart_isp
7439 *      restarts the ISP after a reset
7440 *
7441 * Input:
7442 *      ha = adapter block pointer.
7443 *
7444 * Returns:
7445 *      0 = success
7446 */
7447 static int
7448 qla2x00_restart_isp(scsi_qla_host_t *vha)
7449 {
7450     int status;
7451     struct qla_hw_data *ha = vha->hw;
7452 
7453     /* If firmware needs to be loaded */
7454     if (qla2x00_isp_firmware(vha)) {
7455         vha->flags.online = 0;
7456         status = ha->isp_ops->chip_diag(vha);
7457         if (status)
7458             return status;
7459         status = qla2x00_setup_chip(vha);
7460         if (status)
7461             return status;
7462     }
7463 
7464     status = qla2x00_init_rings(vha);
7465     if (status)
7466         return status;
7467 
7468     clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7469     ha->flags.chip_reset_done = 1;
7470 
7471     /* Initialize the queues in use */
7472     qla25xx_init_queues(ha);
7473 
7474     status = qla2x00_fw_ready(vha);
7475     if (status) {
7476         /* if no cable then assume it's good */
7477         return vha->device_flags & DFLG_NO_CABLE ? 0 : status;
7478     }
7479 
7480     /* Issue a marker after FW becomes ready. */
7481     qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
7482     set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
7483 
7484     return 0;
7485 }
7486 
7487 static int
7488 qla25xx_init_queues(struct qla_hw_data *ha)
7489 {
7490     struct rsp_que *rsp = NULL;
7491     struct req_que *req = NULL;
7492     struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7493     int ret = -1;
7494     int i;
7495 
7496     for (i = 1; i < ha->max_rsp_queues; i++) {
7497         rsp = ha->rsp_q_map[i];
7498         if (rsp && test_bit(i, ha->rsp_qid_map)) {
7499             rsp->options &= ~BIT_0;
7500             ret = qla25xx_init_rsp_que(base_vha, rsp);
7501             if (ret != QLA_SUCCESS)
7502                 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
7503                     "%s Rsp que: %d init failed.\n",
7504                     __func__, rsp->id);
7505             else
7506                 ql_dbg(ql_dbg_init, base_vha, 0x0100,
7507                     "%s Rsp que: %d inited.\n",
7508                     __func__, rsp->id);
7509         }
7510     }
7511     for (i = 1; i < ha->max_req_queues; i++) {
7512         req = ha->req_q_map[i];
7513         if (req && test_bit(i, ha->req_qid_map)) {
7514             /* Clear outstanding commands array. */
7515             req->options &= ~BIT_0;
7516             ret = qla25xx_init_req_que(base_vha, req);
7517             if (ret != QLA_SUCCESS)
7518                 ql_dbg(ql_dbg_init, base_vha, 0x0101,
7519                     "%s Req que: %d init failed.\n",
7520                     __func__, req->id);
7521             else
7522                 ql_dbg(ql_dbg_init, base_vha, 0x0102,
7523                     "%s Req que: %d inited.\n",
7524                     __func__, req->id);
7525         }
7526     }
7527     return ret;
7528 }
7529 
7530 /*
7531 * qla2x00_reset_adapter
7532 *      Reset adapter.
7533 *
7534 * Input:
7535 *      ha = adapter block pointer.
7536 */
7537 int
7538 qla2x00_reset_adapter(scsi_qla_host_t *vha)
7539 {
7540     unsigned long flags = 0;
7541     struct qla_hw_data *ha = vha->hw;
7542     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
7543 
7544     vha->flags.online = 0;
7545     ha->isp_ops->disable_intrs(ha);
7546 
7547     spin_lock_irqsave(&ha->hardware_lock, flags);
7548     wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
7549     rd_reg_word(&reg->hccr);            /* PCI Posting. */
7550     wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
7551     rd_reg_word(&reg->hccr);            /* PCI Posting. */
7552     spin_unlock_irqrestore(&ha->hardware_lock, flags);
7553 
7554     return QLA_SUCCESS;
7555 }
7556 
7557 int
7558 qla24xx_reset_adapter(scsi_qla_host_t *vha)
7559 {
7560     unsigned long flags = 0;
7561     struct qla_hw_data *ha = vha->hw;
7562     struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
7563 
7564     if (IS_P3P_TYPE(ha))
7565         return QLA_SUCCESS;
7566 
7567     vha->flags.online = 0;
7568     ha->isp_ops->disable_intrs(ha);
7569 
7570     spin_lock_irqsave(&ha->hardware_lock, flags);
7571     wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
7572     rd_reg_dword(&reg->hccr);
7573     wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
7574     rd_reg_dword(&reg->hccr);
7575     spin_unlock_irqrestore(&ha->hardware_lock, flags);
7576 
7577     if (IS_NOPOLLING_TYPE(ha))
7578         ha->isp_ops->enable_intrs(ha);
7579 
7580     return QLA_SUCCESS;
7581 }
7582 
7583 /* On sparc systems, obtain port and node WWN from firmware
7584  * properties.
7585  */
7586 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
7587     struct nvram_24xx *nv)
7588 {
7589 #ifdef CONFIG_SPARC
7590     struct qla_hw_data *ha = vha->hw;
7591     struct pci_dev *pdev = ha->pdev;
7592     struct device_node *dp = pci_device_to_OF_node(pdev);
7593     const u8 *val;
7594     int len;
7595 
7596     val = of_get_property(dp, "port-wwn", &len);
7597     if (val && len >= WWN_SIZE)
7598         memcpy(nv->port_name, val, WWN_SIZE);
7599 
7600     val = of_get_property(dp, "node-wwn", &len);
7601     if (val && len >= WWN_SIZE)
7602         memcpy(nv->node_name, val, WWN_SIZE);
7603 #endif
7604 }
7605 
7606 int
7607 qla24xx_nvram_config(scsi_qla_host_t *vha)
7608 {
7609     int   rval;
7610     struct init_cb_24xx *icb;
7611     struct nvram_24xx *nv;
7612     __le32 *dptr;
7613     uint8_t  *dptr1, *dptr2;
7614     uint32_t chksum;
7615     uint16_t cnt;
7616     struct qla_hw_data *ha = vha->hw;
7617 
7618     rval = QLA_SUCCESS;
7619     icb = (struct init_cb_24xx *)ha->init_cb;
7620     nv = ha->nvram;
7621 
7622     /* Determine NVRAM starting address. */
7623     if (ha->port_no == 0) {
7624         ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
7625         ha->vpd_base = FA_NVRAM_VPD0_ADDR;
7626     } else {
7627         ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
7628         ha->vpd_base = FA_NVRAM_VPD1_ADDR;
7629     }
7630 
7631     ha->nvram_size = sizeof(*nv);
7632     ha->vpd_size = FA_NVRAM_VPD_SIZE;
7633 
7634     /* Get VPD data into cache */
7635     ha->vpd = ha->nvram + VPD_OFFSET;
7636     ha->isp_ops->read_nvram(vha, ha->vpd,
7637         ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
7638 
7639     /* Get NVRAM data into cache and calculate checksum. */
7640     dptr = (__force __le32 *)nv;
7641     ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
7642     for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7643         chksum += le32_to_cpu(*dptr);
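    /*
     * A valid NVRAM image sums to zero over all 32-bit words, so any
     * non-zero chksum marks the data as bad below.
     */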
7644 
7645     ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
7646         "Contents of NVRAM\n");
7647     ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
7648         nv, ha->nvram_size);
7649 
7650     /* Bad NVRAM data, set default parameters. */
7651     if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
7652         le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
7653         /* Reset NVRAM data. */
7654         ql_log(ql_log_warn, vha, 0x006b,
7655             "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
7656             chksum, nv->id, nv->nvram_version);
7657         ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
7658         ql_log(ql_log_warn, vha, 0x006c,
7659             "Falling back to functioning (yet invalid -- WWPN) "
7660             "defaults.\n");
7661 
7662         /*
7663          * Set default initialization control block.
7664          */
7665         memset(nv, 0, ha->nvram_size);
7666         nv->nvram_version = cpu_to_le16(ICB_VERSION);
7667         nv->version = cpu_to_le16(ICB_VERSION);
7668         nv->frame_payload_size = cpu_to_le16(2048);
7669         nv->execution_throttle = cpu_to_le16(0xFFFF);
7670         nv->exchange_count = cpu_to_le16(0);
7671         nv->hard_address = cpu_to_le16(124);
7672         nv->port_name[0] = 0x21;
7673         nv->port_name[1] = 0x00 + ha->port_no + 1;
7674         nv->port_name[2] = 0x00;
7675         nv->port_name[3] = 0xe0;
7676         nv->port_name[4] = 0x8b;
7677         nv->port_name[5] = 0x1c;
7678         nv->port_name[6] = 0x55;
7679         nv->port_name[7] = 0x86;
7680         nv->node_name[0] = 0x20;
7681         nv->node_name[1] = 0x00;
7682         nv->node_name[2] = 0x00;
7683         nv->node_name[3] = 0xe0;
7684         nv->node_name[4] = 0x8b;
7685         nv->node_name[5] = 0x1c;
7686         nv->node_name[6] = 0x55;
7687         nv->node_name[7] = 0x86;
7688         qla24xx_nvram_wwn_from_ofw(vha, nv);
7689         nv->login_retry_count = cpu_to_le16(8);
7690         nv->interrupt_delay_timer = cpu_to_le16(0);
7691         nv->login_timeout = cpu_to_le16(0);
7692         nv->firmware_options_1 =
7693             cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
7694         nv->firmware_options_2 = cpu_to_le32(2 << 4);
7695         nv->firmware_options_2 |= cpu_to_le32(BIT_12);
7696         nv->firmware_options_3 = cpu_to_le32(2 << 13);
7697         nv->host_p = cpu_to_le32(BIT_11|BIT_10);
7698         nv->efi_parameters = cpu_to_le32(0);
7699         nv->reset_delay = 5;
7700         nv->max_luns_per_target = cpu_to_le16(128);
7701         nv->port_down_retry_count = cpu_to_le16(30);
7702         nv->link_down_timeout = cpu_to_le16(30);
7703 
7704         rval = 1;
7705     }
7706 
7707     if (qla_tgt_mode_enabled(vha)) {
7708         /* Don't enable full login after initial LIP */
7709         nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
7710         /* Don't enable LIP full login for initiator */
7711         nv->host_p &= cpu_to_le32(~BIT_10);
7712     }
7713 
7714     qlt_24xx_config_nvram_stage1(vha, nv);
7715 
7716     /* Reset Initialization control block */
7717     memset(icb, 0, ha->init_cb_size);
7718 
7719     /* Copy 1st segment. */
7720     dptr1 = (uint8_t *)icb;
7721     dptr2 = (uint8_t *)&nv->version;
7722     cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
7723     while (cnt--)
7724         *dptr1++ = *dptr2++;
7725 
7726     icb->login_retry_count = nv->login_retry_count;
7727     icb->link_down_on_nos = nv->link_down_on_nos;
7728 
7729     /* Copy 2nd segment. */
7730     dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
7731     dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
7732     cnt = (uint8_t *)&icb->reserved_3 -
7733         (uint8_t *)&icb->interrupt_delay_timer;
7734     while (cnt--)
7735         *dptr1++ = *dptr2++;
7736     ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
7737     /*
7738      * Setup driver NVRAM options.
7739      */
7740     qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
7741         "QLA2462");
7742 
7743     qlt_24xx_config_nvram_stage2(vha, icb);
7744 
7745     if (nv->host_p & cpu_to_le32(BIT_15)) {
7746         /* Use alternate WWN? */
7747         memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
7748         memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
7749     }
7750 
7751     /* Prepare nodename */
7752     if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
7753         /*
7754          * Firmware will apply the following mask if the nodename was
7755          * not provided.
7756          */
7757         memcpy(icb->node_name, icb->port_name, WWN_SIZE);
7758         icb->node_name[0] &= 0xF0;
7759     }
7760 
7761     /* Set host adapter parameters. */
7762     ha->flags.disable_risc_code_load = 0;
7763     ha->flags.enable_lip_reset = 0;
7764     ha->flags.enable_lip_full_login =
7765         le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
7766     ha->flags.enable_target_reset =
7767         le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
7768     ha->flags.enable_led_scheme = 0;
7769     ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
7770 
7771     ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
7772         (BIT_6 | BIT_5 | BIT_4)) >> 4;
7773 
7774     memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
7775         sizeof(ha->fw_seriallink_options24));
7776 
7777     /* save HBA serial number */
7778     ha->serial0 = icb->port_name[5];
7779     ha->serial1 = icb->port_name[6];
7780     ha->serial2 = icb->port_name[7];
7781     memcpy(vha->node_name, icb->node_name, WWN_SIZE);
7782     memcpy(vha->port_name, icb->port_name, WWN_SIZE);
7783 
7784     icb->execution_throttle = cpu_to_le16(0xFFFF);
7785 
7786     ha->retry_count = le16_to_cpu(nv->login_retry_count);
7787 
7788     /* Set minimum login_timeout to 4 seconds. */
7789     if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
7790         nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
7791     if (le16_to_cpu(nv->login_timeout) < 4)
7792         nv->login_timeout = cpu_to_le16(4);
7793     ha->login_timeout = le16_to_cpu(nv->login_timeout);
7794 
7795     /* Set minimum RATOV to 100 tenths of a second. */
7796     ha->r_a_tov = 100;
7797 
7798     ha->loop_reset_delay = nv->reset_delay;
7799 
7800     /* Link Down Timeout = 0:
7801      *
7802      *  When Port Down timer expires we will start returning
7803      *  I/O's to OS with "DID_NO_CONNECT".
7804      *
7805      * Link Down Timeout != 0:
7806      *
7807      *   The driver waits for the link to come up after link down
7808      *   before returning I/Os to OS with "DID_NO_CONNECT".
7809      */
7810     if (le16_to_cpu(nv->link_down_timeout) == 0) {
7811         ha->loop_down_abort_time =
7812             (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
7813     } else {
7814         ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
7815         ha->loop_down_abort_time =
7816             (LOOP_DOWN_TIME - ha->link_down_timeout);
7817     }
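         /*
          * Example with hypothetical values: if LOOP_DOWN_TIME were 255
          * and the NVRAM link_down_timeout were 30, loop_down_abort_time
          * would be set to 255 - 30 = 225 above.
          */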
7818 
7819     /* Need enough time to try and get the port back. */
7820     ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
7821     if (qlport_down_retry)
7822         ha->port_down_retry_count = qlport_down_retry;
7823 
7824     /* Set login_retry_count */
7825     ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
7826     if (ha->port_down_retry_count ==
7827         le16_to_cpu(nv->port_down_retry_count) &&
7828         ha->port_down_retry_count > 3)
7829         ha->login_retry_count = ha->port_down_retry_count;
7830     else if (ha->port_down_retry_count > (int)ha->login_retry_count)
7831         ha->login_retry_count = ha->port_down_retry_count;
7832     if (ql2xloginretrycount)
7833         ha->login_retry_count = ql2xloginretrycount;
7834 
7835     /* N2N: driver will initiate Login instead of FW */
7836     icb->firmware_options_3 |= cpu_to_le32(BIT_8);
7837 
7838     /* Enable ZIO. */
7839     if (!vha->flags.init_done) {
7840         ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
7841             (BIT_3 | BIT_2 | BIT_1 | BIT_0);
7842         ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
7843             le16_to_cpu(icb->interrupt_delay_timer) : 2;
7844     }
7845     icb->firmware_options_2 &= cpu_to_le32(
7846         ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
7847     if (ha->zio_mode != QLA_ZIO_DISABLED) {
7848         ha->zio_mode = QLA_ZIO_MODE_6;
7849 
7850         ql_log(ql_log_info, vha, 0x006f,
7851             "ZIO mode %d enabled; timer delay (%d us).\n",
7852             ha->zio_mode, ha->zio_timer * 100);
7853 
7854         icb->firmware_options_2 |= cpu_to_le32(
7855             (uint32_t)ha->zio_mode);
7856         icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
7857     }
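         /*
          * Note: the ZIO mode lives in the low nibble of firmware_options_2
          * (cleared above before being re-applied), and the interrupt delay
          * timer is programmed in 100-microsecond units, hence the
          * "zio_timer * 100" in the log message above.
          */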
7858 
7859     if (rval) {
7860         ql_log(ql_log_warn, vha, 0x0070,
7861             "NVRAM configuration failed.\n");
7862     }
7863     return (rval);
7864 }
7865 
7866 static void
7867 qla27xx_print_image(struct scsi_qla_host *vha, char *name,
7868     struct qla27xx_image_status *image_status)
7869 {
7870     ql_dbg(ql_dbg_init, vha, 0x018b,
7871         "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
7872         name, "status",
7873         image_status->image_status_mask,
7874         le16_to_cpu(image_status->generation),
7875         image_status->ver_major,
7876         image_status->ver_minor,
7877         image_status->bitmap,
7878         le32_to_cpu(image_status->checksum),
7879         le32_to_cpu(image_status->signature));
7880 }
7881 
7882 static bool
7883 qla28xx_check_aux_image_status_signature(
7884     struct qla27xx_image_status *image_status)
7885 {
7886     ulong signature = le32_to_cpu(image_status->signature);
7887 
7888     return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
7889 }
7890 
7891 static bool
7892 qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
7893 {
7894     ulong signature = le32_to_cpu(image_status->signature);
7895 
7896     return
7897         signature != QLA27XX_IMG_STATUS_SIGN &&
7898         signature != QLA28XX_IMG_STATUS_SIGN;
7899 }
7900 
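     /*
      * Sum every 32-bit word of the image-status block, including the
      * stored checksum; a well-formed block is expected to total zero,
      * which is why callers treat any non-zero return as a failure.
      */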
7901 static ulong
7902 qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
7903 {
7904     __le32 *p = (__force __le32 *)image_status;
7905     uint n = sizeof(*image_status) / sizeof(*p);
7906     uint32_t sum = 0;
7907 
7908     for ( ; n--; p++)
7909         sum += le32_to_cpup(p);
7910 
7911     return sum;
7912 }
7913 
7914 static inline uint
7915 qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
7916 {
7917     return aux->bitmap & bitmask ?
7918         QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
7919 }
7920 
7921 static void
7922 qla28xx_component_status(
7923     struct active_regions *active_regions, struct qla27xx_image_status *aux)
7924 {
7925     active_regions->aux.board_config =
7926         qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);
7927 
7928     active_regions->aux.vpd_nvram =
7929         qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);
7930 
7931     active_regions->aux.npiv_config_0_1 =
7932         qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);
7933 
7934     active_regions->aux.npiv_config_2_3 =
7935         qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
7936 }
7937 
7938 static int
7939 qla27xx_compare_image_generation(
7940     struct qla27xx_image_status *pri_image_status,
7941     struct qla27xx_image_status *sec_image_status)
7942 {
7943     /* calculate generation delta as a signed 16-bit value (this accounts for wrap) */
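         /*
          * Worked example of the wrap case (hypothetical values): a primary
          * generation of 0x0002 versus a secondary generation of 0xFFFE
          * gives a 16-bit difference of +4, so the primary is treated as
          * newer even though its raw counter value is smaller.
          */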
7944     int16_t delta =
7945         le16_to_cpu(pri_image_status->generation) -
7946         le16_to_cpu(sec_image_status->generation);
7947 
7948     ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
7949 
7950     return delta;
7951 }
7952 
7953 void
7954 qla28xx_get_aux_images(
7955     struct scsi_qla_host *vha, struct active_regions *active_regions)
7956 {
7957     struct qla_hw_data *ha = vha->hw;
7958     struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
7959     bool valid_pri_image = false, valid_sec_image = false;
7960     bool active_pri_image = false, active_sec_image = false;
7961 
7962     if (!ha->flt_region_aux_img_status_pri) {
7963         ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
7964         goto check_sec_image;
7965     }
7966 
7967     qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status,
7968         ha->flt_region_aux_img_status_pri,
7969         sizeof(pri_aux_image_status) >> 2);
7970     qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
7971 
7972     if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
7973         ql_dbg(ql_dbg_init, vha, 0x018b,
7974             "Primary aux image signature (%#x) not valid\n",
7975             le32_to_cpu(pri_aux_image_status.signature));
7976         goto check_sec_image;
7977     }
7978 
7979     if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
7980         ql_dbg(ql_dbg_init, vha, 0x018c,
7981             "Primary aux image checksum failed\n");
7982         goto check_sec_image;
7983     }
7984 
7985     valid_pri_image = true;
7986 
7987     if (pri_aux_image_status.image_status_mask & 1) {
7988         ql_dbg(ql_dbg_init, vha, 0x018d,
7989             "Primary aux image is active\n");
7990         active_pri_image = true;
7991     }
7992 
7993 check_sec_image:
7994     if (!ha->flt_region_aux_img_status_sec) {
7995         ql_dbg(ql_dbg_init, vha, 0x018a,
7996             "Secondary aux image not addressed\n");
7997         goto check_valid_image;
7998     }
7999 
8000     qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status,
8001         ha->flt_region_aux_img_status_sec,
8002         sizeof(sec_aux_image_status) >> 2);
8003     qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
8004 
8005     if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
8006         ql_dbg(ql_dbg_init, vha, 0x018b,
8007             "Secondary aux image signature (%#x) not valid\n",
8008             le32_to_cpu(sec_aux_image_status.signature));
8009         goto check_valid_image;
8010     }
8011 
8012     if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
8013         ql_dbg(ql_dbg_init, vha, 0x018c,
8014             "Secondary aux image checksum failed\n");
8015         goto check_valid_image;
8016     }
8017 
8018     valid_sec_image = true;
8019 
8020     if (sec_aux_image_status.image_status_mask & 1) {
8021         ql_dbg(ql_dbg_init, vha, 0x018d,
8022             "Secondary aux image is active\n");
8023         active_sec_image = true;
8024     }
8025 
8026 check_valid_image:
8027     if (valid_pri_image && active_pri_image &&
8028         valid_sec_image && active_sec_image) {
8029         if (qla27xx_compare_image_generation(&pri_aux_image_status,
8030             &sec_aux_image_status) >= 0) {
8031             qla28xx_component_status(active_regions,
8032                 &pri_aux_image_status);
8033         } else {
8034             qla28xx_component_status(active_regions,
8035                 &sec_aux_image_status);
8036         }
8037     } else if (valid_pri_image && active_pri_image) {
8038         qla28xx_component_status(active_regions, &pri_aux_image_status);
8039     } else if (valid_sec_image && active_sec_image) {
8040         qla28xx_component_status(active_regions, &sec_aux_image_status);
8041     }
8042 
8043     ql_dbg(ql_dbg_init, vha, 0x018f,
8044         "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
8045         active_regions->aux.board_config,
8046         active_regions->aux.vpd_nvram,
8047         active_regions->aux.npiv_config_0_1,
8048         active_regions->aux.npiv_config_2_3);
8049 }
8050 
8051 void
8052 qla27xx_get_active_image(struct scsi_qla_host *vha,
8053     struct active_regions *active_regions)
8054 {
8055     struct qla_hw_data *ha = vha->hw;
8056     struct qla27xx_image_status pri_image_status, sec_image_status;
8057     bool valid_pri_image = false, valid_sec_image = false;
8058     bool active_pri_image = false, active_sec_image = false;
8059 
8060     if (!ha->flt_region_img_status_pri) {
8061         ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
8062         goto check_sec_image;
8063     }
8064 
8065     if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status,
8066         ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
8067         QLA_SUCCESS) {
8068         WARN_ON_ONCE(true);
8069         goto check_sec_image;
8070     }
8071     qla27xx_print_image(vha, "Primary image", &pri_image_status);
8072 
8073     if (qla27xx_check_image_status_signature(&pri_image_status)) {
8074         ql_dbg(ql_dbg_init, vha, 0x018b,
8075             "Primary image signature (%#x) not valid\n",
8076             le32_to_cpu(pri_image_status.signature));
8077         goto check_sec_image;
8078     }
8079 
8080     if (qla27xx_image_status_checksum(&pri_image_status)) {
8081         ql_dbg(ql_dbg_init, vha, 0x018c,
8082             "Primary image checksum failed\n");
8083         goto check_sec_image;
8084     }
8085 
8086     valid_pri_image = true;
8087 
8088     if (pri_image_status.image_status_mask & 1) {
8089         ql_dbg(ql_dbg_init, vha, 0x018d,
8090             "Primary image is active\n");
8091         active_pri_image = true;
8092     }
8093 
8094 check_sec_image:
8095     if (!ha->flt_region_img_status_sec) {
8096         ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
8097         goto check_valid_image;
8098     }
8099 
8100     qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
8101         ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
8102     qla27xx_print_image(vha, "Secondary image", &sec_image_status);
8103 
8104     if (qla27xx_check_image_status_signature(&sec_image_status)) {
8105         ql_dbg(ql_dbg_init, vha, 0x018b,
8106             "Secondary image signature (%#x) not valid\n",
8107             le32_to_cpu(sec_image_status.signature));
8108         goto check_valid_image;
8109     }
8110 
8111     if (qla27xx_image_status_checksum(&sec_image_status)) {
8112         ql_dbg(ql_dbg_init, vha, 0x018c,
8113             "Secondary image checksum failed\n");
8114         goto check_valid_image;
8115     }
8116 
8117     valid_sec_image = true;
8118 
8119     if (sec_image_status.image_status_mask & 1) {
8120         ql_dbg(ql_dbg_init, vha, 0x018d,
8121             "Secondary image is active\n");
8122         active_sec_image = true;
8123     }
8124 
8125 check_valid_image:
8126     if (valid_pri_image && active_pri_image)
8127         active_regions->global = QLA27XX_PRIMARY_IMAGE;
8128 
8129     if (valid_sec_image && active_sec_image) {
8130         if (!active_regions->global ||
8131             qla27xx_compare_image_generation(
8132             &pri_image_status, &sec_image_status) < 0) {
8133             active_regions->global = QLA27XX_SECONDARY_IMAGE;
8134         }
8135     }
8136 
8137     ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
8138         active_regions->global == QLA27XX_DEFAULT_IMAGE ?
8139         "default (boot/fw)" :
8140         active_regions->global == QLA27XX_PRIMARY_IMAGE ?
8141         "primary" :
8142         active_regions->global == QLA27XX_SECONDARY_IMAGE ?
8143         "secondary" : "invalid",
8144         active_regions->global);
8145 }
8146 
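     /*
      * qla24xx_risc_firmware_invalid() flags an image as invalid when
      * dwords 4-7 of its header are either all zero or all ones
      * (0xffffffff), the latter being typical of erased/blank flash.
      */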
8147 bool qla24xx_risc_firmware_invalid(uint32_t *dword)
8148 {
8149     return
8150         !(dword[4] | dword[5] | dword[6] | dword[7]) ||
8151         !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]);
8152 }
8153 
8154 static int
8155 qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
8156     uint32_t faddr)
8157 {
8158     int rval;
8159     uint templates, segments, fragment;
8160     ulong i;
8161     uint j;
8162     ulong dlen;
8163     uint32_t *dcode;
8164     uint32_t risc_addr, risc_size, risc_attr = 0;
8165     struct qla_hw_data *ha = vha->hw;
8166     struct req_que *req = ha->req_q_map[0];
8167     struct fwdt *fwdt = ha->fwdt;
8168 
8169     ql_dbg(ql_dbg_init, vha, 0x008b,
8170         "FW: Loading firmware from flash (%x).\n", faddr);
8171 
8172     dcode = (uint32_t *)req->ring;
8173     qla24xx_read_flash_data(vha, dcode, faddr, 8);
8174     if (qla24xx_risc_firmware_invalid(dcode)) {
8175         ql_log(ql_log_fatal, vha, 0x008c,
8176             "Unable to verify the integrity of flash firmware "
8177             "image.\n");
8178         ql_log(ql_log_fatal, vha, 0x008d,
8179             "Firmware data: %08x %08x %08x %08x.\n",
8180             dcode[0], dcode[1], dcode[2], dcode[3]);
8181 
8182         return QLA_FUNCTION_FAILED;
8183     }
8184 
8185     dcode = (uint32_t *)req->ring;
8186     *srisc_addr = 0;
8187     segments = FA_RISC_CODE_SEGMENTS;
8188     for (j = 0; j < segments; j++) {
8189         ql_dbg(ql_dbg_init, vha, 0x008d,
8190             "-> Loading segment %u...\n", j);
8191         qla24xx_read_flash_data(vha, dcode, faddr, 10);
8192         risc_addr = be32_to_cpu((__force __be32)dcode[2]);
8193         risc_size = be32_to_cpu((__force __be32)dcode[3]);
8194         if (!*srisc_addr) {
8195             *srisc_addr = risc_addr;
8196             risc_attr = be32_to_cpu((__force __be32)dcode[9]);
8197         }
8198 
8199         dlen = ha->fw_transfer_size >> 2;
8200         for (fragment = 0; risc_size; fragment++) {
8201             if (dlen > risc_size)
8202                 dlen = risc_size;
8203 
8204             ql_dbg(ql_dbg_init, vha, 0x008e,
8205                 "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
8206                 fragment, risc_addr, faddr, dlen);
8207             qla24xx_read_flash_data(vha, dcode, faddr, dlen);
8208             for (i = 0; i < dlen; i++)
8209                 dcode[i] = swab32(dcode[i]);
8210 
8211             rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
8212             if (rval) {
8213                 ql_log(ql_log_fatal, vha, 0x008f,
8214                     "-> Failed load firmware fragment %u.\n",
8215                     fragment);
8216                 return QLA_FUNCTION_FAILED;
8217             }
8218 
8219             faddr += dlen;
8220             risc_addr += dlen;
8221             risc_size -= dlen;
8222         }
8223     }
8224 
8225     if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
8226         return QLA_SUCCESS;
8227 
8228     templates = (risc_attr & BIT_9) ? 2 : 1;
8229     ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
8230     for (j = 0; j < templates; j++, fwdt++) {
8231         vfree(fwdt->template);
8232         fwdt->template = NULL;
8233         fwdt->length = 0;
8234 
8235         dcode = (uint32_t *)req->ring;
8236         qla24xx_read_flash_data(vha, dcode, faddr, 7);
8237         risc_size = be32_to_cpu((__force __be32)dcode[2]);
8238         ql_dbg(ql_dbg_init, vha, 0x0161,
8239             "-> fwdt%u template array at %#x (%#x dwords)\n",
8240             j, faddr, risc_size);
8241         if (!risc_size || !~risc_size) {
8242             ql_dbg(ql_dbg_init, vha, 0x0162,
8243                 "-> fwdt%u failed to read array\n", j);
8244             goto failed;
8245         }
8246 
8247         /* skip header and ignore checksum */
8248         faddr += 7;
8249         risc_size -= 8;
8250 
8251         ql_dbg(ql_dbg_init, vha, 0x0163,
8252             "-> fwdt%u template allocate template %#x words...\n",
8253             j, risc_size);
8254         fwdt->template = vmalloc(risc_size * sizeof(*dcode));
8255         if (!fwdt->template) {
8256             ql_log(ql_log_warn, vha, 0x0164,
8257                 "-> fwdt%u failed allocate template.\n", j);
8258             goto failed;
8259         }
8260 
8261         dcode = fwdt->template;
8262         qla24xx_read_flash_data(vha, dcode, faddr, risc_size);
8263 
8264         if (!qla27xx_fwdt_template_valid(dcode)) {
8265             ql_log(ql_log_warn, vha, 0x0165,
8266                 "-> fwdt%u failed template validate\n", j);
8267             goto failed;
8268         }
8269 
8270         dlen = qla27xx_fwdt_template_size(dcode);
8271         ql_dbg(ql_dbg_init, vha, 0x0166,
8272             "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8273             j, dlen, dlen / sizeof(*dcode));
8274         if (dlen > risc_size * sizeof(*dcode)) {
8275             ql_log(ql_log_warn, vha, 0x0167,
8276                 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8277                 j, dlen - risc_size * sizeof(*dcode));
8278             goto failed;
8279         }
8280 
8281         fwdt->length = dlen;
8282         ql_dbg(ql_dbg_init, vha, 0x0168,
8283             "-> fwdt%u loaded template ok\n", j);
8284 
8285         faddr += risc_size + 1;
8286     }
8287 
8288     return QLA_SUCCESS;
8289 
8290 failed:
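         /*
          * Template problems are non-fatal: the RISC code segments were
          * already loaded above, so only the (optional) firmware dump
          * template is discarded and QLA_SUCCESS is still returned.
          */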
8291     vfree(fwdt->template);
8292     fwdt->template = NULL;
8293     fwdt->length = 0;
8294 
8295     return QLA_SUCCESS;
8296 }
8297 
8298 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
8299 
8300 int
8301 qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8302 {
8303     int rval;
8304     int i, fragment;
8305     uint16_t *wcode;
8306     __be16   *fwcode;
8307     uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
8308     struct fw_blob *blob;
8309     struct qla_hw_data *ha = vha->hw;
8310     struct req_que *req = ha->req_q_map[0];
8311 
8312     /* Load firmware blob. */
8313     blob = qla2x00_request_firmware(vha);
8314     if (!blob) {
8315         ql_log(ql_log_info, vha, 0x0083,
8316             "Firmware image unavailable.\n");
8317         ql_log(ql_log_info, vha, 0x0084,
8318             "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
8319         return QLA_FUNCTION_FAILED;
8320     }
8321 
8322     rval = QLA_SUCCESS;
8323 
8324     wcode = (uint16_t *)req->ring;
8325     *srisc_addr = 0;
8326     fwcode = (__force __be16 *)blob->fw->data;
8327     fwclen = 0;
8328 
8329     /* Validate firmware image by checking version. */
8330     if (blob->fw->size < 8 * sizeof(uint16_t)) {
8331         ql_log(ql_log_fatal, vha, 0x0085,
8332             "Unable to verify integrity of firmware image (%zd).\n",
8333             blob->fw->size);
8334         goto fail_fw_integrity;
8335     }
8336     for (i = 0; i < 4; i++)
8337         wcode[i] = be16_to_cpu(fwcode[i + 4]);
8338     if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
8339         wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
8340         wcode[2] == 0 && wcode[3] == 0)) {
8341         ql_log(ql_log_fatal, vha, 0x0086,
8342             "Unable to verify integrity of firmware image.\n");
8343         ql_log(ql_log_fatal, vha, 0x0087,
8344             "Firmware data: %04x %04x %04x %04x.\n",
8345             wcode[0], wcode[1], wcode[2], wcode[3]);
8346         goto fail_fw_integrity;
8347     }
8348 
8349     seg = blob->segs;
8350     while (*seg && rval == QLA_SUCCESS) {
8351         risc_addr = *seg;
8352         *srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
8353         risc_size = be16_to_cpu(fwcode[3]);
8354 
8355         /* Validate firmware image size. */
8356         fwclen += risc_size * sizeof(uint16_t);
8357         if (blob->fw->size < fwclen) {
8358             ql_log(ql_log_fatal, vha, 0x0088,
8359                 "Unable to verify integrity of firmware image "
8360                 "(%zd).\n", blob->fw->size);
8361             goto fail_fw_integrity;
8362         }
8363 
8364         fragment = 0;
8365         while (risc_size > 0 && rval == QLA_SUCCESS) {
8366             wlen = (uint16_t)(ha->fw_transfer_size >> 1);
8367             if (wlen > risc_size)
8368                 wlen = risc_size;
8369             ql_dbg(ql_dbg_init, vha, 0x0089,
8370                 "Loading risc segment@ risc addr %x number of "
8371                 "words 0x%x.\n", risc_addr, wlen);
8372 
8373             for (i = 0; i < wlen; i++)
8374                 wcode[i] = swab16((__force u32)fwcode[i]);
8375 
8376             rval = qla2x00_load_ram(vha, req->dma, risc_addr,
8377                 wlen);
8378             if (rval) {
8379                 ql_log(ql_log_fatal, vha, 0x008a,
8380                     "Failed to load segment %d of firmware.\n",
8381                     fragment);
8382                 break;
8383             }
8384 
8385             fwcode += wlen;
8386             risc_addr += wlen;
8387             risc_size -= wlen;
8388             fragment++;
8389         }
8390 
8391         /* Next segment. */
8392         seg++;
8393     }
8394     return rval;
8395 
8396 fail_fw_integrity:
8397     return QLA_FUNCTION_FAILED;
8398 }
8399 
8400 static int
8401 qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8402 {
8403     int rval;
8404     uint templates, segments, fragment;
8405     uint32_t *dcode;
8406     ulong dlen;
8407     uint32_t risc_addr, risc_size, risc_attr = 0;
8408     ulong i;
8409     uint j;
8410     struct fw_blob *blob;
8411     __be32 *fwcode;
8412     struct qla_hw_data *ha = vha->hw;
8413     struct req_que *req = ha->req_q_map[0];
8414     struct fwdt *fwdt = ha->fwdt;
8415 
8416     ql_dbg(ql_dbg_init, vha, 0x0090,
8417         "-> FW: Loading via request-firmware.\n");
8418 
8419     blob = qla2x00_request_firmware(vha);
8420     if (!blob) {
8421         ql_log(ql_log_warn, vha, 0x0092,
8422             "-> Firmware file not found.\n");
8423 
8424         return QLA_FUNCTION_FAILED;
8425     }
8426 
8427     fwcode = (__force __be32 *)blob->fw->data;
8428     dcode = (__force uint32_t *)fwcode;
8429     if (qla24xx_risc_firmware_invalid(dcode)) {
8430         ql_log(ql_log_fatal, vha, 0x0093,
8431             "Unable to verify integrity of firmware image (%zd).\n",
8432             blob->fw->size);
8433         ql_log(ql_log_fatal, vha, 0x0095,
8434             "Firmware data: %08x %08x %08x %08x.\n",
8435             dcode[0], dcode[1], dcode[2], dcode[3]);
8436         return QLA_FUNCTION_FAILED;
8437     }
8438 
8439     dcode = (uint32_t *)req->ring;
8440     *srisc_addr = 0;
8441     segments = FA_RISC_CODE_SEGMENTS;
8442     for (j = 0; j < segments; j++) {
8443         ql_dbg(ql_dbg_init, vha, 0x0096,
8444             "-> Loading segment %u...\n", j);
8445         risc_addr = be32_to_cpu(fwcode[2]);
8446         risc_size = be32_to_cpu(fwcode[3]);
8447 
8448         if (!*srisc_addr) {
8449             *srisc_addr = risc_addr;
8450             risc_attr = be32_to_cpu(fwcode[9]);
8451         }
8452 
8453         dlen = ha->fw_transfer_size >> 2;
8454         for (fragment = 0; risc_size; fragment++) {
8455             if (dlen > risc_size)
8456                 dlen = risc_size;
8457 
8458             ql_dbg(ql_dbg_init, vha, 0x0097,
8459                 "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
8460                 fragment, risc_addr,
8461                 (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
8462                 dlen);
8463 
8464             for (i = 0; i < dlen; i++)
8465                 dcode[i] = swab32((__force u32)fwcode[i]);
8466 
8467             rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
8468             if (rval) {
8469                 ql_log(ql_log_fatal, vha, 0x0098,
8470                     "-> Failed load firmware fragment %u.\n",
8471                     fragment);
8472                 return QLA_FUNCTION_FAILED;
8473             }
8474 
8475             fwcode += dlen;
8476             risc_addr += dlen;
8477             risc_size -= dlen;
8478         }
8479     }
8480 
8481     if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
8482         return QLA_SUCCESS;
8483 
8484     templates = (risc_attr & BIT_9) ? 2 : 1;
8485     ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
8486     for (j = 0; j < templates; j++, fwdt++) {
8487         vfree(fwdt->template);
8488         fwdt->template = NULL;
8489         fwdt->length = 0;
8490 
8491         risc_size = be32_to_cpu(fwcode[2]);
8492         ql_dbg(ql_dbg_init, vha, 0x0171,
8493             "-> fwdt%u template array at %#x (%#x dwords)\n",
8494             j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
8495             risc_size);
8496         if (!risc_size || !~risc_size) {
8497             ql_dbg(ql_dbg_init, vha, 0x0172,
8498                 "-> fwdt%u failed to read array\n", j);
8499             goto failed;
8500         }
8501 
8502         /* skip header and ignore checksum */
8503         fwcode += 7;
8504         risc_size -= 8;
8505 
8506         ql_dbg(ql_dbg_init, vha, 0x0173,
8507             "-> fwdt%u template allocate template %#x words...\n",
8508             j, risc_size);
8509         fwdt->template = vmalloc(risc_size * sizeof(*dcode));
8510         if (!fwdt->template) {
8511             ql_log(ql_log_warn, vha, 0x0174,
8512                 "-> fwdt%u failed allocate template.\n", j);
8513             goto failed;
8514         }
8515 
8516         dcode = fwdt->template;
8517         for (i = 0; i < risc_size; i++)
8518             dcode[i] = (__force u32)fwcode[i];
8519 
8520         if (!qla27xx_fwdt_template_valid(dcode)) {
8521             ql_log(ql_log_warn, vha, 0x0175,
8522                 "-> fwdt%u failed template validate\n", j);
8523             goto failed;
8524         }
8525 
8526         dlen = qla27xx_fwdt_template_size(dcode);
8527         ql_dbg(ql_dbg_init, vha, 0x0176,
8528             "-> fwdt%u template size %#lx bytes (%#lx words)\n",
8529             j, dlen, dlen / sizeof(*dcode));
8530         if (dlen > risc_size * sizeof(*dcode)) {
8531             ql_log(ql_log_warn, vha, 0x0177,
8532                 "-> fwdt%u template exceeds array (%-lu bytes)\n",
8533                 j, dlen - risc_size * sizeof(*dcode));
8534             goto failed;
8535         }
8536 
8537         fwdt->length = dlen;
8538         ql_dbg(ql_dbg_init, vha, 0x0178,
8539             "-> fwdt%u loaded template ok\n", j);
8540 
8541         fwcode += risc_size + 1;
8542     }
8543 
8544     return QLA_SUCCESS;
8545 
8546 failed:
8547     vfree(fwdt->template);
8548     fwdt->template = NULL;
8549     fwdt->length = 0;
8550 
8551     return QLA_SUCCESS;
8552 }
8553 
8554 int
8555 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8556 {
8557     int rval;
8558 
8559     if (ql2xfwloadbin == 1)
8560         return qla81xx_load_risc(vha, srisc_addr);
8561 
8562     /*
8563      * FW Load priority:
8564      * 1) Firmware via request-firmware interface (.bin file).
8565      * 2) Firmware residing in flash.
8566      */
8567     rval = qla24xx_load_risc_blob(vha, srisc_addr);
8568     if (rval == QLA_SUCCESS)
8569         return rval;
8570 
8571     return qla24xx_load_risc_flash(vha, srisc_addr,
8572         vha->hw->flt_region_fw);
8573 }
8574 
8575 int
8576 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
8577 {
8578     int rval;
8579     struct qla_hw_data *ha = vha->hw;
8580     struct active_regions active_regions = { };
8581 
8582     if (ql2xfwloadbin == 2)
8583         goto try_blob_fw;
8584 
8585     /* FW Load priority:
8586      * 1) Firmware residing in flash.
8587      * 2) Firmware via request-firmware interface (.bin file).
8588      * 3) Golden-Firmware residing in flash -- (limited operation).
8589      */
8590 
8591     if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
8592         goto try_primary_fw;
8593 
8594     qla27xx_get_active_image(vha, &active_regions);
8595 
8596     if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
8597         goto try_primary_fw;
8598 
8599     ql_dbg(ql_dbg_init, vha, 0x008b,
8600         "Loading secondary firmware image.\n");
8601     rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
8602     if (!rval)
8603         return rval;
8604 
8605 try_primary_fw:
8606     ql_dbg(ql_dbg_init, vha, 0x008b,
8607         "Loading primary firmware image.\n");
8608     rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
8609     if (!rval)
8610         return rval;
8611 
8612 try_blob_fw:
8613     rval = qla24xx_load_risc_blob(vha, srisc_addr);
8614     if (!rval || !ha->flt_region_gold_fw)
8615         return rval;
8616 
8617     ql_log(ql_log_info, vha, 0x0099,
8618         "Attempting to fallback to golden firmware.\n");
8619     rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
8620     if (rval)
8621         return rval;
8622 
8623     ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
8624     ha->flags.running_gold_fw = 1;
8625     return rval;
8626 }
8627 
8628 void
8629 qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
8630 {
8631     int ret, retries;
8632     struct qla_hw_data *ha = vha->hw;
8633 
8634     if (ha->flags.pci_channel_io_perm_failure)
8635         return;
8636     if (!IS_FWI2_CAPABLE(ha))
8637         return;
8638     if (!ha->fw_major_version)
8639         return;
8640     if (!ha->flags.fw_started)
8641         return;
8642 
8643     ret = qla2x00_stop_firmware(vha);
8644     for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
8645         ret != QLA_INVALID_COMMAND && retries ; retries--) {
8646         ha->isp_ops->reset_chip(vha);
8647         if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
8648             continue;
8649         if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
8650             continue;
8651         ql_log(ql_log_info, vha, 0x8015,
8652             "Attempting retry of stop-firmware command.\n");
8653         ret = qla2x00_stop_firmware(vha);
8654     }
8655 
8656     QLA_FW_STOPPED(ha);
8657     ha->flags.fw_init_done = 0;
8658 }
8659 
8660 int
8661 qla24xx_configure_vhba(scsi_qla_host_t *vha)
8662 {
8663     int rval = QLA_SUCCESS;
8664     int rval2;
8665     uint16_t mb[MAILBOX_REGISTER_COUNT];
8666     struct qla_hw_data *ha = vha->hw;
8667     struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
8668 
8669     if (!vha->vp_idx)
8670         return -EINVAL;
8671 
8672     rval = qla2x00_fw_ready(base_vha);
8673 
8674     if (rval == QLA_SUCCESS) {
8675         clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
8676         qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
8677     }
8678 
8679     vha->flags.management_server_logged_in = 0;
8680 
8681     /* Login to SNS first */
8682     rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
8683         BIT_1);
8684     if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
8685         if (rval2 == QLA_MEMORY_ALLOC_FAILED)
8686             ql_dbg(ql_dbg_init, vha, 0x0120,
8687                 "Failed SNS login: loop_id=%x, rval2=%d\n",
8688                 NPH_SNS, rval2);
8689         else
8690             ql_dbg(ql_dbg_init, vha, 0x0103,
8691                 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
8692                 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
8693                 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
8694         return (QLA_FUNCTION_FAILED);
8695     }
8696 
8697     atomic_set(&vha->loop_down_timer, 0);
8698     atomic_set(&vha->loop_state, LOOP_UP);
8699     set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
8700     set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
8701     rval = qla2x00_loop_resync(base_vha);
8702 
8703     return rval;
8704 }
8705 
8706 /* 84XX Support **************************************************************/
8707 
8708 static LIST_HEAD(qla_cs84xx_list);
8709 static DEFINE_MUTEX(qla_cs84xx_mutex);
8710 
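     /*
      * ISP84xx chip state is shared between functions on the same PCI bus:
      * qla84xx_get_chip() takes a reference on an existing entry found on
      * qla_cs84xx_list or allocates a new one, and qla84xx_put_chip()
      * drops the reference, freeing the entry when the last user is gone.
      */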
8711 static struct qla_chip_state_84xx *
8712 qla84xx_get_chip(struct scsi_qla_host *vha)
8713 {
8714     struct qla_chip_state_84xx *cs84xx;
8715     struct qla_hw_data *ha = vha->hw;
8716 
8717     mutex_lock(&qla_cs84xx_mutex);
8718 
8719     /* Find any shared 84xx chip. */
8720     list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
8721         if (cs84xx->bus == ha->pdev->bus) {
8722             kref_get(&cs84xx->kref);
8723             goto done;
8724         }
8725     }
8726 
8727     cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
8728     if (!cs84xx)
8729         goto done;
8730 
8731     kref_init(&cs84xx->kref);
8732     spin_lock_init(&cs84xx->access_lock);
8733     mutex_init(&cs84xx->fw_update_mutex);
8734     cs84xx->bus = ha->pdev->bus;
8735 
8736     list_add_tail(&cs84xx->list, &qla_cs84xx_list);
8737 done:
8738     mutex_unlock(&qla_cs84xx_mutex);
8739     return cs84xx;
8740 }
8741 
8742 static void
8743 __qla84xx_chip_release(struct kref *kref)
8744 {
8745     struct qla_chip_state_84xx *cs84xx =
8746         container_of(kref, struct qla_chip_state_84xx, kref);
8747 
8748     mutex_lock(&qla_cs84xx_mutex);
8749     list_del(&cs84xx->list);
8750     mutex_unlock(&qla_cs84xx_mutex);
8751     kfree(cs84xx);
8752 }
8753 
8754 void
8755 qla84xx_put_chip(struct scsi_qla_host *vha)
8756 {
8757     struct qla_hw_data *ha = vha->hw;
8758 
8759     if (ha->cs84xx)
8760         kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
8761 }
8762 
8763 static int
8764 qla84xx_init_chip(scsi_qla_host_t *vha)
8765 {
8766     int rval;
8767     uint16_t status[2];
8768     struct qla_hw_data *ha = vha->hw;
8769 
8770     mutex_lock(&ha->cs84xx->fw_update_mutex);
8771 
8772     rval = qla84xx_verify_chip(vha, status);
8773 
8774     mutex_unlock(&ha->cs84xx->fw_update_mutex);
8775 
8776     return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
8777         QLA_SUCCESS;
8778 }
8779 
8780 /* 81XX Support **************************************************************/
8781 
8782 int
8783 qla81xx_nvram_config(scsi_qla_host_t *vha)
8784 {
8785     int   rval;
8786     struct init_cb_81xx *icb;
8787     struct nvram_81xx *nv;
8788     __le32 *dptr;
8789     uint8_t  *dptr1, *dptr2;
8790     uint32_t chksum;
8791     uint16_t cnt;
8792     struct qla_hw_data *ha = vha->hw;
8793     uint32_t faddr;
8794     struct active_regions active_regions = { };
8795 
8796     rval = QLA_SUCCESS;
8797     icb = (struct init_cb_81xx *)ha->init_cb;
8798     nv = ha->nvram;
8799 
8800     /* Determine NVRAM starting address. */
8801     ha->nvram_size = sizeof(*nv);
8802     ha->vpd_size = FA_NVRAM_VPD_SIZE;
8803     if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
8804         ha->vpd_size = FA_VPD_SIZE_82XX;
8805 
8806     if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
8807         qla28xx_get_aux_images(vha, &active_regions);
8808 
8809     /* Get VPD data into cache */
8810     ha->vpd = ha->nvram + VPD_OFFSET;
8811 
8812     faddr = ha->flt_region_vpd;
8813     if (IS_QLA28XX(ha)) {
8814         if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
8815             faddr = ha->flt_region_vpd_sec;
8816         ql_dbg(ql_dbg_init, vha, 0x0110,
8817             "Loading %s nvram image.\n",
8818             active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
8819             "primary" : "secondary");
8820     }
8821     ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);
8822 
8823     /* Get NVRAM data into cache and calculate checksum. */
8824     faddr = ha->flt_region_nvram;
8825     if (IS_QLA28XX(ha)) {
8826         if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
8827             faddr = ha->flt_region_nvram_sec;
8828     }
8829     ql_dbg(ql_dbg_init, vha, 0x0110,
8830         "Loading %s nvram image.\n",
8831         active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
8832         "primary" : "secondary");
8833     ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
8834 
8835     dptr = (__force __le32 *)nv;
8836     for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
8837         chksum += le32_to_cpu(*dptr);
8838 
8839     ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
8840         "Contents of NVRAM:\n");
8841     ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
8842         nv, ha->nvram_size);
8843 
8844     /* Bad NVRAM data, set default parameters. */
8845     if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
8846         le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
8847         /* Reset NVRAM data. */
8848         ql_log(ql_log_info, vha, 0x0073,
8849             "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
8850             chksum, nv->id, le16_to_cpu(nv->nvram_version));
8851         ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
8852         ql_log(ql_log_info, vha, 0x0074,
8853             "Falling back to functioning (yet invalid -- WWPN) "
8854             "defaults.\n");
8855 
8856         /*
8857          * Set default initialization control block.
8858          */
8859         memset(nv, 0, ha->nvram_size);
8860         nv->nvram_version = cpu_to_le16(ICB_VERSION);
8861         nv->version = cpu_to_le16(ICB_VERSION);
8862         nv->frame_payload_size = cpu_to_le16(2048);
8863         nv->execution_throttle = cpu_to_le16(0xFFFF);
8864         nv->exchange_count = cpu_to_le16(0);
8865         nv->port_name[0] = 0x21;
8866         nv->port_name[1] = 0x00 + ha->port_no + 1;
8867         nv->port_name[2] = 0x00;
8868         nv->port_name[3] = 0xe0;
8869         nv->port_name[4] = 0x8b;
8870         nv->port_name[5] = 0x1c;
8871         nv->port_name[6] = 0x55;
8872         nv->port_name[7] = 0x86;
8873         nv->node_name[0] = 0x20;
8874         nv->node_name[1] = 0x00;
8875         nv->node_name[2] = 0x00;
8876         nv->node_name[3] = 0xe0;
8877         nv->node_name[4] = 0x8b;
8878         nv->node_name[5] = 0x1c;
8879         nv->node_name[6] = 0x55;
8880         nv->node_name[7] = 0x86;
8881         nv->login_retry_count = cpu_to_le16(8);
8882         nv->interrupt_delay_timer = cpu_to_le16(0);
8883         nv->login_timeout = cpu_to_le16(0);
8884         nv->firmware_options_1 =
8885             cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
8886         nv->firmware_options_2 = cpu_to_le32(2 << 4);
8887         nv->firmware_options_2 |= cpu_to_le32(BIT_12);
8888         nv->firmware_options_3 = cpu_to_le32(2 << 13);
8889         nv->host_p = cpu_to_le32(BIT_11|BIT_10);
8890         nv->efi_parameters = cpu_to_le32(0);
8891         nv->reset_delay = 5;
8892         nv->max_luns_per_target = cpu_to_le16(128);
8893         nv->port_down_retry_count = cpu_to_le16(30);
8894         nv->link_down_timeout = cpu_to_le16(180);
8895         nv->enode_mac[0] = 0x00;
8896         nv->enode_mac[1] = 0xC0;
8897         nv->enode_mac[2] = 0xDD;
8898         nv->enode_mac[3] = 0x04;
8899         nv->enode_mac[4] = 0x05;
8900         nv->enode_mac[5] = 0x06 + ha->port_no + 1;
8901 
8902         rval = 1;
8903     }
8904 
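         /*
          * On T10-PI capable adapters, clearing the low three bits keeps
          * the frame payload size a multiple of 8 bytes.
          */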
8905     if (IS_T10_PI_CAPABLE(ha))
8906         nv->frame_payload_size &= cpu_to_le16(~7);
8907 
8908     qlt_81xx_config_nvram_stage1(vha, nv);
8909 
8910     /* Reset Initialization control block */
8911     memset(icb, 0, ha->init_cb_size);
8912 
8913     /* Copy 1st segment. */
8914     dptr1 = (uint8_t *)icb;
8915     dptr2 = (uint8_t *)&nv->version;
8916     cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
8917     while (cnt--)
8918         *dptr1++ = *dptr2++;
8919 
8920     icb->login_retry_count = nv->login_retry_count;
8921 
8922     /* Copy 2nd segment. */
8923     dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
8924     dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
8925     cnt = (uint8_t *)&icb->reserved_5 -
8926         (uint8_t *)&icb->interrupt_delay_timer;
8927     while (cnt--)
8928         *dptr1++ = *dptr2++;
8929 
8930     memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
8931     /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
8932     if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
8933         icb->enode_mac[0] = 0x00;
8934         icb->enode_mac[1] = 0xC0;
8935         icb->enode_mac[2] = 0xDD;
8936         icb->enode_mac[3] = 0x04;
8937         icb->enode_mac[4] = 0x05;
8938         icb->enode_mac[5] = 0x06 + ha->port_no + 1;
8939     }
8940 
8941     /* Use extended-initialization control block. */
8942     memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
8943     ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
8944     /*
8945      * Setup driver NVRAM options.
8946      */
8947     qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
8948         "QLE8XXX");
8949 
8950     qlt_81xx_config_nvram_stage2(vha, icb);
8951 
8952     /* Use alternate WWN? */
8953     if (nv->host_p & cpu_to_le32(BIT_15)) {
8954         memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
8955         memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
8956     }
8957 
8958     /* Prepare nodename */
8959     if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
8960         /*
8961          * Firmware will apply the following mask if the nodename was
8962          * not provided.
8963          */
8964         memcpy(icb->node_name, icb->port_name, WWN_SIZE);
8965         icb->node_name[0] &= 0xF0;
8966     }
8967 
8968     if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
8969         if ((nv->enhanced_features & BIT_7) == 0)
8970             ha->flags.scm_supported_a = 1;
8971     }
8972 
8973     /* Set host adapter parameters. */
8974     ha->flags.disable_risc_code_load = 0;
8975     ha->flags.enable_lip_reset = 0;
8976     ha->flags.enable_lip_full_login =
8977         le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
8978     ha->flags.enable_target_reset =
8979         le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
8980     ha->flags.enable_led_scheme = 0;
8981     ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
8982 
8983     ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
8984         (BIT_6 | BIT_5 | BIT_4)) >> 4;
8985 
8986     /* save HBA serial number */
8987     ha->serial0 = icb->port_name[5];
8988     ha->serial1 = icb->port_name[6];
8989     ha->serial2 = icb->port_name[7];
8990     memcpy(vha->node_name, icb->node_name, WWN_SIZE);
8991     memcpy(vha->port_name, icb->port_name, WWN_SIZE);
8992 
8993     icb->execution_throttle = cpu_to_le16(0xFFFF);
8994 
8995     ha->retry_count = le16_to_cpu(nv->login_retry_count);
8996 
8997     /* Set minimum login_timeout to 4 seconds. */
8998     if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
8999         nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
9000     if (le16_to_cpu(nv->login_timeout) < 4)
9001         nv->login_timeout = cpu_to_le16(4);
9002     ha->login_timeout = le16_to_cpu(nv->login_timeout);
9003 
9004     /* Set minimum RATOV to 100 tenths of a second. */
9005     ha->r_a_tov = 100;
9006 
9007     ha->loop_reset_delay = nv->reset_delay;
9008 
9009     /* Link Down Timeout = 0:
9010      *
9011      *  When Port Down timer expires we will start returning
9012      *  I/O's to OS with "DID_NO_CONNECT".
9013      *
9014      * Link Down Timeout != 0:
9015      *
9016      *   The driver waits for the link to come up after link down
9017      *   before returning I/Os to OS with "DID_NO_CONNECT".
9018      */
9019     if (le16_to_cpu(nv->link_down_timeout) == 0) {
9020         ha->loop_down_abort_time =
9021             (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
9022     } else {
9023         ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
9024         ha->loop_down_abort_time =
9025             (LOOP_DOWN_TIME - ha->link_down_timeout);
9026     }
9027 
9028     /* Need enough time to try and get the port back. */
9029     ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
9030     if (qlport_down_retry)
9031         ha->port_down_retry_count = qlport_down_retry;
9032 
9033     /* Set login_retry_count */
9034     ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
9035     if (ha->port_down_retry_count ==
9036         le16_to_cpu(nv->port_down_retry_count) &&
9037         ha->port_down_retry_count > 3)
9038         ha->login_retry_count = ha->port_down_retry_count;
9039     else if (ha->port_down_retry_count > (int)ha->login_retry_count)
9040         ha->login_retry_count = ha->port_down_retry_count;
9041     if (ql2xloginretrycount)
9042         ha->login_retry_count = ql2xloginretrycount;
9043 
9044     /* if not running MSI-X we need handshaking on interrupts */
9045     if (!vha->hw->flags.msix_enabled &&
9046         (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
9047         icb->firmware_options_2 |= cpu_to_le32(BIT_22);
9048 
9049     /* Enable ZIO. */
9050     if (!vha->flags.init_done) {
9051         ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
9052             (BIT_3 | BIT_2 | BIT_1 | BIT_0);
9053         ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
9054             le16_to_cpu(icb->interrupt_delay_timer) : 2;
9055     }
9056     icb->firmware_options_2 &= cpu_to_le32(
9057         ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
9058     vha->flags.process_response_queue = 0;
9059     if (ha->zio_mode != QLA_ZIO_DISABLED) {
9060         ha->zio_mode = QLA_ZIO_MODE_6;
9061 
9062         ql_log(ql_log_info, vha, 0x0075,
9063             "ZIO mode %d enabled; timer delay (%d us).\n",
9064             ha->zio_mode,
9065             ha->zio_timer * 100);
9066 
9067         icb->firmware_options_2 |= cpu_to_le32(
9068             (uint32_t)ha->zio_mode);
9069         icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
9070         vha->flags.process_response_queue = 1;
9071     }
9072 
9073      /* enable RIDA Format2 */
9074     icb->firmware_options_3 |= cpu_to_le32(BIT_0);
9075 
9076     /* N2N: driver will initiate Login instead of FW */
9077     icb->firmware_options_3 |= cpu_to_le32(BIT_8);
9078 
9079     /* Determine NVMe/FCP priority for target ports */
9080     ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
9081 
9082     if (rval) {
9083         ql_log(ql_log_warn, vha, 0x0076,
9084             "NVRAM configuration failed.\n");
9085     }
9086     return (rval);
9087 }
9088 
9089 int
9090 qla82xx_restart_isp(scsi_qla_host_t *vha)
9091 {
9092     int status, rval;
9093     struct qla_hw_data *ha = vha->hw;
9094     struct scsi_qla_host *vp, *tvp;
9095     unsigned long flags;
9096 
9097     status = qla2x00_init_rings(vha);
9098     if (!status) {
9099         clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
9100         ha->flags.chip_reset_done = 1;
9101 
9102         status = qla2x00_fw_ready(vha);
9103         if (!status) {
9104             /* Issue a marker after FW becomes ready. */
9105             qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
9106             vha->flags.online = 1;
9107             set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
9108         }
9109 
9110         /* if no cable then assume it's good */
9111         if ((vha->device_flags & DFLG_NO_CABLE))
9112             status = 0;
9113     }
9114 
9115     if (!status) {
9116         clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
9117 
9118         if (!atomic_read(&vha->loop_down_timer)) {
9119             /*
9120              * Issue marker command only when we are going
9121              * to start the I/O.
9122              */
9123             vha->marker_needed = 1;
9124         }
9125 
9126         ha->isp_ops->enable_intrs(ha);
9127 
9128         ha->isp_abort_cnt = 0;
9129         clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
9130 
9131         /* Update the firmware version */
9132         status = qla82xx_check_md_needed(vha);
9133 
9134         if (ha->fce) {
9135             ha->flags.fce_enabled = 1;
9136             memset(ha->fce, 0,
9137                 fce_calc_size(ha->fce_bufs));
9138             rval = qla2x00_enable_fce_trace(vha,
9139                 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
9140                 &ha->fce_bufs);
9141             if (rval) {
9142                 ql_log(ql_log_warn, vha, 0x8001,
9143                     "Unable to reinitialize FCE (%d).\n",
9144                     rval);
9145                 ha->flags.fce_enabled = 0;
9146             }
9147         }
9148 
9149         if (ha->eft) {
9150             memset(ha->eft, 0, EFT_SIZE);
9151             rval = qla2x00_enable_eft_trace(vha,
9152                 ha->eft_dma, EFT_NUM_BUFFERS);
9153             if (rval) {
9154                 ql_log(ql_log_warn, vha, 0x8010,
9155                     "Unable to reinitialize EFT (%d).\n",
9156                     rval);
9157             }
9158         }
9159     }
9160 
9161     if (!status) {
9162         ql_dbg(ql_dbg_taskm, vha, 0x8011,
9163             "qla82xx_restart_isp succeeded.\n");
9164 
9165         spin_lock_irqsave(&ha->vport_slock, flags);
9166         list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
9167             if (vp->vp_idx) {
9168                 atomic_inc(&vp->vref_count);
9169                 spin_unlock_irqrestore(&ha->vport_slock, flags);
9170 
9171                 qla2x00_vp_abort_isp(vp);
9172 
9173                 spin_lock_irqsave(&ha->vport_slock, flags);
9174                 atomic_dec(&vp->vref_count);
9175             }
9176         }
9177         spin_unlock_irqrestore(&ha->vport_slock, flags);
9178 
9179     } else {
9180         ql_log(ql_log_warn, vha, 0x8016,
9181             "qla82xx_restart_isp **** FAILED ****.\n");
9182     }
9183 
9184     return status;
9185 }
9186 
9187 /*
9188  * qla24xx_get_fcp_prio
9189  *  Gets the fcp cmd priority value for the logged in port.
9190  *  Looks for a match of the port descriptors within
9191  *  each of the fcp prio config entries. If a match is found,
9192  *  the tag (priority) value is returned.
9193  *
9194  * Input:
9195  *  vha = scsi host structure pointer.
9196  *  fcport = port structure pointer.
9197  *
9198  * Return:
9199  *  non-zero (if found)
9200  *  -1 (if not found)
9201  *
9202  * Context:
9203  *  Kernel context
9204  */
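     /*
      * Note: an entry matches only when both of its source and destination
      * descriptors match (pid_match or wwn_match reaching 2 below); fields
      * programmed to the INVALID_PORT_ID pattern or to an all-ones WWN act
      * as wildcards and match anything.
      */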
9205 static int
9206 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
9207 {
9208     int i, entries;
9209     uint8_t pid_match, wwn_match;
9210     int priority;
9211     uint32_t pid1, pid2;
9212     uint64_t wwn1, wwn2;
9213     struct qla_fcp_prio_entry *pri_entry;
9214     struct qla_hw_data *ha = vha->hw;
9215 
9216     if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
9217         return -1;
9218 
9219     priority = -1;
9220     entries = ha->fcp_prio_cfg->num_entries;
9221     pri_entry = &ha->fcp_prio_cfg->entry[0];
9222 
9223     for (i = 0; i < entries; i++) {
9224         pid_match = wwn_match = 0;
9225 
9226         if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
9227             pri_entry++;
9228             continue;
9229         }
9230 
9231         /* check source pid for a match */
9232         if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
9233             pid1 = pri_entry->src_pid & INVALID_PORT_ID;
9234             pid2 = vha->d_id.b24 & INVALID_PORT_ID;
9235             if (pid1 == INVALID_PORT_ID)
9236                 pid_match++;
9237             else if (pid1 == pid2)
9238                 pid_match++;
9239         }
9240 
9241         /* check destination pid for a match */
9242         if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
9243             pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
9244             pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
9245             if (pid1 == INVALID_PORT_ID)
9246                 pid_match++;
9247             else if (pid1 == pid2)
9248                 pid_match++;
9249         }
9250 
9251         /* check source WWN for a match */
9252         if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
9253             wwn1 = wwn_to_u64(vha->port_name);
9254             wwn2 = wwn_to_u64(pri_entry->src_wwpn);
9255             if (wwn2 == (uint64_t)-1)
9256                 wwn_match++;
9257             else if (wwn1 == wwn2)
9258                 wwn_match++;
9259         }
9260 
9261         /* check destination WWN for a match */
9262         if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
9263             wwn1 = wwn_to_u64(fcport->port_name);
9264             wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
9265             if (wwn2 == (uint64_t)-1)
9266                 wwn_match++;
9267             else if (wwn1 == wwn2)
9268                 wwn_match++;
9269         }
9270 
9271         if (pid_match == 2 || wwn_match == 2) {
9272             /* Found a matching entry */
9273             if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
9274                 priority = pri_entry->tag;
9275             break;
9276         }
9277 
9278         pri_entry++;
9279     }
9280 
9281     return priority;
9282 }
9283 
9284 /*
9285  * qla24xx_update_fcport_fcp_prio
9286  *  Activates fcp priority for the logged in fc port
9287  *
9288  * Input:
9289  *  vha = scsi host structure pointer.
9290  *  fcport = port structure pointer.
9291  *
9292  * Return:
9293  *  QLA_SUCCESS or QLA_FUNCTION_FAILED
9294  *
9295  * Context:
9296  *  Kernel context.
9297  */
9298 int
9299 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
9300 {
9301     int ret;
9302     int priority;
9303     uint16_t mb[5];
9304 
9305     if (fcport->port_type != FCT_TARGET ||
9306         fcport->loop_id == FC_NO_LOOP_ID)
9307         return QLA_FUNCTION_FAILED;
9308 
9309     priority = qla24xx_get_fcp_prio(vha, fcport);
9310     if (priority < 0)
9311         return QLA_FUNCTION_FAILED;
9312 
9313     if (IS_P3P_TYPE(vha->hw)) {
9314         fcport->fcp_prio = priority & 0xf;
9315         return QLA_SUCCESS;
9316     }
9317 
9318     ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
9319     if (ret == QLA_SUCCESS) {
9320         if (fcport->fcp_prio != priority)
9321             ql_dbg(ql_dbg_user, vha, 0x709e,
9322                 "Updated FCP_CMND priority - value=%d loop_id=%d "
9323                 "port_id=%02x%02x%02x.\n", priority,
9324                 fcport->loop_id, fcport->d_id.b.domain,
9325                 fcport->d_id.b.area, fcport->d_id.b.al_pa);
9326         fcport->fcp_prio = priority & 0xf;
9327     } else
9328         ql_dbg(ql_dbg_user, vha, 0x704f,
9329             "Unable to update FCP_CMND priority - ret=0x%x for "
9330             "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
9331             fcport->d_id.b.domain, fcport->d_id.b.area,
9332             fcport->d_id.b.al_pa);
9333     return  ret;
9334 }
9335 
9336 /*
9337  * qla24xx_update_all_fcp_prio
9338  *  Activates fcp priority for all the logged in ports
9339  *
9340  * Input:
9341  *  vha = scsi host structure pointer.
9342  *
9343  * Return:
9344  *  QLA_SUCCESS or QLA_FUNCTION_FAILED
9345  *
9346  * Context:
9347  *  Kernel context.
9348  */
9349 int
9350 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
9351 {
9352     int ret;
9353     fc_port_t *fcport;
9354 
9355     ret = QLA_FUNCTION_FAILED;
9356     /* We need to set priority for all logged in ports */
9357     list_for_each_entry(fcport, &vha->vp_fcports, list)
9358         ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
9359 
9360     return ret;
9361 }
9362 
9363 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
9364     int vp_idx, bool startqp)
9365 {
9366     int rsp_id = 0;
9367     int  req_id = 0;
9368     int i;
9369     struct qla_hw_data *ha = vha->hw;
9370     uint16_t qpair_id = 0;
9371     struct qla_qpair *qpair = NULL;
9372     struct qla_msix_entry *msix;
9373 
9374     if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
9375         ql_log(ql_log_warn, vha, 0x00181,
9376             "FW/Driver is not multi-queue capable.\n");
9377         return NULL;
9378     }
9379 
9380     if (ql2xmqsupport || ql2xnvmeenable) {
9381         qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
9382         if (qpair == NULL) {
9383             ql_log(ql_log_warn, vha, 0x0182,
9384                 "Failed to allocate memory for queue pair.\n");
9385             return NULL;
9386         }
9387 
9388         qpair->hw = vha->hw;
9389         qpair->vha = vha;
9390         qpair->qp_lock_ptr = &qpair->qp_lock;
9391         spin_lock_init(&qpair->qp_lock);
9392         qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
9393 
9394         /* Assign an available queue pair id */
9395         mutex_lock(&ha->mq_lock);
9396         qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
9397         if (ha->num_qpairs >= ha->max_qpairs) {
9398             mutex_unlock(&ha->mq_lock);
9399             ql_log(ql_log_warn, vha, 0x0183,
9400                 "No resources to create additional q pair.\n");
9401             goto fail_qid_map;
9402         }
9403         ha->num_qpairs++;
9404         set_bit(qpair_id, ha->qpair_qid_map);
9405         ha->queue_pair_map[qpair_id] = qpair;
9406         qpair->id = qpair_id;
9407         qpair->vp_idx = vp_idx;
9408         qpair->fw_started = ha->flags.fw_started;
9409         INIT_LIST_HEAD(&qpair->hints_list);
9410         qpair->chip_reset = ha->base_qpair->chip_reset;
9411         qpair->enable_class_2 = ha->base_qpair->enable_class_2;
9412         qpair->enable_explicit_conf =
9413             ha->base_qpair->enable_explicit_conf;
9414 
9415         for (i = 0; i < ha->msix_count; i++) {
9416             msix = &ha->msix_entries[i];
9417             if (msix->in_use)
9418                 continue;
9419             qpair->msix = msix;
9420             ql_dbg(ql_dbg_multiq, vha, 0xc00f,
9421                 "Vector %x selected for qpair\n", msix->vector);
9422             break;
9423         }
9424         if (!qpair->msix) {
9425             ql_log(ql_log_warn, vha, 0x0184,
9426                 "Out of MSI-X vectors!.\n");
9427             goto fail_msix;
9428         }
9429 
9430         qpair->msix->in_use = 1;
9431         list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
9432         qpair->pdev = ha->pdev;
9433         if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
9434             qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
9435 
9436         mutex_unlock(&ha->mq_lock);
9437 
9438         /* Create response queue first */
9439         rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
9440         if (!rsp_id) {
9441             ql_log(ql_log_warn, vha, 0x0185,
9442                 "Failed to create response queue.\n");
9443             goto fail_rsp;
9444         }
9445 
9446         qpair->rsp = ha->rsp_q_map[rsp_id];
9447 
9448         /* Create request queue */
9449         req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
9450             startqp);
9451         if (!req_id) {
9452             ql_log(ql_log_warn, vha, 0x0186,
9453                 "Failed to create request queue.\n");
9454             goto fail_req;
9455         }
9456 
9457         qpair->req = ha->req_q_map[req_id];
9458         qpair->rsp->req = qpair->req;
9459         qpair->rsp->qpair = qpair;
9460         /* Init qpair to this CPU; will adjust at run time. */
9461         qla_cpu_update(qpair, raw_smp_processor_id());
9462 
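         /*
          * Advertise DIF/DIX on this queue pair only when protection is
          * enabled via ql2xenabledif and the firmware reports support
          * (fw_attributes BIT_4).
          */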
9463         if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
9464             if (ha->fw_attributes & BIT_4)
9465                 qpair->difdix_supported = 1;
9466         }
9467 
9468         qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
9469         if (!qpair->srb_mempool) {
9470             ql_log(ql_log_warn, vha, 0xd036,
9471                 "Failed to create srb mempool for qpair %d\n",
9472                 qpair->id);
9473             goto fail_mempool;
9474         }
9475 
9476         /* Mark as online */
9477         qpair->online = 1;
9478 
9479         if (!vha->flags.qpairs_available)
9480             vha->flags.qpairs_available = 1;
9481 
9482         ql_dbg(ql_dbg_multiq, vha, 0xc00d,
9483             "Request/Response queue pair created, id %d\n",
9484             qpair->id);
9485         ql_dbg(ql_dbg_init, vha, 0x0187,
9486             "Request/Response queue pair created, id %d\n",
9487             qpair->id);
9488     }
9489     return qpair;
9490 
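     /*
      * Error unwind: fail_mempool and fail_req fall through to delete the
      * response queue, fail_rsp releases the MSI-X vector and list
      * linkage, fail_msix undoes the queue pair id bookkeeping, and
      * fail_qid_map frees the qpair itself.
      */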
9491 fail_mempool:
9492 fail_req:
9493     qla25xx_delete_rsp_que(vha, qpair->rsp);
9494 fail_rsp:
9495     mutex_lock(&ha->mq_lock);
9496     qpair->msix->in_use = 0;
9497     list_del(&qpair->qp_list_elem);
9498     if (list_empty(&vha->qp_list))
9499         vha->flags.qpairs_available = 0;
9500 fail_msix:
9501     ha->queue_pair_map[qpair_id] = NULL;
9502     clear_bit(qpair_id, ha->qpair_qid_map);
9503     ha->num_qpairs--;
9504     mutex_unlock(&ha->mq_lock);
9505 fail_qid_map:
9506     kfree(qpair);
9507     return NULL;
9508 }
9509 
9510 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
9511 {
9512     int ret = QLA_FUNCTION_FAILED;
9513     struct qla_hw_data *ha = qpair->hw;
9514 
9515     qpair->delete_in_progress = 1;
9516 
9517     ret = qla25xx_delete_req_que(vha, qpair->req);
9518     if (ret != QLA_SUCCESS)
9519         goto fail;
9520 
9521     ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
9522     if (ret != QLA_SUCCESS)
9523         goto fail;
9524 
9525     mutex_lock(&ha->mq_lock);
9526     ha->queue_pair_map[qpair->id] = NULL;
9527     clear_bit(qpair->id, ha->qpair_qid_map);
9528     ha->num_qpairs--;
9529     list_del(&qpair->qp_list_elem);
9530     if (list_empty(&vha->qp_list)) {
9531         vha->flags.qpairs_available = 0;
9532         vha->flags.qpairs_req_created = 0;
9533         vha->flags.qpairs_rsp_created = 0;
9534     }
9535     mempool_destroy(qpair->srb_mempool);
9536     kfree(qpair);
9537     mutex_unlock(&ha->mq_lock);
9538 
9539     return QLA_SUCCESS;
9540 fail:
9541     return ret;
9542 }
9543 
9544 uint64_t
9545 qla2x00_count_set_bits(uint32_t num)
9546 {
9547     /* Brian Kernighan's Algorithm */
9548     u64 count = 0;
9549 
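     /*
      * num &= (num - 1) clears the lowest set bit, so the loop runs once
      * per set bit; e.g. 0b1100 -> 0b1000 -> 0 takes two iterations for
      * the two set bits.
      */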
9550     while (num) {
9551         num &= (num - 1);
9552         count++;
9553     }
9554     return count;
9555 }
9556 
9557 uint64_t
9558 qla2x00_get_num_tgts(scsi_qla_host_t *vha)
9559 {
9560     fc_port_t *f, *tf;
9561     u64 count = 0;
9562 
9563     f = NULL;
9564     tf = NULL;
9565 
9566     list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
9567         if (f->port_type != FCT_TARGET)
9568             continue;
9569         count++;
9570     }
9571     return count;
9572 }
9573 
9574 int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags)
9575 {
9576     scsi_qla_host_t *vha = shost_priv(host);
9577     fc_port_t *fcport = NULL;
9578     unsigned long int_flags;
9579 
9580     if (flags & QLA2XX_HW_ERROR)
9581         vha->hw_err_cnt = 0;
9582     if (flags & QLA2XX_SHT_LNK_DWN)
9583         vha->short_link_down_cnt = 0;
9584     if (flags & QLA2XX_INT_ERR)
9585         vha->interface_err_cnt = 0;
9586     if (flags & QLA2XX_CMD_TIMEOUT)
9587         vha->cmd_timeout_cnt = 0;
9588     if (flags & QLA2XX_RESET_CMD_ERR)
9589         vha->reset_cmd_err_cnt = 0;
9590     if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
9591         spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
9592         list_for_each_entry(fcport, &vha->vp_fcports, list) {
9593             fcport->tgt_short_link_down_cnt = 0;
9594             fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
9595         }
9596         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
9597     }
9598     vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
9599     return 0;
9600 }
9601 
9602 int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags)
9603 {
9604     return qla2xxx_reset_stats(host, flags);
9605 }
9606 
9607 int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags)
9608 {
9609     return qla2xxx_reset_stats(host, flags);
9610 }
9611 
9612 int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags,
9613               void *data, u64 size)
9614 {
9615     scsi_qla_host_t *vha = shost_priv(host);
9616     struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
9617     struct ql_vnd_stats *rsp_data = &resp->stats;
9618     u64 ini_entry_count = 0;
9619     u64 i = 0;
9620     u64 entry_count = 0;
9621     u64 num_tgt = 0;
9622     u32 tmp_stat_type = 0;
9623     fc_port_t *fcport = NULL;
9624     unsigned long int_flags;
9625 
9626     /* Copy stat type to work on it */
9627     tmp_stat_type = flags;
9628 
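     /*
      * BIT_17 requests the per-target statistics, which are reported one
      * entry per target (num_tgt) rather than as a single initiator-level
      * entry, so it is excluded from the set-bit count below.
      */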
9629     if (tmp_stat_type & BIT_17) {
9630         num_tgt = qla2x00_get_num_tgts(vha);
9631         /* unset BIT_17 */
9632         tmp_stat_type &= ~(1 << 17);
9633     }
9634     ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
9635 
9636     entry_count = ini_entry_count + num_tgt;
9637 
9638     rsp_data->entry_count = entry_count;
9639 
9640     i = 0;
9641     if (flags & QLA2XX_HW_ERROR) {
9642         rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
9643         rsp_data->entry[i].tgt_num = 0x0;
9644         rsp_data->entry[i].cnt = vha->hw_err_cnt;
9645         i++;
9646     }
9647 
9648     if (flags & QLA2XX_SHT_LNK_DWN) {
9649         rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
9650         rsp_data->entry[i].tgt_num = 0x0;
9651         rsp_data->entry[i].cnt = vha->short_link_down_cnt;
9652         i++;
9653     }
9654 
9655     if (flags & QLA2XX_INT_ERR) {
9656         rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
9657         rsp_data->entry[i].tgt_num = 0x0;
9658         rsp_data->entry[i].cnt = vha->interface_err_cnt;
9659         i++;
9660     }
9661 
9662     if (flags & QLA2XX_CMD_TIMEOUT) {
9663         rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
9664         rsp_data->entry[i].tgt_num = 0x0;
9665         rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
9666         i++;
9667     }
9668 
9669     if (flags & QLA2XX_RESET_CMD_ERR) {
9670         rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
9671         rsp_data->entry[i].tgt_num = 0x0;
9672         rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
9673         i++;
9674     }
9675 
9676     /* i continues from the previous loop; the per-target
9677      * entries follow the initiator entries.
9678      */
9679     if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
9680         spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
9681         list_for_each_entry(fcport, &vha->vp_fcports, list) {
9682             if (fcport->port_type != FCT_TARGET)
9683                 continue;
9684             if (!fcport->rport)
9685                 continue;
9686             rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
9687             rsp_data->entry[i].tgt_num = fcport->rport->number;
9688             rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
9689             i++;
9690         }
9691         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
9692     }
9693     resp->status = EXT_STATUS_OK;
9694 
9695     return 0;
9696 }
9697 
9698 int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags,
9699               struct fc_rport *rport, void *data, u64 size)
9700 {
9701     struct ql_vnd_tgt_stats_resp *tgt_data = data;
9702     fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
9703 
9704     tgt_data->status = 0;
9705     tgt_data->stats.entry_count = 1;
9706     tgt_data->stats.entry[0].stat_type = flags;
9707     tgt_data->stats.entry[0].tgt_num = rport->number;
9708     tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;
9709 
9710     return 0;
9711 }
9712 
9713 int qla2xxx_disable_port(struct Scsi_Host *host)
9714 {
9715     scsi_qla_host_t *vha = shost_priv(host);
9716 
9717     vha->hw->flags.port_isolated = 1;
9718 
9719     if (qla2x00_isp_reg_stat(vha->hw)) {
9720         ql_log(ql_log_info, vha, 0x9006,
9721             "PCI/Register disconnect, exiting.\n");
9722         qla_pci_set_eeh_busy(vha);
9723         return FAILED;
9724     }
9725     if (qla2x00_chip_is_down(vha))
9726         return 0;
9727 
9728     if (vha->flags.online) {
9729         qla2x00_abort_isp_cleanup(vha);
9730         qla2x00_wait_for_sess_deletion(vha);
9731     }
9732 
9733     return 0;
9734 }
9735 
9736 int qla2xxx_enable_port(struct Scsi_Host *host)
9737 {
9738     scsi_qla_host_t *vha = shost_priv(host);
9739 
9740     if (qla2x00_isp_reg_stat(vha->hw)) {
9741         ql_log(ql_log_info, vha, 0x9001,
9742             "PCI/Register disconnect, exiting.\n");
9743         qla_pci_set_eeh_busy(vha);
9744         return FAILED;
9745     }
9746 
9747     vha->hw->flags.port_isolated = 0;
9748     /* Set the flag to 1, so that isp_abort can proceed */
9749     vha->flags.online = 1;
9750     set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
9751     qla2xxx_wake_dpc(vha);
9752 
9753     return 0;
9754 }