// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>

#include "qla_target.h"

/*
 * Driver version
 */
char qla2x00_version_str[40];

static int apidev_major;

/*
 * SRB allocation cache
 */
struct kmem_cache *srb_cachep;

int ql2xfulldump_on_mpifail;
module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
         "Set this to take full dump on MPI hang.");

int ql2xenforce_iocb_limit = 1;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
         "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");

/*
 * CT6 CTX allocation cache
 */
static struct kmem_cache *ctx_cachep;
/*
 * error level for logging
 */
uint ql_errlev = 0x8001;

int ql2xsecenable;
module_param(ql2xsecenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xsecenable,
    "Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled.");

static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
        "Specify if Class 2 operations are supported from the very "
        "beginning. Default is 0 - class 2 not supported.");


int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
        "Login timeout value in seconds.");

int qlport_down_retry;
module_param(qlport_down_retry, int, S_IRUGO);
MODULE_PARM_DESC(qlport_down_retry,
        "Maximum number of command retries to a port that returns "
        "a PORT-DOWN status.");

int ql2xplogiabsentdevice;
module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xplogiabsentdevice,
        "Option to enable PLOGI to devices that are not present after "
        "a Fabric scan.  This is needed for several broken switches. "
        "Default is 0 - no PLOGI. 1 - perform PLOGI.");

int ql2xloginretrycount;
module_param(ql2xloginretrycount, int, S_IRUGO);
MODULE_PARM_DESC(ql2xloginretrycount,
        "Specify an alternate value for the NVRAM login retry count.");

int ql2xallocfwdump = 1;
module_param(ql2xallocfwdump, int, S_IRUGO);
MODULE_PARM_DESC(ql2xallocfwdump,
        "Option to enable allocation of memory for a firmware dump "
        "during HBA initialization.  Memory allocation requirements "
        "vary by ISP type.  Default is 1 - allocate memory.");

int ql2xextended_error_logging;
module_param(ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
module_param_named(logging, ql2xextended_error_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xextended_error_logging,
        "Option to enable extended error logging,\n"
        "\t\tDefault is 0 - no logging.  0x40000000 - Module Init & Probe.\n"
        "\t\t0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.\n"
        "\t\t0x08000000 - IO tracing.    0x04000000 - DPC Thread.\n"
        "\t\t0x02000000 - Async events.  0x01000000 - Timer routines.\n"
        "\t\t0x00800000 - User space.    0x00400000 - Task Management.\n"
        "\t\t0x00200000 - AER/EEH.       0x00100000 - Multi Q.\n"
        "\t\t0x00080000 - P3P Specific.  0x00040000 - Virtual Port.\n"
        "\t\t0x00020000 - Buffer Dump.   0x00010000 - Misc.\n"
        "\t\t0x00008000 - Verbose.       0x00004000 - Target.\n"
        "\t\t0x00002000 - Target Mgmt.   0x00001000 - Target TMF.\n"
        "\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
        "\t\t0x1e400000 - Preferred value for capturing essential "
        "debug information (equivalent to old "
        "ql2xextended_error_logging=1).\n"
        "\t\tDo LOGICAL OR of the value to enable more than one level");
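
/*
 * Example (illustrative values): to enable Module Init & Probe (0x40000000)
 * together with IO tracing (0x08000000), OR the two masks:
 *
 *   modprobe qla2xxx ql2xextended_error_logging=0x48000000
 *
 * or, with the module already loaded (the parameter is writable by root):
 *
 *   echo 0x48000000 > /sys/module/qla2xxx/parameters/ql2xextended_error_logging
 */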

int ql2xshiftctondsd = 6;
module_param(ql2xshiftctondsd, int, S_IRUGO);
MODULE_PARM_DESC(ql2xshiftctondsd,
        "Set to control shifting of command type processing "
        "based on total number of SG elements.");

int ql2xfdmienable = 1;
module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
module_param_named(fdmi, ql2xfdmienable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfdmienable,
        "Enables FDMI registrations. "
        "0 - no FDMI registrations. "
        "1 - provide FDMI registrations (default).");

#define MAX_Q_DEPTH 64
static int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
        "Maximum queue depth to set for each LUN. "
        "Default is 64.");

int ql2xenabledif = 2;
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
        " Enable T10-CRC-DIF:\n"
        " Default is 2.\n"
        "  0 -- No DIF Support\n"
        "  1 -- Enable DIF for all types\n"
        "  2 -- Enable DIF for all types, except Type 0.\n");

#if (IS_ENABLED(CONFIG_NVME_FC))
int ql2xnvmeenable = 1;
#else
int ql2xnvmeenable;
#endif
module_param(ql2xnvmeenable, int, 0644);
MODULE_PARM_DESC(ql2xnvmeenable,
    "Enables NVME support. "
    "0 - no NVMe. 1 - NVMe enabled (default when CONFIG_NVME_FC is set).");

int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
        " Enable T10-CRC-DIF Error isolation by HBA:\n"
        " Default is 2.\n"
        "  0 -- Error isolation disabled\n"
        "  1 -- Error isolation enabled only for DIX Type 0\n"
        "  2 -- Error isolation enabled for all Types\n");

int ql2xiidmaenable = 1;
module_param(ql2xiidmaenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xiidmaenable,
        "Enables iIDMA settings. "
        "Default is 1 - perform iIDMA. 0 - no iIDMA.");

int ql2xmqsupport = 1;
module_param(ql2xmqsupport, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmqsupport,
        "Enable on demand multiple queue pairs support. "
        "Default is 1 for supported. "
        "Set it to 0 to turn off mq qpair support.");

int ql2xfwloadbin;
module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
module_param_named(fwload, ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xfwloadbin,
        "Option to specify location from which to load ISP firmware:\n"
        " 2 -- load firmware via the request_firmware() (hotplug)\n"
        "      interface.\n"
        " 1 -- load firmware from flash.\n"
        " 0 -- use default semantics.\n");

int ql2xetsenable;
module_param(ql2xetsenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xetsenable,
        "Enables firmware ETS burst. "
        "Default is 0 - skip ETS enablement.");

int ql2xdbwr = 1;
module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdbwr,
        "Option to specify scheme for request queue posting.\n"
        " 0 -- Regular doorbell.\n"
        " 1 -- CAMRAM doorbell (faster).\n");

int ql2xgffidenable;
module_param(ql2xgffidenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xgffidenable,
        "Enables GFF_ID checks of port type. "
        "Default is 0 - Do not use GFF_ID information.");

int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
        "Enables issue of TM IOCBs asynchronously via IOCB mechanism. "
        "Default is 1 - Issue TM IOCBs via IOCB mechanism; 0 - via mailbox mechanism.");

int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xdontresethba,
        "Option to specify reset behaviour.\n"
        " 0 (Default) -- Reset on failure.\n"
        " 1 -- Do not reset on failure.\n");

uint64_t ql2xmaxlun = MAX_LUNS;
module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
        "Defines the maximum LU number to register with the SCSI "
        "midlayer. Default is 65535.");

int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
        "Set the Minidump driver capture mask level. "
        "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
        "Enable/disable MiniDump. "
        "0 - MiniDump disabled. "
        "1 (Default) - MiniDump enabled.");

int ql2xexlogins;
module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xexlogins,
         "Number of extended Logins. "
         "0 (Default)- Disabled.");

int ql2xexchoffld = 1024;
module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld,
    "Number of target exchanges.");

int ql2xiniexchg = 1024;
module_param(ql2xiniexchg, uint, 0644);
MODULE_PARM_DESC(ql2xiniexchg,
    "Number of initiator exchanges.");

int ql2xfwholdabts;
module_param(ql2xfwholdabts, int, S_IRUGO);
MODULE_PARM_DESC(ql2xfwholdabts,
        "Allow FW to hold status IOCB until ABTS rsp received. "
        "0 (Default) Do not set fw option. "
        "1 - Set fw option to hold ABTS.");

int ql2xmvasynctoatio = 1;
module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmvasynctoatio,
        "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ. "
        "0 (Default) - Do not move IOCBs. "
        "1 - Move IOCBs.");

int ql2xautodetectsfp = 1;
module_param(ql2xautodetectsfp, int, 0444);
MODULE_PARM_DESC(ql2xautodetectsfp,
         "Detect SFP range and set appropriate distance.\n"
         "1 (Default): Enable\n");

int ql2xenablemsix = 1;
module_param(ql2xenablemsix, int, 0444);
MODULE_PARM_DESC(ql2xenablemsix,
         "Set to enable MSI or MSI-X interrupt mechanism.\n"
         " Default is 1, enable MSI-X interrupt mechanism.\n"
         " 0 -- enable traditional pin-based mechanism.\n"
         " 1 -- enable MSI-X interrupt mechanism.\n"
         " 2 -- enable MSI interrupt mechanism.\n");

int qla2xuseresexchforels;
module_param(qla2xuseresexchforels, int, 0444);
MODULE_PARM_DESC(qla2xuseresexchforels,
         "Reserve 1/2 of emergency exchanges for ELS.\n"
         " 0 (default): disabled");

static int ql2xprotmask;
module_param(ql2xprotmask, int, 0644);
MODULE_PARM_DESC(ql2xprotmask,
         "Override DIF/DIX protection capabilities mask\n"
         "Default is 0 which sets protection mask based on "
         "capabilities reported by HBA firmware.\n");

static int ql2xprotguard;
module_param(ql2xprotguard, int, 0644);
MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX checksum\n"
         "  0 -- Let HBA firmware decide\n"
         "  1 -- Force T10 CRC\n"
         "  2 -- Force IP checksum\n");

int ql2xdifbundlinginternalbuffers;
module_param(ql2xdifbundlinginternalbuffers, int, 0644);
MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
    "Force using internal buffers for DIF information\n"
    "0 (Default). Based on check.\n"
    "1 Force using internal buffers\n");

int ql2xsmartsan;
module_param(ql2xsmartsan, int, 0444);
module_param_named(smartsan, ql2xsmartsan, int, 0444);
MODULE_PARM_DESC(ql2xsmartsan,
        "Send SmartSAN Management Attributes for FDMI Registration."
        " Default is 0 - No SmartSAN registration,"
        " 1 - Register SmartSAN Management Attributes.");

int ql2xrdpenable;
module_param(ql2xrdpenable, int, 0444);
module_param_named(rdpenable, ql2xrdpenable, int, 0444);
MODULE_PARM_DESC(ql2xrdpenable,
        "Enables RDP responses. "
        "0 - no RDP responses (default). "
        "1 - provide RDP responses.");

int ql2xabts_wait_nvme = 1;
module_param(ql2xabts_wait_nvme, int, 0444);
MODULE_PARM_DESC(ql2xabts_wait_nvme,
         "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)");

u32 ql2xdelay_before_pci_error_handling = 5;
module_param(ql2xdelay_before_pci_error_handling, uint, 0644);
MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling,
    "Number of seconds to wait before the driver begins PCI error self-handling (default: 5).\n");

int ql2xrspq_follow_inptr = 1;
module_param(ql2xrspq_follow_inptr, int, 0644);
MODULE_PARM_DESC(ql2xrspq_follow_inptr,
         "Follow RSP IN pointer for RSP updates for HBAs 27xx and newer (default: 1).");

int ql2xrspq_follow_inptr_legacy = 1;
module_param(ql2xrspq_follow_inptr_legacy, int, 0644);
MODULE_PARM_DESC(ql2xrspq_follow_inptr_legacy,
         "Follow RSP IN pointer for RSP updates for HBAs older than 27XX. (default: 1).");

static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
static void qla2x00_destroy_deferred_work(struct qla_hw_data *);

u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES;
module_param(ql2xnvme_queues, uint, S_IRUGO);
MODULE_PARM_DESC(ql2xnvme_queues,
    "Number of NVMe Queues that can be configured.\n"
    "Final value will be min(ql2xnvme_queues, num_cpus, num_chip_queues)\n"
    "1 - Minimum number of queues supported\n"
    "8 - Default value");
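
/*
 * Worked example (illustrative numbers): with ql2xnvme_queues=8 on a 4-CPU
 * host whose adapter exposes 16 hardware queues, the effective count is
 * min(8, 4, 16) = 4.
 */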

static struct scsi_transport_template *qla2xxx_transport_template = NULL;
struct scsi_transport_template *qla2xxx_transport_vport_template = NULL;

/* TODO Convert to inlines
 *
 * Timer routines
 */

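/*
 * Note: the 'interval' argument to the timer helpers below is in seconds;
 * multiplying by HZ converts it to jiffies for the timer core.
 */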
__inline__ void
qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval)
{
    timer_setup(&vha->timer, qla2x00_timer, 0);
    vha->timer.expires = jiffies + interval * HZ;
    add_timer(&vha->timer);
    vha->timer_active = 1;
}

static inline void
qla2x00_restart_timer(scsi_qla_host_t *vha, unsigned long interval)
{
    /* Currently used for 82XX only. */
    if (vha->device_flags & DFLG_DEV_FAILED) {
        ql_dbg(ql_dbg_timer, vha, 0x600d,
            "Device in a failed state, returning.\n");
        return;
    }

    mod_timer(&vha->timer, jiffies + interval * HZ);
}

static __inline__ void
qla2x00_stop_timer(scsi_qla_host_t *vha)
{
    del_timer_sync(&vha->timer);
    vha->timer_active = 0;
}

static int qla2x00_do_dpc(void *data);

static void qla2x00_rst_aen(scsi_qla_host_t *);

static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
    struct req_que **, struct rsp_que **);
static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair);

/* -------------------------------------------------------------------------- */
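/*
 * Tie the base queue pair to request/response queue zero and seed its
 * defaults (lock pointer, MSI-X vector, SRB mempool) before any I/O starts.
 */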
static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp)
{
    struct qla_hw_data *ha = vha->hw;

    rsp->qpair = ha->base_qpair;
    rsp->req = req;
    ha->base_qpair->hw = ha;
    ha->base_qpair->req = req;
    ha->base_qpair->rsp = rsp;
    ha->base_qpair->vha = vha;
    ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
    ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
    ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
    ha->base_qpair->srb_mempool = ha->srb_mempool;
    INIT_LIST_HEAD(&ha->base_qpair->hints_list);
    ha->base_qpair->enable_class_2 = ql2xenableclass2;
    /* init qpair to this cpu. Will adjust at run time. */
    qla_cpu_update(rsp->qpair, raw_smp_processor_id());
    ha->base_qpair->pdev = ha->pdev;

    if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
        ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
}

static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
                struct rsp_que *rsp)
{
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

    ha->req_q_map = kcalloc(ha->max_req_queues, sizeof(struct req_que *),
                GFP_KERNEL);
    if (!ha->req_q_map) {
        ql_log(ql_log_fatal, vha, 0x003b,
            "Unable to allocate memory for request queue ptrs.\n");
        goto fail_req_map;
    }

    ha->rsp_q_map = kcalloc(ha->max_rsp_queues, sizeof(struct rsp_que *),
                GFP_KERNEL);
    if (!ha->rsp_q_map) {
        ql_log(ql_log_fatal, vha, 0x003c,
            "Unable to allocate memory for response queue ptrs.\n");
        goto fail_rsp_map;
    }

    ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
    if (ha->base_qpair == NULL) {
        ql_log(ql_log_warn, vha, 0x00e0,
            "Failed to allocate base queue pair memory.\n");
        goto fail_base_qpair;
    }

    qla_init_base_qpair(vha, req, rsp);

    if ((ql2xmqsupport || ql2xnvmeenable) && ha->max_qpairs) {
        ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
            GFP_KERNEL);
        if (!ha->queue_pair_map) {
            ql_log(ql_log_fatal, vha, 0x0180,
                "Unable to allocate memory for queue pair ptrs.\n");
            goto fail_qpair_map;
        }
    }

    /*
     * Make sure we record at least the request and response queue zero in
     * case we need to free them if part of the probe fails.
     */
    ha->rsp_q_map[0] = rsp;
    ha->req_q_map[0] = req;
    set_bit(0, ha->rsp_qid_map);
    set_bit(0, ha->req_qid_map);
    return 0;

fail_qpair_map:
    kfree(ha->base_qpair);
    ha->base_qpair = NULL;
fail_base_qpair:
    kfree(ha->rsp_q_map);
    ha->rsp_q_map = NULL;
fail_rsp_map:
    kfree(ha->req_q_map);
    ha->req_q_map = NULL;
fail_req_map:
    return -ENOMEM;
}

static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
{
    if (IS_QLAFX00(ha)) {
        if (req && req->ring_fx00)
            dma_free_coherent(&ha->pdev->dev,
                (req->length_fx00 + 1) * sizeof(request_t),
                req->ring_fx00, req->dma_fx00);
    } else if (req && req->ring)
        dma_free_coherent(&ha->pdev->dev,
        (req->length + 1) * sizeof(request_t),
        req->ring, req->dma);

    if (req)
        kfree(req->outstanding_cmds);

    kfree(req);
}

static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
    if (IS_QLAFX00(ha)) {
        if (rsp && rsp->ring_fx00)
            dma_free_coherent(&ha->pdev->dev,
                (rsp->length_fx00 + 1) * sizeof(request_t),
                rsp->ring_fx00, rsp->dma_fx00);
    } else if (rsp && rsp->ring) {
        dma_free_coherent(&ha->pdev->dev,
        (rsp->length + 1) * sizeof(response_t),
        rsp->ring, rsp->dma);
    }
    kfree(rsp);
}

static void qla2x00_free_queues(struct qla_hw_data *ha)
{
    struct req_que *req;
    struct rsp_que *rsp;
    int cnt;
    unsigned long flags;

    if (ha->queue_pair_map) {
        kfree(ha->queue_pair_map);
        ha->queue_pair_map = NULL;
    }
    if (ha->base_qpair) {
        kfree(ha->base_qpair);
        ha->base_qpair = NULL;
    }

    spin_lock_irqsave(&ha->hardware_lock, flags);
    for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
        if (!test_bit(cnt, ha->req_qid_map))
            continue;

        req = ha->req_q_map[cnt];
        clear_bit(cnt, ha->req_qid_map);
        ha->req_q_map[cnt] = NULL;

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        qla2x00_free_req_que(ha, req);
        spin_lock_irqsave(&ha->hardware_lock, flags);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    kfree(ha->req_q_map);
    ha->req_q_map = NULL;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
        if (!test_bit(cnt, ha->rsp_qid_map))
            continue;

        rsp = ha->rsp_q_map[cnt];
        clear_bit(cnt, ha->rsp_qid_map);
        ha->rsp_q_map[cnt] = NULL;
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        qla2x00_free_rsp_que(ha, rsp);
        spin_lock_irqsave(&ha->hardware_lock, flags);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    kfree(ha->rsp_q_map);
    ha->rsp_q_map = NULL;
}

static char *
qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
    struct qla_hw_data *ha = vha->hw;
    static const char *const pci_bus_modes[] = {
        "33", "66", "100", "133",
    };
    uint16_t pci_bus;

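    /* Bits 9:10 of pci_attr encode the PCI-X bus-speed index. */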
    pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9;
    if (pci_bus) {
        snprintf(str, str_len, "PCI-X (%s MHz)",
             pci_bus_modes[pci_bus]);
    } else {
        pci_bus = (ha->pci_attr & BIT_8) >> 8;
        snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]);
    }

    return str;
}

static char *
qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len)
{
    static const char *const pci_bus_modes[] = {
        "33", "66", "100", "133",
    };
    struct qla_hw_data *ha = vha->hw;
    uint32_t pci_bus;

    if (pci_is_pcie(ha->pdev)) {
        uint32_t lstat, lspeed, lwidth;
        const char *speed_str;

        pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
        lspeed = lstat & PCI_EXP_LNKCAP_SLS;
        lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;

        switch (lspeed) {
        case 1:
            speed_str = "2.5GT/s";
            break;
        case 2:
            speed_str = "5.0GT/s";
            break;
        case 3:
            speed_str = "8.0GT/s";
            break;
        case 4:
            speed_str = "16.0GT/s";
            break;
        default:
            speed_str = "<unknown>";
            break;
        }
        snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth);

        return str;
    }

    pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8;
    if (pci_bus == 0 || pci_bus == 8)
        snprintf(str, str_len, "PCI (%s MHz)",
             pci_bus_modes[pci_bus >> 3]);
    else
        snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
             pci_bus & 4 ? 2 : 1,
             pci_bus_modes[pci_bus & 3]);

    return str;
}

static char *
qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
    char un_str[10];
    struct qla_hw_data *ha = vha->hw;

    snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
        ha->fw_minor_version, ha->fw_subminor_version);

    if (ha->fw_attributes & BIT_9) {
        strcat(str, "FLX");
        return (str);
    }

    switch (ha->fw_attributes & 0xFF) {
    case 0x7:
        strcat(str, "EF");
        break;
    case 0x17:
        strcat(str, "TP");
        break;
    case 0x37:
        strcat(str, "IP");
        break;
    case 0x77:
        strcat(str, "VI");
        break;
    default:
        sprintf(un_str, "(%x)", ha->fw_attributes);
        strcat(str, un_str);
        break;
    }
    if (ha->fw_attributes & 0x100)
        strcat(str, "X");

    return (str);
}

static char *
qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
{
    struct qla_hw_data *ha = vha->hw;

    snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
        ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
    return str;
}

void qla2x00_sp_free_dma(srb_t *sp)
{
    struct qla_hw_data *ha = sp->vha->hw;
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);

    if (sp->flags & SRB_DMA_VALID) {
        scsi_dma_unmap(cmd);
        sp->flags &= ~SRB_DMA_VALID;
    }

    if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
        dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
            scsi_prot_sg_count(cmd), cmd->sc_data_direction);
        sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
    }

    if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
        /* The list is guaranteed to be non-empty here. */
        qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
        sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
    }

    if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
        struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

        dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
        sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
    }

    if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
        struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
            ctx1->fcp_cmnd_dma);
        list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
        ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
        ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
        mempool_free(ctx1, ha->ctx_mempool);
    }
}

void qla2x00_sp_compl(srb_t *sp, int res)
{
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct completion *comp = sp->comp;

    /* ref: INIT */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);
    cmd->result = res;
    sp->type = 0;
    scsi_done(cmd);
    if (comp)
        complete(comp);
}

void qla2xxx_qpair_sp_free_dma(srb_t *sp)
{
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct qla_hw_data *ha = sp->fcport->vha->hw;

    if (sp->flags & SRB_DMA_VALID) {
        scsi_dma_unmap(cmd);
        sp->flags &= ~SRB_DMA_VALID;
    }

    if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
        dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
            scsi_prot_sg_count(cmd), cmd->sc_data_direction);
        sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
    }

    if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
        /* The list is guaranteed to be non-empty here. */
        qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx);
        sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
    }

    if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) {
        struct crc_context *difctx = sp->u.scmd.crc_ctx;
        struct dsd_dma *dif_dsd, *nxt_dsd;

        list_for_each_entry_safe(dif_dsd, nxt_dsd,
            &difctx->ldif_dma_hndl_list, list) {
            list_del(&dif_dsd->list);
            dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr,
                dif_dsd->dsd_list_dma);
            kfree(dif_dsd);
            difctx->no_dif_bundl--;
        }

        list_for_each_entry_safe(dif_dsd, nxt_dsd,
            &difctx->ldif_dsd_list, list) {
            list_del(&dif_dsd->list);
            dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr,
                dif_dsd->dsd_list_dma);
            kfree(dif_dsd);
            difctx->no_ldif_dsd--;
        }

        if (difctx->no_ldif_dsd) {
            ql_dbg(ql_dbg_tgt + ql_dbg_verbose, sp->vha, 0xe022,
                "%s: difctx->no_ldif_dsd=%x\n",
                __func__, difctx->no_ldif_dsd);
        }

        if (difctx->no_dif_bundl) {
            ql_dbg(ql_dbg_tgt + ql_dbg_verbose, sp->vha, 0xe022,
                "%s: difctx->no_dif_bundl=%x\n",
                __func__, difctx->no_dif_bundl);
        }
        sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID;
    }

    if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
        struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;

        dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
            ctx1->fcp_cmnd_dma);
        list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
        ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
        ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
        mempool_free(ctx1, ha->ctx_mempool);
        sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
    }

    if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
        struct crc_context *ctx0 = sp->u.scmd.crc_ctx;

        dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
        sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
    }
}

void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
{
    struct scsi_cmnd *cmd = GET_CMD_SP(sp);
    struct completion *comp = sp->comp;

    /* ref: INIT */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);
    cmd->result = res;
    sp->type = 0;
    scsi_done(cmd);
    if (comp)
        complete(comp);
}

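/*
 * SCSI midlayer queuecommand entry point. When multiqueue is enabled, the
 * command is dispatched to the queue pair backing its block-layer hardware
 * queue; otherwise it is started on the base queue pair.
 */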
static int
qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
    scsi_qla_host_t *vha = shost_priv(host);
    fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
    struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
    srb_t *sp;
    int rval;

    if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) ||
        WARN_ON_ONCE(!rport)) {
        cmd->result = DID_NO_CONNECT << 16;
        goto qc24_fail_command;
    }

    if (ha->mqenable) {
        uint32_t tag;
        uint16_t hwq;
        struct qla_qpair *qpair = NULL;

        tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
        hwq = blk_mq_unique_tag_to_hwq(tag);
        qpair = ha->queue_pair_map[hwq];

        if (qpair)
            return qla2xxx_mqueuecommand(host, cmd, qpair);
    }

    if (ha->flags.eeh_busy) {
        if (ha->flags.pci_channel_io_perm_failure) {
            ql_dbg(ql_dbg_aer, vha, 0x9010,
                "PCI Channel IO permanent failure, exiting "
                "cmd=%p.\n", cmd);
            cmd->result = DID_NO_CONNECT << 16;
        } else {
            ql_dbg(ql_dbg_aer, vha, 0x9011,
                "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
            cmd->result = DID_REQUEUE << 16;
        }
        goto qc24_fail_command;
    }

    rval = fc_remote_port_chkready(rport);
    if (rval) {
        cmd->result = rval;
        ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
            "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
            cmd, rval);
        goto qc24_fail_command;
    }

    if (!vha->flags.difdix_supported &&
        scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
        ql_dbg(ql_dbg_io, vha, 0x3004,
            "DIF Cap not reg, fail DIF capable cmd's:%p.\n",
            cmd);
        cmd->result = DID_NO_CONNECT << 16;
        goto qc24_fail_command;
    }

    if (!fcport || fcport->deleted) {
        cmd->result = DID_IMM_RETRY << 16;
        goto qc24_fail_command;
    }

    if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
        if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
            atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
            ql_dbg(ql_dbg_io, vha, 0x3005,
                "Returning DNC, fcport_state=%d loop_state=%d.\n",
                atomic_read(&fcport->state),
                atomic_read(&base_vha->loop_state));
            cmd->result = DID_NO_CONNECT << 16;
            goto qc24_fail_command;
        }
        goto qc24_target_busy;
    }

    /*
     * Return target busy if we've received a non-zero retry_delay_timer
     * in a FCP_RSP.
     */
    if (fcport->retry_delay_timestamp == 0) {
        /* retry delay not set */
    } else if (time_after(jiffies, fcport->retry_delay_timestamp))
        fcport->retry_delay_timestamp = 0;
    else
        goto qc24_target_busy;

    sp = scsi_cmd_priv(cmd);
    /* ref: INIT */
    qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport);

    sp->u.scmd.cmd = cmd;
    sp->type = SRB_SCSI_CMD;
    sp->free = qla2x00_sp_free_dma;
    sp->done = qla2x00_sp_compl;

    rval = ha->isp_ops->start_scsi(sp);
    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
            "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
        goto qc24_host_busy_free_sp;
    }

    return 0;

qc24_host_busy_free_sp:
    /* ref: INIT */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);

qc24_target_busy:
    return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
    scsi_done(cmd);

    return 0;
}

/* For MQ supported I/O */
int
qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
    struct qla_qpair *qpair)
{
    scsi_qla_host_t *vha = shost_priv(host);
    fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
    struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device));
    struct qla_hw_data *ha = vha->hw;
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
    srb_t *sp;
    int rval;

    rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16);
    if (rval) {
        cmd->result = rval;
        ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
            "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
            cmd, rval);
        goto qc24_fail_command;
    }

    if (!qpair->online) {
        ql_dbg(ql_dbg_io, vha, 0x3077,
               "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy);
        cmd->result = DID_NO_CONNECT << 16;
        goto qc24_fail_command;
    }

    if (!fcport || fcport->deleted) {
        cmd->result = DID_IMM_RETRY << 16;
        goto qc24_fail_command;
    }

    if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) {
        if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
            atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
            ql_dbg(ql_dbg_io, vha, 0x3077,
                "Returning DNC, fcport_state=%d loop_state=%d.\n",
                atomic_read(&fcport->state),
                atomic_read(&base_vha->loop_state));
            cmd->result = DID_NO_CONNECT << 16;
            goto qc24_fail_command;
        }
        goto qc24_target_busy;
    }

    /*
     * Return target busy if we've received a non-zero retry_delay_timer
     * in a FCP_RSP.
     */
    if (fcport->retry_delay_timestamp == 0) {
        /* retry delay not set */
    } else if (time_after(jiffies, fcport->retry_delay_timestamp))
        fcport->retry_delay_timestamp = 0;
    else
        goto qc24_target_busy;

    sp = scsi_cmd_priv(cmd);
    /* ref: INIT */
    qla2xxx_init_sp(sp, vha, qpair, fcport);

    sp->u.scmd.cmd = cmd;
    sp->type = SRB_SCSI_CMD;
    sp->free = qla2xxx_qpair_sp_free_dma;
    sp->done = qla2xxx_qpair_sp_compl;

    rval = ha->isp_ops->start_scsi_mq(sp);
    if (rval != QLA_SUCCESS) {
        ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078,
            "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
        goto qc24_host_busy_free_sp;
    }

    return 0;

qc24_host_busy_free_sp:
    /* ref: INIT */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);

qc24_target_busy:
    return SCSI_MLQUEUE_TARGET_BUSY;

qc24_fail_command:
    scsi_done(cmd);

    return 0;
}

/*
 * qla2x00_eh_wait_on_command
 *    Waits up to a maximum time for the specified command to be
 *    returned by the firmware.
 *
 * Input:
 *    cmd = Scsi Command to wait on.
 *
 * Return:
 *    Completed in time : QLA_SUCCESS
 *    Did not complete in time : QLA_FUNCTION_FAILED
 */
static int
qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
{
#define ABORT_POLLING_PERIOD    1000
#define ABORT_WAIT_ITER     ((2 * 1000) / (ABORT_POLLING_PERIOD))
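/* Poll once per ABORT_POLLING_PERIOD (ms); ABORT_WAIT_ITER caps the wait at ~2 s. */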
    unsigned long wait_iter = ABORT_WAIT_ITER;
    scsi_qla_host_t *vha = shost_priv(cmd->device->host);
    struct qla_hw_data *ha = vha->hw;
    srb_t *sp = scsi_cmd_priv(cmd);
    int ret = QLA_SUCCESS;

    if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
        ql_dbg(ql_dbg_taskm, vha, 0x8005,
            "Return: eh_wait.\n");
        return ret;
    }

    while (sp->type && wait_iter--)
        msleep(ABORT_POLLING_PERIOD);
    if (sp->type)
        ret = QLA_FUNCTION_FAILED;

    return ret;
}

/*
 * qla2x00_wait_for_hba_online
 *    Wait until the HBA is online after going through
 *    <= MAX_RETRIES_OF_ISP_ABORT retries, or until the HBA
 *    is finally disabled (i.e. marked offline).
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    This routine may sleep; release any spinlock held
 *    before calling it.
 *
 * Return:
 *    Success (Adapter is online) : 0
 *    Failed  (Adapter is offline/disabled) : 1
 */
int
qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
{
    int     return_status;
    unsigned long   wait_online;
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
    while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
        test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
        ha->dpc_active) && time_before(jiffies, wait_online)) {

        msleep(1000);
    }
    if (base_vha->flags.online)
        return_status = QLA_SUCCESS;
    else
        return_status = QLA_FUNCTION_FAILED;

    return (return_status);
}

static inline int test_fcport_count(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags;
    int res;
    /* Return 0 = sleep, x = wake */

    spin_lock_irqsave(&ha->tgt.sess_lock, flags);
    ql_dbg(ql_dbg_init, vha, 0x00ec,
        "tgt %p, fcport_count=%d\n",
        vha, vha->fcport_count);
    res = (vha->fcport_count == 0);
    if (res) {
        struct fc_port *fcport;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
            if (fcport->deleted != QLA_SESS_DELETED) {
                /* Session(s) may not be fully logged in
                 * (i.e. fcport_count = 0), but session
                 * deletion thread(s) may still be in flight.
                 */

                res = 0;
                break;
            }
        }
    }
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

    return res;
}

/*
 * qla2x00_wait_for_sess_deletion can only be called from remove_one.
 * It depends on the UNLOADING flag to stop device discovery.
 */
void
qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
{
    u8 i;

    qla2x00_mark_all_devices_lost(vha);

    for (i = 0; i < 10; i++) {
        if (wait_event_timeout(vha->fcport_waitQ,
            test_fcport_count(vha), HZ) > 0)
            break;
    }

    flush_workqueue(vha->hw->wq);
}

/*
 * qla2x00_wait_for_hba_ready
 * Wait until the HBA is ready before doing driver unload.
 *
 * Input:
 *     ha - pointer to host adapter structure
 *
 * Note:
 *    This routine may sleep; release any spinlock held
 *    before calling it.
 *
 */
static void
qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    while ((qla2x00_reset_active(vha) || ha->dpc_active ||
        ha->flags.mbox_busy) ||
           test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
           test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
        if (test_bit(UNLOADING, &base_vha->dpc_flags))
            break;
        msleep(1000);
    }
}

int
qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
{
    int     return_status;
    unsigned long   wait_reset;
    struct qla_hw_data *ha = vha->hw;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

    wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ);
    while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
        test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
        ha->dpc_active) && time_before(jiffies, wait_reset)) {

        msleep(1000);

        if (!test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
            ha->flags.chip_reset_done)
            break;
    }
    if (ha->flags.chip_reset_done)
        return_status = QLA_SUCCESS;
    else
        return_status = QLA_FUNCTION_FAILED;

    return return_status;
}

/**************************************************************************
* qla2xxx_eh_abort
*
* Description:
*    The abort function will abort the specified command.
*
* Input:
*    cmd = Linux SCSI command packet to be aborted.
*
* Returns:
*    Either SUCCESS or FAILED.
*
* Note:
*    Only return FAILED if command not returned by firmware.
**************************************************************************/
static int
qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
    scsi_qla_host_t *vha = shost_priv(cmd->device->host);
    DECLARE_COMPLETION_ONSTACK(comp);
    srb_t *sp;
    int ret;
    unsigned int id;
    uint64_t lun;
    int rval;
    struct qla_hw_data *ha = vha->hw;
    uint32_t ratov_j;
    struct qla_qpair *qpair;
    unsigned long flags;
    int fast_fail_status = SUCCESS;

    if (qla2x00_isp_reg_stat(ha)) {
        ql_log(ql_log_info, vha, 0x8042,
            "PCI/Register disconnect, exiting.\n");
        qla_pci_set_eeh_busy(vha);
        return FAILED;
    }

    /* Save any FAST_IO_FAIL value to return later if abort succeeds */
    ret = fc_block_scsi_eh(cmd);
    if (ret != 0)
        fast_fail_status = ret;

    sp = scsi_cmd_priv(cmd);
    qpair = sp->qpair;

    vha->cmd_timeout_cnt++;

    if ((sp->fcport && sp->fcport->deleted) || !qpair)
        return fast_fail_status != SUCCESS ? fast_fail_status : FAILED;

    spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    sp->comp = &comp;
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    id = cmd->device->id;
    lun = cmd->device->lun;

    ql_dbg(ql_dbg_taskm, vha, 0x8002,
        "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
        vha->host_no, id, lun, sp, cmd, sp->handle);

    /*
     * Abort will release the original command/sp from the firmware. Let
     * the original command call scsi_done; in return, it will wake up
     * this sleeping thread.
     */
    rval = ha->isp_ops->abort_command(sp);

    ql_dbg(ql_dbg_taskm, vha, 0x8003,
           "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);

    /* Wait for the command completion. */
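    /*
     * ha->r_a_tov appears to be kept in tenths of a second (the log below
     * prints it as seconds via /10); * 1000 then converts to milliseconds,
     * for a total wait of up to 4 * R_A_TOV.
     */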
    ratov_j = ha->r_a_tov/10 * 4 * 1000;
    ratov_j = msecs_to_jiffies(ratov_j);
    switch (rval) {
    case QLA_SUCCESS:
        if (!wait_for_completion_timeout(&comp, ratov_j)) {
            ql_dbg(ql_dbg_taskm, vha, 0xffff,
                "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
                __func__, ha->r_a_tov/10);
            ret = FAILED;
        } else {
            ret = fast_fail_status;
        }
        break;
    default:
        ret = FAILED;
        break;
    }

    sp->comp = NULL;

    ql_log(ql_log_info, vha, 0x801c,
        "Abort command issued nexus=%ld:%d:%llu -- %x.\n",
        vha->host_no, id, lun, ret);

    return ret;
}

/*
 * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
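/*
 * Wait for every outstanding SRB_SCSI_CMD on this queue pair that matches
 * the nexus selected by 'type' (whole host, one target, or one LUN) to be
 * returned by the firmware.
 */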
static int
__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t,
                       uint64_t l, enum nexus_wait_type type)
{
    int cnt, match, status;
    unsigned long flags;
    scsi_qla_host_t *vha = qpair->vha;
    struct req_que *req = qpair->req;
    srb_t *sp;
    struct scsi_cmnd *cmd;

    status = QLA_SUCCESS;

    spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    for (cnt = 1; status == QLA_SUCCESS &&
        cnt < req->num_outstanding_cmds; cnt++) {
        sp = req->outstanding_cmds[cnt];
        if (!sp)
            continue;
        if (sp->type != SRB_SCSI_CMD)
            continue;
        if (vha->vp_idx != sp->vha->vp_idx)
            continue;
        match = 0;
        cmd = GET_CMD_SP(sp);
        switch (type) {
        case WAIT_HOST:
            match = 1;
            break;
        case WAIT_TARGET:
            match = cmd->device->id == t;
            break;
        case WAIT_LUN:
            match = (cmd->device->id == t &&
                cmd->device->lun == l);
            break;
        }
        if (!match)
            continue;

        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
        status = qla2x00_eh_wait_on_command(cmd);
        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    }
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    return status;
}

int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
                     uint64_t l, enum nexus_wait_type type)
{
    struct qla_qpair *qpair;
    struct qla_hw_data *ha = vha->hw;
    int i, status = QLA_SUCCESS;

    status = __qla2x00_eh_wait_for_pending_commands(ha->base_qpair, t, l,
                            type);
    for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) {
        qpair = ha->queue_pair_map[i];
        if (!qpair)
            continue;
        status = __qla2x00_eh_wait_for_pending_commands(qpair, t, l,
                                type);
    }
    return status;
}

static char *reset_errors[] = {
    "HBA not online",
    "HBA not ready",
    "Task management failed",
    "Waiting for command completions",
};

static int
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
{
    struct scsi_device *sdev = cmd->device;
    scsi_qla_host_t *vha = shost_priv(sdev->host);
    struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
    fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
    struct qla_hw_data *ha = vha->hw;
    int err;

    if (qla2x00_isp_reg_stat(ha)) {
        ql_log(ql_log_info, vha, 0x803e,
            "PCI/Register disconnect, exiting.\n");
        qla_pci_set_eeh_busy(vha);
        return FAILED;
    }

    if (!fcport)
        return FAILED;

    err = fc_block_rport(rport);
    if (err != 0)
        return err;

    if (fcport->deleted)
        return FAILED;

    ql_log(ql_log_info, vha, 0x8009,
        "DEVICE RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", vha->host_no,
        sdev->id, sdev->lun, cmd);

    err = 0;
    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800a,
            "Wait for hba online failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }
    err = 2;
    if (ha->isp_ops->lun_reset(fcport, sdev->lun, 1)
        != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800c,
            "do_reset failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }
    err = 3;
    if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
        sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800d,
            "wait for pending cmds failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }

    ql_log(ql_log_info, vha, 0x800e,
        "DEVICE RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n",
        vha->host_no, sdev->id, sdev->lun, cmd);

    return SUCCESS;

eh_reset_failed:
    ql_log(ql_log_info, vha, 0x800f,
        "DEVICE RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
        reset_errors[err], vha->host_no, sdev->id, sdev->lun,
        cmd);
    vha->reset_cmd_err_cnt++;
    return FAILED;
}

static int
qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
{
    struct scsi_device *sdev = cmd->device;
    struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
    scsi_qla_host_t *vha = shost_priv(rport_to_shost(rport));
    struct qla_hw_data *ha = vha->hw;
    fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
    int err;

    if (qla2x00_isp_reg_stat(ha)) {
        ql_log(ql_log_info, vha, 0x803f,
            "PCI/Register disconnect, exiting.\n");
        qla_pci_set_eeh_busy(vha);
        return FAILED;
    }

    if (!fcport)
        return FAILED;

    err = fc_block_rport(rport);
    if (err != 0)
        return err;

    if (fcport->deleted)
        return FAILED;

    ql_log(ql_log_info, vha, 0x8009,
        "TARGET RESET ISSUED nexus=%ld:%d cmd=%p.\n", vha->host_no,
        sdev->id, cmd);

    err = 0;
    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800a,
            "Wait for hba online failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }
    err = 2;
    if (ha->isp_ops->target_reset(fcport, 0, 0) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800c,
            "target_reset failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }
    err = 3;
    if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
        0, WAIT_TARGET) != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x800d,
            "wait for pending cmds failed for cmd=%p.\n", cmd);
        goto eh_reset_failed;
    }

    ql_log(ql_log_info, vha, 0x800e,
        "TARGET RESET SUCCEEDED nexus:%ld:%d cmd=%p.\n",
        vha->host_no, sdev->id, cmd);

    return SUCCESS;

eh_reset_failed:
    ql_log(ql_log_info, vha, 0x800f,
        "TARGET RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n",
        reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
        cmd);
    vha->reset_cmd_err_cnt++;
    return FAILED;
}

/**************************************************************************
* qla2xxx_eh_bus_reset
*
* Description:
*    The bus reset function will reset the bus and abort any executing
*    commands.
*
* Input:
*    cmd = Linux SCSI command packet of the command that caused the
*          bus reset.
*
* Returns:
*    SUCCESS/FAILURE (defined as macro in scsi.h).
*
**************************************************************************/
static int
qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
{
    scsi_qla_host_t *vha = shost_priv(cmd->device->host);
    int ret = FAILED;
    unsigned int id;
    uint64_t lun;
    struct qla_hw_data *ha = vha->hw;

    if (qla2x00_isp_reg_stat(ha)) {
        ql_log(ql_log_info, vha, 0x8040,
            "PCI/Register disconnect, exiting.\n");
        qla_pci_set_eeh_busy(vha);
        return FAILED;
    }

    id = cmd->device->id;
    lun = cmd->device->lun;

    if (qla2x00_chip_is_down(vha))
        return ret;

    ql_log(ql_log_info, vha, 0x8012,
        "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);

    if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
        ql_log(ql_log_fatal, vha, 0x8013,
            "Wait for hba online failed board disabled.\n");
        goto eh_bus_reset_done;
    }

    if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
        ret = SUCCESS;

    if (ret == FAILED)
        goto eh_bus_reset_done;

    /* Flush outstanding commands. */
    if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) !=
        QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x8014,
            "Wait for pending commands failed.\n");
        ret = FAILED;
    }

eh_bus_reset_done:
    ql_log(ql_log_warn, vha, 0x802b,
        "BUS RESET %s nexus=%ld:%d:%llu.\n",
        (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);

    return ret;
}

1634 /**************************************************************************
1635 * qla2xxx_eh_host_reset
1636 *
1637 * Description:
1638 *    The reset function will reset the Adapter.
1639 *
1640 * Input:
1641 *      cmd = Linux SCSI command packet of the command that caused the
1642 *            adapter reset.
1643 *
1644 * Returns:
1645 *      Either SUCCESS or FAILED.
1646 *
1647 * Note:
1648 **************************************************************************/
1649 static int
1650 qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
1651 {
1652     scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1653     struct qla_hw_data *ha = vha->hw;
1654     int ret = FAILED;
1655     unsigned int id;
1656     uint64_t lun;
1657     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
1658 
1659     if (qla2x00_isp_reg_stat(ha)) {
1660         ql_log(ql_log_info, vha, 0x8041,
1661             "PCI/Register disconnect, exiting.\n");
1662         qla_pci_set_eeh_busy(vha);
1663         return SUCCESS;
1664     }
1665 
1666     id = cmd->device->id;
1667     lun = cmd->device->lun;
1668 
1669     ql_log(ql_log_info, vha, 0x8018,
1670         "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
1671 
1672     /*
1673      * No point in issuing another reset if one is active.  Also do not
1674      * attempt a reset if we are updating flash.
1675      */
1676     if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
1677         goto eh_host_reset_lock;
1678 
1679     if (vha != base_vha) {
1680         if (qla2x00_vp_abort_isp(vha))
1681             goto eh_host_reset_lock;
1682     } else {
1683         if (IS_P3P_TYPE(vha->hw)) {
1684             if (!qla82xx_fcoe_ctx_reset(vha)) {
1685                 /* Ctx reset success */
1686                 ret = SUCCESS;
1687                 goto eh_host_reset_lock;
1688             }
1689             /* fall through if ctx reset failed */
1690         }
1691         if (ha->wq)
1692             flush_workqueue(ha->wq);
1693 
1694         set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1695         if (ha->isp_ops->abort_isp(base_vha)) {
1696             clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1697             /* failed; schedule dpc to retry */
1698             set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
1699 
1700             if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1701                 ql_log(ql_log_warn, vha, 0x802a,
1702                     "wait for hba online failed.\n");
1703                 goto eh_host_reset_lock;
1704             }
1705         }
1706         clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
1707     }
1708 
1709     /* Wait for commands to be returned to the OS. */
1710     if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) ==
1711         QLA_SUCCESS)
1712         ret = SUCCESS;
1713 
1714 eh_host_reset_lock:
1715     ql_log(ql_log_info, vha, 0x8017,
1716         "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
1717         (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
1718 
1719     return ret;
1720 }
1721 
1722 /*
1723 * qla2x00_loop_reset
1724 *      Issue loop reset.
1725 *
1726 * Input:
1727 *      vha = adapter block pointer.
1728 *
1729 * Returns:
1730 *      0 = success
1731 */
1732 int
1733 qla2x00_loop_reset(scsi_qla_host_t *vha)
1734 {
1735     int ret;
1736     struct qla_hw_data *ha = vha->hw;
1737 
1738     if (IS_QLAFX00(ha))
1739         return QLA_SUCCESS;
1740 
1741     if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
1742         atomic_set(&vha->loop_state, LOOP_DOWN);
1743         atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1744         qla2x00_mark_all_devices_lost(vha);
1745         ret = qla2x00_full_login_lip(vha);
1746         if (ret != QLA_SUCCESS) {
1747             ql_dbg(ql_dbg_taskm, vha, 0x802d,
1748                 "full_login_lip=%d.\n", ret);
1749         }
1750     }
1751 
1752     if (ha->flags.enable_lip_reset) {
1753         ret = qla2x00_lip_reset(vha);
1754         if (ret != QLA_SUCCESS)
1755             ql_dbg(ql_dbg_taskm, vha, 0x802e,
1756                 "lip_reset failed (%d).\n", ret);
1757     }
1758 
1759     /* Issue marker command only when we are going to start the I/O */
1760     vha->marker_needed = 1;
1761 
1762     return QLA_SUCCESS;
1763 }
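/*
 * Note that qla2x00_loop_reset() reports QLA_SUCCESS once the LIP paths
 * have been attempted, so callers treat it as best-effort; individual LIP
 * failures are only logged via ql_dbg().  Typical usage, as in
 * qla2xxx_eh_bus_reset() above:
 *
 *	if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
 *		ret = SUCCESS;
 *
 * Setting vha->marker_needed asks the I/O path to emit a marker IOCB
 * before the next command is started.
 */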
1764 
1765 /*
1766  * The caller must ensure that no completion interrupts will happen
1767  * while this function is in progress.
1768  */
1769 static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
1770                   unsigned long *flags)
1771     __releases(qp->qp_lock_ptr)
1772     __acquires(qp->qp_lock_ptr)
1773 {
1774     DECLARE_COMPLETION_ONSTACK(comp);
1775     scsi_qla_host_t *vha = qp->vha;
1776     struct qla_hw_data *ha = vha->hw;
1777     struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1778     int rval;
1779     bool ret_cmd;
1780     uint32_t ratov_j;
1781 
1782     lockdep_assert_held(qp->qp_lock_ptr);
1783 
1784     if (qla2x00_chip_is_down(vha)) {
1785         sp->done(sp, res);
1786         return;
1787     }
1788 
1789     if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS ||
1790         (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy &&
1791          !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
1792          !qla2x00_isp_reg_stat(ha))) {
1793         if (sp->comp) {
1794             sp->done(sp, res);
1795             return;
1796         }
1797 
1798         sp->comp = &comp;
1799         spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
1800 
1801         rval = ha->isp_ops->abort_command(sp);
1802         /* Wait for command completion. */
1803         ret_cmd = false;
1804         ratov_j = ha->r_a_tov/10 * 4 * 1000;
1805         ratov_j = msecs_to_jiffies(ratov_j);
1806         switch (rval) {
1807         case QLA_SUCCESS:
1808             if (!wait_for_completion_timeout(&comp, ratov_j)) {
1809                 ql_dbg(ql_dbg_taskm, vha, 0xffff,
1810                     "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n",
1811                     __func__, ha->r_a_tov/10);
1812                 ret_cmd = true;
1813             }
1814             /* else FW returns SP to driver */
1815             break;
1816         default:
1817             ret_cmd = true;
1818             break;
1819         }
1820 
1821         spin_lock_irqsave(qp->qp_lock_ptr, *flags);
1822         if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd)))
1823             sp->done(sp, res);
1824     } else {
1825         sp->done(sp, res);
1826     }
1827 }
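/*
 * The abort wait above is bounded by 4 * R_A_TOV.  ha->r_a_tov is held in
 * 100 ms units (hence the /10 to get seconds in the log message), so
 * assuming, say, r_a_tov == 100:
 *
 *	ratov_j = 100 / 10 * 4 * 1000;        // 40000 ms
 *	ratov_j = msecs_to_jiffies(ratov_j);  // 40 s in jiffies
 *
 * The __releases/__acquires annotations document that the qpair lock is
 * dropped around abort_command() and the completion wait, then re-taken
 * before returning - which is why the caller must pass *flags in.
 */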
1828 
1829 /*
1830  * The caller must ensure that no completion interrupts will happen
1831  * while this function is in progress.
1832  */
1833 static void
1834 __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1835 {
1836     int cnt;
1837     unsigned long flags;
1838     srb_t *sp;
1839     scsi_qla_host_t *vha = qp->vha;
1840     struct qla_hw_data *ha = vha->hw;
1841     struct req_que *req;
1842     struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1843     struct qla_tgt_cmd *cmd;
1844 
1845     if (!ha->req_q_map)
1846         return;
1847     spin_lock_irqsave(qp->qp_lock_ptr, flags);
1848     req = qp->req;
1849     for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
1850         sp = req->outstanding_cmds[cnt];
1851         if (sp) {
1852             switch (sp->cmd_type) {
1853             case TYPE_SRB:
1854                 qla2x00_abort_srb(qp, sp, res, &flags);
1855                 break;
1856             case TYPE_TGT_CMD:
1857                 if (!vha->hw->tgt.tgt_ops || !tgt ||
1858                     qla_ini_mode_enabled(vha)) {
1859                     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
1860                         "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
1861                         vha->dpc_flags);
1862                     continue;
1863                 }
1864                 cmd = (struct qla_tgt_cmd *)sp;
1865                 cmd->aborted = 1;
1866                 break;
1867             case TYPE_TGT_TMCMD:
1868                 /* Skip task management functions. */
1869                 break;
1870             default:
1871                 break;
1872             }
1873             req->outstanding_cmds[cnt] = NULL;
1874         }
1875     }
1876     spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
1877 }
1878 
1879 /*
1880  * The caller must ensure that no completion interrupts will happen
1881  * while this function is in progress.
1882  */
1883 void
1884 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1885 {
1886     int que;
1887     struct qla_hw_data *ha = vha->hw;
1888 
1889     /* Continue only if initialization is complete. */
1890     if (!ha->base_qpair)
1891         return;
1892     __qla2x00_abort_all_cmds(ha->base_qpair, res);
1893 
1894     if (!ha->queue_pair_map)
1895         return;
1896     for (que = 0; que < ha->max_qpairs; que++) {
1897         if (!ha->queue_pair_map[que])
1898             continue;
1899 
1900         __qla2x00_abort_all_cmds(ha->queue_pair_map[que], res);
1901     }
1902 }
1903 
1904 static int
1905 qla2xxx_slave_alloc(struct scsi_device *sdev)
1906 {
1907     struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1908 
1909     if (!rport || fc_remote_port_chkready(rport))
1910         return -ENXIO;
1911 
1912     sdev->hostdata = *(fc_port_t **)rport->dd_data;
1913 
1914     return 0;
1915 }
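/*
 * rport->dd_data is the fc_transport-private area attached to each remote
 * port (sized via the transport template's dd_fcrport_size); this driver
 * stores a single fc_port_t pointer there, which is why slave_alloc above
 * and the reset handlers earlier fetch it with a double dereference:
 *
 *	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
 */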
1916 
1917 static int
1918 qla2xxx_slave_configure(struct scsi_device *sdev)
1919 {
1920     scsi_qla_host_t *vha = shost_priv(sdev->host);
1921     struct req_que *req = vha->req;
1922 
1923     if (IS_T10_PI_CAPABLE(vha->hw))
1924         blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1925 
1926     scsi_change_queue_depth(sdev, req->max_q_depth);
1927     return 0;
1928 }
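/*
 * blk_queue_update_dma_alignment() takes an alignment *mask*, so the 0x7
 * above constrains data buffers to 8-byte alignment when the HBA is
 * T10-PI capable; the queue depth is then raised to the request queue's
 * max_q_depth rather than left at the SCSI midlayer default.
 */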
1929 
1930 static void
1931 qla2xxx_slave_destroy(struct scsi_device *sdev)
1932 {
1933     sdev->hostdata = NULL;
1934 }
1935 
1936 /**
1937  * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
1938  * @ha: HA context
1939  *
1940  * At exit, @ha's flags.enable_64bit_addressing is set to indicate the
1941  * supported addressing method.
1942  */
1943 static void
1944 qla2x00_config_dma_addressing(struct qla_hw_data *ha)
1945 {
1946     /* Assume a 32bit DMA mask. */
1947     ha->flags.enable_64bit_addressing = 0;
1948 
1949     if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1950         /* Any upper-dword bits set? */
1951         if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
1952             !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
1953             /* Ok, a 64bit DMA mask is applicable. */
1954             ha->flags.enable_64bit_addressing = 1;
1955             ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
1956             ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
1957             return;
1958         }
1959     }
1960 
1961     dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1962     dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
1963 }
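/*
 * The ladder above is the usual try-64-bit, fall-back-to-32-bit DMA
 * setup, roughly equivalent to this sketch (illustrative only):
 *
 *	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		;	// use 64-bit IOCB formats
 *	else
 *		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *
 * The extra MSD(dma_get_required_mask()) test keeps the driver on the
 * cheaper 32-bit IOCB variants when the platform can already address all
 * of memory with 32 bits, even though a 64-bit mask would be accepted.
 */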
1964 
1965 static void
1966 qla2x00_enable_intrs(struct qla_hw_data *ha)
1967 {
1968     unsigned long flags = 0;
1969     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1970 
1971     spin_lock_irqsave(&ha->hardware_lock, flags);
1972     ha->interrupts_on = 1;
1973     /* enable risc and host interrupts */
1974     wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC);
1975     rd_reg_word(&reg->ictrl);
1976     spin_unlock_irqrestore(&ha->hardware_lock, flags);
1977 
1978 }
1979 
1980 static void
1981 qla2x00_disable_intrs(struct qla_hw_data *ha)
1982 {
1983     unsigned long flags = 0;
1984     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1985 
1986     spin_lock_irqsave(&ha->hardware_lock, flags);
1987     ha->interrupts_on = 0;
1988     /* disable risc and host interrupts */
1989     wrt_reg_word(&reg->ictrl, 0);
1990     rd_reg_word(&reg->ictrl);
1991     spin_unlock_irqrestore(&ha->hardware_lock, flags);
1992 }
1993 
1994 static void
1995 qla24xx_enable_intrs(struct qla_hw_data *ha)
1996 {
1997     unsigned long flags = 0;
1998     struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1999 
2000     spin_lock_irqsave(&ha->hardware_lock, flags);
2001     ha->interrupts_on = 1;
2002     wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT);
2003     rd_reg_dword(&reg->ictrl);
2004     spin_unlock_irqrestore(&ha->hardware_lock, flags);
2005 }
2006 
2007 static void
2008 qla24xx_disable_intrs(struct qla_hw_data *ha)
2009 {
2010     unsigned long flags = 0;
2011     struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2012 
2013     if (IS_NOPOLLING_TYPE(ha))
2014         return;
2015     spin_lock_irqsave(&ha->hardware_lock, flags);
2016     ha->interrupts_on = 0;
2017     wrt_reg_dword(&reg->ictrl, 0);
2018     rd_reg_dword(&reg->ictrl);
2019     spin_unlock_irqrestore(&ha->hardware_lock, flags);
2020 }
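/*
 * All four interrupt helpers above end by reading back the register that
 * was just written (rd_reg_word()/rd_reg_dword() on ictrl).  That is the
 * standard PCI posted-write flush: the read cannot complete until the
 * write has reached the chip, so interrupts are known to be enabled or
 * disabled before hardware_lock is released.
 */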
2021 
2022 static int
2023 qla2x00_iospace_config(struct qla_hw_data *ha)
2024 {
2025     resource_size_t pio;
2026     uint16_t msix;
2027 
2028     if (pci_request_selected_regions(ha->pdev, ha->bars,
2029         QLA2XXX_DRIVER_NAME)) {
2030         ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
2031             "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
2032             pci_name(ha->pdev));
2033         goto iospace_error_exit;
2034     }
2035     if (!(ha->bars & 1))
2036         goto skip_pio;
2037 
2038     /* We only need PIO for Flash operations on ISP2312 v2 chips. */
2039     pio = pci_resource_start(ha->pdev, 0);
2040     if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
2041         if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
2042             ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
2043                 "Invalid pci I/O region size (%s).\n",
2044                 pci_name(ha->pdev));
2045             pio = 0;
2046         }
2047     } else {
2048         ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
2049             "Region #0 not a PIO resource (%s).\n",
2050             pci_name(ha->pdev));
2051         pio = 0;
2052     }
2053     ha->pio_address = pio;
2054     ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
2055         "PIO address=%llu.\n",
2056         (unsigned long long)ha->pio_address);
2057 
2058 skip_pio:
2059     /* Use MMIO operations for all accesses. */
2060     if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
2061         ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
2062             "Region #1 not an MMIO resource (%s), aborting.\n",
2063             pci_name(ha->pdev));
2064         goto iospace_error_exit;
2065     }
2066     if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
2067         ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
2068             "Invalid PCI mem region size (%s), aborting.\n",
2069             pci_name(ha->pdev));
2070         goto iospace_error_exit;
2071     }
2072 
2073     ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
2074     if (!ha->iobase) {
2075         ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
2076             "Cannot remap MMIO (%s), aborting.\n",
2077             pci_name(ha->pdev));
2078         goto iospace_error_exit;
2079     }
2080 
2081     /* Determine queue resources */
2082     ha->max_req_queues = ha->max_rsp_queues = 1;
2083     ha->msix_count = QLA_BASE_VECTORS;
2084 
2085     /* Check if FW supports MQ or not */
2086     if (!(ha->fw_attributes & BIT_6))
2087         goto mqiobase_exit;
2088 
2089     if (!ql2xmqsupport || !ql2xnvmeenable ||
2090         (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
2091         goto mqiobase_exit;
2092 
2093     ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
2094             pci_resource_len(ha->pdev, 3));
2095     if (ha->mqiobase) {
2096         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
2097             "MQIO Base=%p.\n", ha->mqiobase);
2098         /* Read MSIX vector size of the board */
2099         pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
2100         ha->msix_count = msix + 1;
2101         /* Max queues are bounded by available msix vectors */
2102         /* MB interrupt uses 1 vector */
2103         ha->max_req_queues = ha->msix_count - 1;
2104         ha->max_rsp_queues = ha->max_req_queues;
2105         /* Queue pairs are the max value minus the base queue pair */
2106         ha->max_qpairs = ha->max_rsp_queues - 1;
2107         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0188,
2108             "Max no of queue pairs: %d.\n", ha->max_qpairs);
2109 
2110         ql_log_pci(ql_log_info, ha->pdev, 0x001a,
2111             "MSI-X vector count: %d.\n", ha->msix_count);
2112     } else
2113         ql_log_pci(ql_log_info, ha->pdev, 0x001b,
2114             "BAR 3 not enabled.\n");
2115 
2116 mqiobase_exit:
2117     ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
2118         "MSIX Count: %d.\n", ha->msix_count);
2119     return 0;
2120 
2121 iospace_error_exit:
2122     return -ENOMEM;
2123 }
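/*
 * Worked example for the MQ sizing above, assuming the MSI-X control word
 * read from config space yields msix == 2:
 *
 *	ha->msix_count     = msix + 1;               // 3 vectors
 *	ha->max_req_queues = ha->msix_count - 1;     // 2 (1 vector for MB)
 *	ha->max_rsp_queues = ha->max_req_queues;     // 2
 *	ha->max_qpairs     = ha->max_rsp_queues - 1; // 1 beyond the base pair
 *
 * Without a usable BAR 3 the driver stays at one request/response queue
 * and QLA_BASE_VECTORS.
 */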
2124 
2125 
2126 static int
2127 qla83xx_iospace_config(struct qla_hw_data *ha)
2128 {
2129     uint16_t msix;
2130 
2131     if (pci_request_selected_regions(ha->pdev, ha->bars,
2132         QLA2XXX_DRIVER_NAME)) {
2133         ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
2134             "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
2135             pci_name(ha->pdev));
2136 
2137         goto iospace_error_exit;
2138     }
2139 
2140     /* Use MMIO operations for all accesses. */
2141     if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
2142         ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
2143             "Region #0 not an MMIO resource (%s), aborting.\n",
2144             pci_name(ha->pdev));
2145         goto iospace_error_exit;
2146     }
2147     if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
2148         ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
2149             "Invalid PCI mem region size (%s), aborting.\n",
2150             pci_name(ha->pdev));
2151         goto iospace_error_exit;
2152     }
2153 
2154     ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
2155     if (!ha->iobase) {
2156         ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
2157             "Cannot remap MMIO (%s), aborting.\n",
2158             pci_name(ha->pdev));
2159         goto iospace_error_exit;
2160     }
2161 
2162     /* 64bit PCI BAR - BAR2 will correspond to region 4 */
2163     /* 83XX/26XX always use MQ type access for queues
2164      * - mbar 2, a.k.a region 4 */
2165     ha->max_req_queues = ha->max_rsp_queues = 1;
2166     ha->msix_count = QLA_BASE_VECTORS;
2167     ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
2168             pci_resource_len(ha->pdev, 4));
2169 
2170     if (!ha->mqiobase) {
2171         ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
2172             "BAR2/region4 not enabled.\n");
2173         goto mqiobase_exit;
2174     }
2175 
2176     ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
2177             pci_resource_len(ha->pdev, 2));
2178     if (ha->msixbase) {
2179         /* Read MSIX vector size of the board */
2180         pci_read_config_word(ha->pdev,
2181             QLA_83XX_PCI_MSIX_CONTROL, &msix);
2182         ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
2183         /*
2184          * By default, driver uses at least two msix vectors
2185          * (default & rspq)
2186          */
2187         if (ql2xmqsupport || ql2xnvmeenable) {
2188             /* MB interrupt uses 1 vector */
2189             ha->max_req_queues = ha->msix_count - 1;
2190 
2191             /* ATIOQ needs 1 vector. That's 1 less QPair */
2192             if (QLA_TGT_MODE_ENABLED())
2193                 ha->max_req_queues--;
2194 
2195             ha->max_rsp_queues = ha->max_req_queues;
2196 
2197             /* Queue pairs are the max value minus
2198              * the base queue pair */
2199             ha->max_qpairs = ha->max_req_queues - 1;
2200             ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
2201                 "Max no of queue pairs: %d.\n", ha->max_qpairs);
2202         }
2203         ql_log_pci(ql_log_info, ha->pdev, 0x011c,
2204             "MSI-X vector count: %d.\n", ha->msix_count);
2205     } else
2206         ql_log_pci(ql_log_info, ha->pdev, 0x011e,
2207             "BAR 1 not enabled.\n");
2208 
2209 mqiobase_exit:
2210     ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
2211         "MSIX Count: %d.\n", ha->msix_count);
2212     return 0;
2213 
2214 iospace_error_exit:
2215     return -ENOMEM;
2216 }
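/*
 * The 83xx sizing differs from qla2x00_iospace_config() in two ways: the
 * MSI-X table size is masked with PCI_MSIX_FLAGS_QSIZE (the field is N-1
 * encoded, hence the +1), and target mode reserves one extra vector for
 * the ATIO queue.  Assuming msix reads back as 31 with target mode on:
 *
 *	ha->msix_count     = 31 + 1;  // 32 vectors
 *	ha->max_req_queues = 32 - 1;  // 31 (MB interrupt)
 *	ha->max_req_queues--;         // 30 (ATIO queue)
 *	ha->max_qpairs     = 30 - 1;  // 29 beyond the base queue pair
 */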
2217 
2218 static struct isp_operations qla2100_isp_ops = {
2219     .pci_config     = qla2100_pci_config,
2220     .reset_chip     = qla2x00_reset_chip,
2221     .chip_diag      = qla2x00_chip_diag,
2222     .config_rings       = qla2x00_config_rings,
2223     .reset_adapter      = qla2x00_reset_adapter,
2224     .nvram_config       = qla2x00_nvram_config,
2225     .update_fw_options  = qla2x00_update_fw_options,
2226     .load_risc      = qla2x00_load_risc,
2227     .pci_info_str       = qla2x00_pci_info_str,
2228     .fw_version_str     = qla2x00_fw_version_str,
2229     .intr_handler       = qla2100_intr_handler,
2230     .enable_intrs       = qla2x00_enable_intrs,
2231     .disable_intrs      = qla2x00_disable_intrs,
2232     .abort_command      = qla2x00_abort_command,
2233     .target_reset       = qla2x00_abort_target,
2234     .lun_reset      = qla2x00_lun_reset,
2235     .fabric_login       = qla2x00_login_fabric,
2236     .fabric_logout      = qla2x00_fabric_logout,
2237     .calc_req_entries   = qla2x00_calc_iocbs_32,
2238     .build_iocbs        = qla2x00_build_scsi_iocbs_32,
2239     .prep_ms_iocb       = qla2x00_prep_ms_iocb,
2240     .prep_ms_fdmi_iocb  = qla2x00_prep_ms_fdmi_iocb,
2241     .read_nvram     = qla2x00_read_nvram_data,
2242     .write_nvram        = qla2x00_write_nvram_data,
2243     .fw_dump        = qla2100_fw_dump,
2244     .beacon_on      = NULL,
2245     .beacon_off     = NULL,
2246     .beacon_blink       = NULL,
2247     .read_optrom        = qla2x00_read_optrom_data,
2248     .write_optrom       = qla2x00_write_optrom_data,
2249     .get_flash_version  = qla2x00_get_flash_version,
2250     .start_scsi     = qla2x00_start_scsi,
2251     .start_scsi_mq          = NULL,
2252     .abort_isp      = qla2x00_abort_isp,
2253     .iospace_config         = qla2x00_iospace_config,
2254     .initialize_adapter = qla2x00_initialize_adapter,
2255 };
2256 
2257 static struct isp_operations qla2300_isp_ops = {
2258     .pci_config     = qla2300_pci_config,
2259     .reset_chip     = qla2x00_reset_chip,
2260     .chip_diag      = qla2x00_chip_diag,
2261     .config_rings       = qla2x00_config_rings,
2262     .reset_adapter      = qla2x00_reset_adapter,
2263     .nvram_config       = qla2x00_nvram_config,
2264     .update_fw_options  = qla2x00_update_fw_options,
2265     .load_risc      = qla2x00_load_risc,
2266     .pci_info_str       = qla2x00_pci_info_str,
2267     .fw_version_str     = qla2x00_fw_version_str,
2268     .intr_handler       = qla2300_intr_handler,
2269     .enable_intrs       = qla2x00_enable_intrs,
2270     .disable_intrs      = qla2x00_disable_intrs,
2271     .abort_command      = qla2x00_abort_command,
2272     .target_reset       = qla2x00_abort_target,
2273     .lun_reset      = qla2x00_lun_reset,
2274     .fabric_login       = qla2x00_login_fabric,
2275     .fabric_logout      = qla2x00_fabric_logout,
2276     .calc_req_entries   = qla2x00_calc_iocbs_32,
2277     .build_iocbs        = qla2x00_build_scsi_iocbs_32,
2278     .prep_ms_iocb       = qla2x00_prep_ms_iocb,
2279     .prep_ms_fdmi_iocb  = qla2x00_prep_ms_fdmi_iocb,
2280     .read_nvram     = qla2x00_read_nvram_data,
2281     .write_nvram        = qla2x00_write_nvram_data,
2282     .fw_dump        = qla2300_fw_dump,
2283     .beacon_on      = qla2x00_beacon_on,
2284     .beacon_off     = qla2x00_beacon_off,
2285     .beacon_blink       = qla2x00_beacon_blink,
2286     .read_optrom        = qla2x00_read_optrom_data,
2287     .write_optrom       = qla2x00_write_optrom_data,
2288     .get_flash_version  = qla2x00_get_flash_version,
2289     .start_scsi     = qla2x00_start_scsi,
2290     .start_scsi_mq          = NULL,
2291     .abort_isp      = qla2x00_abort_isp,
2292     .iospace_config     = qla2x00_iospace_config,
2293     .initialize_adapter = qla2x00_initialize_adapter,
2294 };
2295 
2296 static struct isp_operations qla24xx_isp_ops = {
2297     .pci_config     = qla24xx_pci_config,
2298     .reset_chip     = qla24xx_reset_chip,
2299     .chip_diag      = qla24xx_chip_diag,
2300     .config_rings       = qla24xx_config_rings,
2301     .reset_adapter      = qla24xx_reset_adapter,
2302     .nvram_config       = qla24xx_nvram_config,
2303     .update_fw_options  = qla24xx_update_fw_options,
2304     .load_risc      = qla24xx_load_risc,
2305     .pci_info_str       = qla24xx_pci_info_str,
2306     .fw_version_str     = qla24xx_fw_version_str,
2307     .intr_handler       = qla24xx_intr_handler,
2308     .enable_intrs       = qla24xx_enable_intrs,
2309     .disable_intrs      = qla24xx_disable_intrs,
2310     .abort_command      = qla24xx_abort_command,
2311     .target_reset       = qla24xx_abort_target,
2312     .lun_reset      = qla24xx_lun_reset,
2313     .fabric_login       = qla24xx_login_fabric,
2314     .fabric_logout      = qla24xx_fabric_logout,
2315     .calc_req_entries   = NULL,
2316     .build_iocbs        = NULL,
2317     .prep_ms_iocb       = qla24xx_prep_ms_iocb,
2318     .prep_ms_fdmi_iocb  = qla24xx_prep_ms_fdmi_iocb,
2319     .read_nvram     = qla24xx_read_nvram_data,
2320     .write_nvram        = qla24xx_write_nvram_data,
2321     .fw_dump        = qla24xx_fw_dump,
2322     .beacon_on      = qla24xx_beacon_on,
2323     .beacon_off     = qla24xx_beacon_off,
2324     .beacon_blink       = qla24xx_beacon_blink,
2325     .read_optrom        = qla24xx_read_optrom_data,
2326     .write_optrom       = qla24xx_write_optrom_data,
2327     .get_flash_version  = qla24xx_get_flash_version,
2328     .start_scsi     = qla24xx_start_scsi,
2329     .start_scsi_mq          = NULL,
2330     .abort_isp      = qla2x00_abort_isp,
2331     .iospace_config     = qla2x00_iospace_config,
2332     .initialize_adapter = qla2x00_initialize_adapter,
2333 };
2334 
2335 static struct isp_operations qla25xx_isp_ops = {
2336     .pci_config     = qla25xx_pci_config,
2337     .reset_chip     = qla24xx_reset_chip,
2338     .chip_diag      = qla24xx_chip_diag,
2339     .config_rings       = qla24xx_config_rings,
2340     .reset_adapter      = qla24xx_reset_adapter,
2341     .nvram_config       = qla24xx_nvram_config,
2342     .update_fw_options  = qla24xx_update_fw_options,
2343     .load_risc      = qla24xx_load_risc,
2344     .pci_info_str       = qla24xx_pci_info_str,
2345     .fw_version_str     = qla24xx_fw_version_str,
2346     .intr_handler       = qla24xx_intr_handler,
2347     .enable_intrs       = qla24xx_enable_intrs,
2348     .disable_intrs      = qla24xx_disable_intrs,
2349     .abort_command      = qla24xx_abort_command,
2350     .target_reset       = qla24xx_abort_target,
2351     .lun_reset      = qla24xx_lun_reset,
2352     .fabric_login       = qla24xx_login_fabric,
2353     .fabric_logout      = qla24xx_fabric_logout,
2354     .calc_req_entries   = NULL,
2355     .build_iocbs        = NULL,
2356     .prep_ms_iocb       = qla24xx_prep_ms_iocb,
2357     .prep_ms_fdmi_iocb  = qla24xx_prep_ms_fdmi_iocb,
2358     .read_nvram     = qla25xx_read_nvram_data,
2359     .write_nvram        = qla25xx_write_nvram_data,
2360     .fw_dump        = qla25xx_fw_dump,
2361     .beacon_on      = qla24xx_beacon_on,
2362     .beacon_off     = qla24xx_beacon_off,
2363     .beacon_blink       = qla24xx_beacon_blink,
2364     .read_optrom        = qla25xx_read_optrom_data,
2365     .write_optrom       = qla24xx_write_optrom_data,
2366     .get_flash_version  = qla24xx_get_flash_version,
2367     .start_scsi     = qla24xx_dif_start_scsi,
2368     .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2369     .abort_isp      = qla2x00_abort_isp,
2370     .iospace_config     = qla2x00_iospace_config,
2371     .initialize_adapter = qla2x00_initialize_adapter,
2372 };
2373 
2374 static struct isp_operations qla81xx_isp_ops = {
2375     .pci_config     = qla25xx_pci_config,
2376     .reset_chip     = qla24xx_reset_chip,
2377     .chip_diag      = qla24xx_chip_diag,
2378     .config_rings       = qla24xx_config_rings,
2379     .reset_adapter      = qla24xx_reset_adapter,
2380     .nvram_config       = qla81xx_nvram_config,
2381     .update_fw_options  = qla24xx_update_fw_options,
2382     .load_risc      = qla81xx_load_risc,
2383     .pci_info_str       = qla24xx_pci_info_str,
2384     .fw_version_str     = qla24xx_fw_version_str,
2385     .intr_handler       = qla24xx_intr_handler,
2386     .enable_intrs       = qla24xx_enable_intrs,
2387     .disable_intrs      = qla24xx_disable_intrs,
2388     .abort_command      = qla24xx_abort_command,
2389     .target_reset       = qla24xx_abort_target,
2390     .lun_reset      = qla24xx_lun_reset,
2391     .fabric_login       = qla24xx_login_fabric,
2392     .fabric_logout      = qla24xx_fabric_logout,
2393     .calc_req_entries   = NULL,
2394     .build_iocbs        = NULL,
2395     .prep_ms_iocb       = qla24xx_prep_ms_iocb,
2396     .prep_ms_fdmi_iocb  = qla24xx_prep_ms_fdmi_iocb,
2397     .read_nvram     = NULL,
2398     .write_nvram        = NULL,
2399     .fw_dump        = qla81xx_fw_dump,
2400     .beacon_on      = qla24xx_beacon_on,
2401     .beacon_off     = qla24xx_beacon_off,
2402     .beacon_blink       = qla83xx_beacon_blink,
2403     .read_optrom        = qla25xx_read_optrom_data,
2404     .write_optrom       = qla24xx_write_optrom_data,
2405     .get_flash_version  = qla24xx_get_flash_version,
2406     .start_scsi     = qla24xx_dif_start_scsi,
2407     .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2408     .abort_isp      = qla2x00_abort_isp,
2409     .iospace_config     = qla2x00_iospace_config,
2410     .initialize_adapter = qla2x00_initialize_adapter,
2411 };
2412 
2413 static struct isp_operations qla82xx_isp_ops = {
2414     .pci_config     = qla82xx_pci_config,
2415     .reset_chip     = qla82xx_reset_chip,
2416     .chip_diag      = qla24xx_chip_diag,
2417     .config_rings       = qla82xx_config_rings,
2418     .reset_adapter      = qla24xx_reset_adapter,
2419     .nvram_config       = qla81xx_nvram_config,
2420     .update_fw_options  = qla24xx_update_fw_options,
2421     .load_risc      = qla82xx_load_risc,
2422     .pci_info_str       = qla24xx_pci_info_str,
2423     .fw_version_str     = qla24xx_fw_version_str,
2424     .intr_handler       = qla82xx_intr_handler,
2425     .enable_intrs       = qla82xx_enable_intrs,
2426     .disable_intrs      = qla82xx_disable_intrs,
2427     .abort_command      = qla24xx_abort_command,
2428     .target_reset       = qla24xx_abort_target,
2429     .lun_reset      = qla24xx_lun_reset,
2430     .fabric_login       = qla24xx_login_fabric,
2431     .fabric_logout      = qla24xx_fabric_logout,
2432     .calc_req_entries   = NULL,
2433     .build_iocbs        = NULL,
2434     .prep_ms_iocb       = qla24xx_prep_ms_iocb,
2435     .prep_ms_fdmi_iocb  = qla24xx_prep_ms_fdmi_iocb,
2436     .read_nvram     = qla24xx_read_nvram_data,
2437     .write_nvram        = qla24xx_write_nvram_data,
2438     .fw_dump        = qla82xx_fw_dump,
2439     .beacon_on      = qla82xx_beacon_on,
2440     .beacon_off     = qla82xx_beacon_off,
2441     .beacon_blink       = NULL,
2442     .read_optrom        = qla82xx_read_optrom_data,
2443     .write_optrom       = qla82xx_write_optrom_data,
2444     .get_flash_version  = qla82xx_get_flash_version,
2445     .start_scsi             = qla82xx_start_scsi,
2446     .start_scsi_mq          = NULL,
2447     .abort_isp      = qla82xx_abort_isp,
2448     .iospace_config         = qla82xx_iospace_config,
2449     .initialize_adapter = qla2x00_initialize_adapter,
2450 };
2451 
2452 static struct isp_operations qla8044_isp_ops = {
2453     .pci_config     = qla82xx_pci_config,
2454     .reset_chip     = qla82xx_reset_chip,
2455     .chip_diag      = qla24xx_chip_diag,
2456     .config_rings       = qla82xx_config_rings,
2457     .reset_adapter      = qla24xx_reset_adapter,
2458     .nvram_config       = qla81xx_nvram_config,
2459     .update_fw_options  = qla24xx_update_fw_options,
2460     .load_risc      = qla82xx_load_risc,
2461     .pci_info_str       = qla24xx_pci_info_str,
2462     .fw_version_str     = qla24xx_fw_version_str,
2463     .intr_handler       = qla8044_intr_handler,
2464     .enable_intrs       = qla82xx_enable_intrs,
2465     .disable_intrs      = qla82xx_disable_intrs,
2466     .abort_command      = qla24xx_abort_command,
2467     .target_reset       = qla24xx_abort_target,
2468     .lun_reset      = qla24xx_lun_reset,
2469     .fabric_login       = qla24xx_login_fabric,
2470     .fabric_logout      = qla24xx_fabric_logout,
2471     .calc_req_entries   = NULL,
2472     .build_iocbs        = NULL,
2473     .prep_ms_iocb       = qla24xx_prep_ms_iocb,
2474     .prep_ms_fdmi_iocb  = qla24xx_prep_ms_fdmi_iocb,
2475     .read_nvram     = NULL,
2476     .write_nvram        = NULL,
2477     .fw_dump        = qla8044_fw_dump,
2478     .beacon_on      = qla82xx_beacon_on,
2479     .beacon_off     = qla82xx_beacon_off,
2480     .beacon_blink       = NULL,
2481     .read_optrom        = qla8044_read_optrom_data,
2482     .write_optrom       = qla8044_write_optrom_data,
2483     .get_flash_version  = qla82xx_get_flash_version,
2484     .start_scsi             = qla82xx_start_scsi,
2485     .start_scsi_mq          = NULL,
2486     .abort_isp      = qla8044_abort_isp,
2487     .iospace_config     = qla82xx_iospace_config,
2488     .initialize_adapter = qla2x00_initialize_adapter,
2489 };
2490 
2491 static struct isp_operations qla83xx_isp_ops = {
2492     .pci_config     = qla25xx_pci_config,
2493     .reset_chip     = qla24xx_reset_chip,
2494     .chip_diag      = qla24xx_chip_diag,
2495     .config_rings       = qla24xx_config_rings,
2496     .reset_adapter      = qla24xx_reset_adapter,
2497     .nvram_config       = qla81xx_nvram_config,
2498     .update_fw_options  = qla24xx_update_fw_options,
2499     .load_risc      = qla81xx_load_risc,
2500     .pci_info_str       = qla24xx_pci_info_str,
2501     .fw_version_str     = qla24xx_fw_version_str,
2502     .intr_handler       = qla24xx_intr_handler,
2503     .enable_intrs       = qla24xx_enable_intrs,
2504     .disable_intrs      = qla24xx_disable_intrs,
2505     .abort_command      = qla24xx_abort_command,
2506     .target_reset       = qla24xx_abort_target,
2507     .lun_reset      = qla24xx_lun_reset,
2508     .fabric_login       = qla24xx_login_fabric,
2509     .fabric_logout      = qla24xx_fabric_logout,
2510     .calc_req_entries   = NULL,
2511     .build_iocbs        = NULL,
2512     .prep_ms_iocb       = qla24xx_prep_ms_iocb,
2513     .prep_ms_fdmi_iocb  = qla24xx_prep_ms_fdmi_iocb,
2514     .read_nvram     = NULL,
2515     .write_nvram        = NULL,
2516     .fw_dump        = qla83xx_fw_dump,
2517     .beacon_on      = qla24xx_beacon_on,
2518     .beacon_off     = qla24xx_beacon_off,
2519     .beacon_blink       = qla83xx_beacon_blink,
2520     .read_optrom        = qla25xx_read_optrom_data,
2521     .write_optrom       = qla24xx_write_optrom_data,
2522     .get_flash_version  = qla24xx_get_flash_version,
2523     .start_scsi     = qla24xx_dif_start_scsi,
2524     .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2525     .abort_isp      = qla2x00_abort_isp,
2526     .iospace_config     = qla83xx_iospace_config,
2527     .initialize_adapter = qla2x00_initialize_adapter,
2528 };
2529 
2530 static struct isp_operations qlafx00_isp_ops = {
2531     .pci_config     = qlafx00_pci_config,
2532     .reset_chip     = qlafx00_soft_reset,
2533     .chip_diag      = qlafx00_chip_diag,
2534     .config_rings       = qlafx00_config_rings,
2535     .reset_adapter      = qlafx00_soft_reset,
2536     .nvram_config       = NULL,
2537     .update_fw_options  = NULL,
2538     .load_risc      = NULL,
2539     .pci_info_str       = qlafx00_pci_info_str,
2540     .fw_version_str     = qlafx00_fw_version_str,
2541     .intr_handler       = qlafx00_intr_handler,
2542     .enable_intrs       = qlafx00_enable_intrs,
2543     .disable_intrs      = qlafx00_disable_intrs,
2544     .abort_command      = qla24xx_async_abort_command,
2545     .target_reset       = qlafx00_abort_target,
2546     .lun_reset      = qlafx00_lun_reset,
2547     .fabric_login       = NULL,
2548     .fabric_logout      = NULL,
2549     .calc_req_entries   = NULL,
2550     .build_iocbs        = NULL,
2551     .prep_ms_iocb       = qla24xx_prep_ms_iocb,
2552     .prep_ms_fdmi_iocb  = qla24xx_prep_ms_fdmi_iocb,
2553     .read_nvram     = qla24xx_read_nvram_data,
2554     .write_nvram        = qla24xx_write_nvram_data,
2555     .fw_dump        = NULL,
2556     .beacon_on      = qla24xx_beacon_on,
2557     .beacon_off     = qla24xx_beacon_off,
2558     .beacon_blink       = NULL,
2559     .read_optrom        = qla24xx_read_optrom_data,
2560     .write_optrom       = qla24xx_write_optrom_data,
2561     .get_flash_version  = qla24xx_get_flash_version,
2562     .start_scsi     = qlafx00_start_scsi,
2563     .start_scsi_mq          = NULL,
2564     .abort_isp      = qlafx00_abort_isp,
2565     .iospace_config     = qlafx00_iospace_config,
2566     .initialize_adapter = qlafx00_initialize_adapter,
2567 };
2568 
2569 static struct isp_operations qla27xx_isp_ops = {
2570     .pci_config     = qla25xx_pci_config,
2571     .reset_chip     = qla24xx_reset_chip,
2572     .chip_diag      = qla24xx_chip_diag,
2573     .config_rings       = qla24xx_config_rings,
2574     .reset_adapter      = qla24xx_reset_adapter,
2575     .nvram_config       = qla81xx_nvram_config,
2576     .update_fw_options  = qla24xx_update_fw_options,
2577     .load_risc      = qla81xx_load_risc,
2578     .pci_info_str       = qla24xx_pci_info_str,
2579     .fw_version_str     = qla24xx_fw_version_str,
2580     .intr_handler       = qla24xx_intr_handler,
2581     .enable_intrs       = qla24xx_enable_intrs,
2582     .disable_intrs      = qla24xx_disable_intrs,
2583     .abort_command      = qla24xx_abort_command,
2584     .target_reset       = qla24xx_abort_target,
2585     .lun_reset      = qla24xx_lun_reset,
2586     .fabric_login       = qla24xx_login_fabric,
2587     .fabric_logout      = qla24xx_fabric_logout,
2588     .calc_req_entries   = NULL,
2589     .build_iocbs        = NULL,
2590     .prep_ms_iocb       = qla24xx_prep_ms_iocb,
2591     .prep_ms_fdmi_iocb  = qla24xx_prep_ms_fdmi_iocb,
2592     .read_nvram     = NULL,
2593     .write_nvram        = NULL,
2594     .fw_dump        = qla27xx_fwdump,
2595     .mpi_fw_dump        = qla27xx_mpi_fwdump,
2596     .beacon_on      = qla24xx_beacon_on,
2597     .beacon_off     = qla24xx_beacon_off,
2598     .beacon_blink       = qla83xx_beacon_blink,
2599     .read_optrom        = qla25xx_read_optrom_data,
2600     .write_optrom       = qla24xx_write_optrom_data,
2601     .get_flash_version  = qla24xx_get_flash_version,
2602     .start_scsi     = qla24xx_dif_start_scsi,
2603     .start_scsi_mq          = qla2xxx_dif_start_scsi_mq,
2604     .abort_isp      = qla2x00_abort_isp,
2605     .iospace_config     = qla83xx_iospace_config,
2606     .initialize_adapter = qla2x00_initialize_adapter,
2607 };
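/*
 * The isp_operations tables above are the driver's per-chip vtable:
 * generation differences are confined to which routine each slot points
 * at (or NULL where a chip lacks the operation), and the rest of the
 * driver dispatches through ha->isp_ops, e.g.:
 *
 *	ha->isp_ops->enable_intrs(ha);
 *	rval = ha->isp_ops->abort_command(sp);
 *
 * qla2x00_probe_one() below picks the table to assign to ha->isp_ops
 * based on the detected ISP type.
 */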
2608 
2609 static inline void
2610 qla2x00_set_isp_flags(struct qla_hw_data *ha)
2611 {
2612     ha->device_type = DT_EXTENDED_IDS;
2613     switch (ha->pdev->device) {
2614     case PCI_DEVICE_ID_QLOGIC_ISP2100:
2615         ha->isp_type |= DT_ISP2100;
2616         ha->device_type &= ~DT_EXTENDED_IDS;
2617         ha->fw_srisc_address = RISC_START_ADDRESS_2100;
2618         break;
2619     case PCI_DEVICE_ID_QLOGIC_ISP2200:
2620         ha->isp_type |= DT_ISP2200;
2621         ha->device_type &= ~DT_EXTENDED_IDS;
2622         ha->fw_srisc_address = RISC_START_ADDRESS_2100;
2623         break;
2624     case PCI_DEVICE_ID_QLOGIC_ISP2300:
2625         ha->isp_type |= DT_ISP2300;
2626         ha->device_type |= DT_ZIO_SUPPORTED;
2627         ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2628         break;
2629     case PCI_DEVICE_ID_QLOGIC_ISP2312:
2630         ha->isp_type |= DT_ISP2312;
2631         ha->device_type |= DT_ZIO_SUPPORTED;
2632         ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2633         break;
2634     case PCI_DEVICE_ID_QLOGIC_ISP2322:
2635         ha->isp_type |= DT_ISP2322;
2636         ha->device_type |= DT_ZIO_SUPPORTED;
2637         if (ha->pdev->subsystem_vendor == 0x1028 &&
2638             ha->pdev->subsystem_device == 0x0170)
2639             ha->device_type |= DT_OEM_001;
2640         ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2641         break;
2642     case PCI_DEVICE_ID_QLOGIC_ISP6312:
2643         ha->isp_type |= DT_ISP6312;
2644         ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2645         break;
2646     case PCI_DEVICE_ID_QLOGIC_ISP6322:
2647         ha->isp_type |= DT_ISP6322;
2648         ha->fw_srisc_address = RISC_START_ADDRESS_2300;
2649         break;
2650     case PCI_DEVICE_ID_QLOGIC_ISP2422:
2651         ha->isp_type |= DT_ISP2422;
2652         ha->device_type |= DT_ZIO_SUPPORTED;
2653         ha->device_type |= DT_FWI2;
2654         ha->device_type |= DT_IIDMA;
2655         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2656         break;
2657     case PCI_DEVICE_ID_QLOGIC_ISP2432:
2658         ha->isp_type |= DT_ISP2432;
2659         ha->device_type |= DT_ZIO_SUPPORTED;
2660         ha->device_type |= DT_FWI2;
2661         ha->device_type |= DT_IIDMA;
2662         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2663         break;
2664     case PCI_DEVICE_ID_QLOGIC_ISP8432:
2665         ha->isp_type |= DT_ISP8432;
2666         ha->device_type |= DT_ZIO_SUPPORTED;
2667         ha->device_type |= DT_FWI2;
2668         ha->device_type |= DT_IIDMA;
2669         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2670         break;
2671     case PCI_DEVICE_ID_QLOGIC_ISP5422:
2672         ha->isp_type |= DT_ISP5422;
2673         ha->device_type |= DT_FWI2;
2674         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2675         break;
2676     case PCI_DEVICE_ID_QLOGIC_ISP5432:
2677         ha->isp_type |= DT_ISP5432;
2678         ha->device_type |= DT_FWI2;
2679         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2680         break;
2681     case PCI_DEVICE_ID_QLOGIC_ISP2532:
2682         ha->isp_type |= DT_ISP2532;
2683         ha->device_type |= DT_ZIO_SUPPORTED;
2684         ha->device_type |= DT_FWI2;
2685         ha->device_type |= DT_IIDMA;
2686         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2687         break;
2688     case PCI_DEVICE_ID_QLOGIC_ISP8001:
2689         ha->isp_type |= DT_ISP8001;
2690         ha->device_type |= DT_ZIO_SUPPORTED;
2691         ha->device_type |= DT_FWI2;
2692         ha->device_type |= DT_IIDMA;
2693         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2694         break;
2695     case PCI_DEVICE_ID_QLOGIC_ISP8021:
2696         ha->isp_type |= DT_ISP8021;
2697         ha->device_type |= DT_ZIO_SUPPORTED;
2698         ha->device_type |= DT_FWI2;
2699         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2700         /* Initialize 82XX ISP flags */
2701         qla82xx_init_flags(ha);
2702         break;
2703     case PCI_DEVICE_ID_QLOGIC_ISP8044:
2704         ha->isp_type |= DT_ISP8044;
2705         ha->device_type |= DT_ZIO_SUPPORTED;
2706         ha->device_type |= DT_FWI2;
2707         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2708         /* Initialize 82XX ISP flags */
2709         qla82xx_init_flags(ha);
2710         break;
2711     case PCI_DEVICE_ID_QLOGIC_ISP2031:
2712         ha->isp_type |= DT_ISP2031;
2713         ha->device_type |= DT_ZIO_SUPPORTED;
2714         ha->device_type |= DT_FWI2;
2715         ha->device_type |= DT_IIDMA;
2716         ha->device_type |= DT_T10_PI;
2717         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2718         break;
2719     case PCI_DEVICE_ID_QLOGIC_ISP8031:
2720         ha->isp_type |= DT_ISP8031;
2721         ha->device_type |= DT_ZIO_SUPPORTED;
2722         ha->device_type |= DT_FWI2;
2723         ha->device_type |= DT_IIDMA;
2724         ha->device_type |= DT_T10_PI;
2725         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2726         break;
2727     case PCI_DEVICE_ID_QLOGIC_ISPF001:
2728         ha->isp_type |= DT_ISPFX00;
2729         break;
2730     case PCI_DEVICE_ID_QLOGIC_ISP2071:
2731         ha->isp_type |= DT_ISP2071;
2732         ha->device_type |= DT_ZIO_SUPPORTED;
2733         ha->device_type |= DT_FWI2;
2734         ha->device_type |= DT_IIDMA;
2735         ha->device_type |= DT_T10_PI;
2736         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2737         break;
2738     case PCI_DEVICE_ID_QLOGIC_ISP2271:
2739         ha->isp_type |= DT_ISP2271;
2740         ha->device_type |= DT_ZIO_SUPPORTED;
2741         ha->device_type |= DT_FWI2;
2742         ha->device_type |= DT_IIDMA;
2743         ha->device_type |= DT_T10_PI;
2744         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2745         break;
2746     case PCI_DEVICE_ID_QLOGIC_ISP2261:
2747         ha->isp_type |= DT_ISP2261;
2748         ha->device_type |= DT_ZIO_SUPPORTED;
2749         ha->device_type |= DT_FWI2;
2750         ha->device_type |= DT_IIDMA;
2751         ha->device_type |= DT_T10_PI;
2752         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2753         break;
2754     case PCI_DEVICE_ID_QLOGIC_ISP2081:
2755     case PCI_DEVICE_ID_QLOGIC_ISP2089:
2756         ha->isp_type |= DT_ISP2081;
2757         ha->device_type |= DT_ZIO_SUPPORTED;
2758         ha->device_type |= DT_FWI2;
2759         ha->device_type |= DT_IIDMA;
2760         ha->device_type |= DT_T10_PI;
2761         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2762         break;
2763     case PCI_DEVICE_ID_QLOGIC_ISP2281:
2764     case PCI_DEVICE_ID_QLOGIC_ISP2289:
2765         ha->isp_type |= DT_ISP2281;
2766         ha->device_type |= DT_ZIO_SUPPORTED;
2767         ha->device_type |= DT_FWI2;
2768         ha->device_type |= DT_IIDMA;
2769         ha->device_type |= DT_T10_PI;
2770         ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2771         break;
2772     }
2773 
2774     if (IS_QLA82XX(ha))
2775         ha->port_no = ha->portnum & 1;
2776     else {
2777         /* Get adapter physical port no from interrupt pin register. */
2778         pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
2779         if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
2780             IS_QLA27XX(ha) || IS_QLA28XX(ha))
2781             ha->port_no--;
2782         else
2783             ha->port_no = !(ha->port_no & 1);
2784     }
2785 
2786     ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
2787         "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
2788         ha->device_type, ha->port_no, ha->fw_srisc_address);
2789 }
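/*
 * Port-number derivation above, by example: on 25xx/2031/27xx/28xx parts
 * the PCI interrupt pin register reads 1..n for ports 1..n, so the
 * decrement yields a 0-based port (pin 1 -> port 0).  Other non-82xx
 * parts derive it from pin parity via !(ha->port_no & 1) (odd pin ->
 * port 0, even pin -> port 1), while 82xx takes the low bit of portnum.
 */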
2790 
2791 static void
2792 qla2xxx_scan_start(struct Scsi_Host *shost)
2793 {
2794     scsi_qla_host_t *vha = shost_priv(shost);
2795 
2796     if (vha->hw->flags.running_gold_fw)
2797         return;
2798 
2799     set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2800     set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2801     set_bit(RSCN_UPDATE, &vha->dpc_flags);
2802     set_bit(NPIV_CONFIG_NEEDED, &vha->dpc_flags);
2803 }
2804 
2805 static int
2806 qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
2807 {
2808     scsi_qla_host_t *vha = shost_priv(shost);
2809 
2810     if (test_bit(UNLOADING, &vha->dpc_flags))
2811         return 1;
2812     if (!vha->host)
2813         return 1;
2814     if (time > vha->hw->loop_reset_delay * HZ)
2815         return 1;
2816 
2817     return atomic_read(&vha->loop_state) == LOOP_READY;
2818 }
2819 
2820 static void qla_heartbeat_work_fn(struct work_struct *work)
2821 {
2822     struct qla_hw_data *ha = container_of(work,
2823         struct qla_hw_data, heartbeat_work);
2824     struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2825 
2826     if (!ha->flags.mbox_busy && base_vha->flags.init_done)
2827         qla_no_op_mb(base_vha);
2828 }
2829 
2830 static void qla2x00_iocb_work_fn(struct work_struct *work)
2831 {
2832     struct scsi_qla_host *vha = container_of(work,
2833         struct scsi_qla_host, iocb_work);
2834     struct qla_hw_data *ha = vha->hw;
2835     struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2836     int i = 2;
2837     unsigned long flags;
2838 
2839     if (test_bit(UNLOADING, &base_vha->dpc_flags))
2840         return;
2841 
2842     while (!list_empty(&vha->work_list) && i > 0) {
2843         qla2x00_do_work(vha);
2844         i--;
2845     }
2846 
2847     spin_lock_irqsave(&vha->work_lock, flags);
2848     clear_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags);
2849     spin_unlock_irqrestore(&vha->work_lock, flags);
2850 }
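/*
 * The bounded loop above drains at most two passes of vha->work_list per
 * invocation so that a steady stream of new work items cannot monopolize
 * this context; IOCB_WORK_ACTIVE is cleared under work_lock so a
 * concurrent producer that races with the drain can re-queue the work.
 */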
2851 
2852 /*
2853  * PCI driver interface
2854  */
2855 static int
2856 qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2857 {
2858     int ret = -ENODEV;
2859     struct Scsi_Host *host;
2860     scsi_qla_host_t *base_vha = NULL;
2861     struct qla_hw_data *ha;
2862     char pci_info[30];
2863     char fw_str[30], wq_name[30];
2864     struct scsi_host_template *sht;
2865     int bars, mem_only = 0;
2866     uint16_t req_length = 0, rsp_length = 0;
2867     struct req_que *req = NULL;
2868     struct rsp_que *rsp = NULL;
2869     int i;
2870 
2871     bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
2872     sht = &qla2xxx_driver_template;
2873     if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
2874         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2432 ||
2875         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8432 ||
2876         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5422 ||
2877         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
2878         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
2879         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
2880         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
2881         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
2882         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
2883         pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
2884         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
2885         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
2886         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
2887         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
2888         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
2889         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
2890         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
2891         pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
2892         bars = pci_select_bars(pdev, IORESOURCE_MEM);
2893         mem_only = 1;
2894         ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
2895             "Mem only adapter.\n");
2896     }
2897     ql_dbg_pci(ql_dbg_init, pdev, 0x0008,
2898         "Bars=%d.\n", bars);
2899 
2900     if (mem_only) {
2901         if (pci_enable_device_mem(pdev))
2902             return ret;
2903     } else {
2904         if (pci_enable_device(pdev))
2905             return ret;
2906     }
2907 
2908     if (is_kdump_kernel()) {
2909         ql2xmqsupport = 0;
2910         ql2xallocfwdump = 0;
2911     }
2912 
2913     /* This may fail but that's ok */
2914     pci_enable_pcie_error_reporting(pdev);
2915 
2916     ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
2917     if (!ha) {
2918         ql_log_pci(ql_log_fatal, pdev, 0x0009,
2919             "Unable to allocate memory for ha.\n");
2920         goto disable_device;
2921     }
2922     ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2923         "Memory allocated for ha=%p.\n", ha);
2924     ha->pdev = pdev;
2925     INIT_LIST_HEAD(&ha->tgt.q_full_list);
2926     spin_lock_init(&ha->tgt.q_full_lock);
2927     spin_lock_init(&ha->tgt.sess_lock);
2928     spin_lock_init(&ha->tgt.atio_lock);
2929 
2930     spin_lock_init(&ha->sadb_lock);
2931     INIT_LIST_HEAD(&ha->sadb_tx_index_list);
2932     INIT_LIST_HEAD(&ha->sadb_rx_index_list);
2933 
2934     spin_lock_init(&ha->sadb_fp_lock);
2935 
2936     if (qla_edif_sadb_build_free_pool(ha)) {
2937         kfree(ha);
2938         goto disable_device;
2939     }
2940 
2941     atomic_set(&ha->nvme_active_aen_cnt, 0);
2942 
2943     /* Clear our data area */
2944     ha->bars = bars;
2945     ha->mem_only = mem_only;
2946     spin_lock_init(&ha->hardware_lock);
2947     spin_lock_init(&ha->vport_slock);
2948     mutex_init(&ha->selflogin_lock);
2949     mutex_init(&ha->optrom_mutex);
2950 
2951     /* Set ISP-type information. */
2952     qla2x00_set_isp_flags(ha);
2953 
2954     /* Set EEH reset type to fundamental if required by hba */
2955     if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
2956         IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
2957         pdev->needs_freset = 1;
2958 
2959     ha->prev_topology = 0;
2960     ha->init_cb_size = sizeof(init_cb_t);
2961     ha->link_data_rate = PORT_SPEED_UNKNOWN;
2962     ha->optrom_size = OPTROM_SIZE_2300;
2963     ha->max_exchg = FW_MAX_EXCHANGES_CNT;
2964     atomic_set(&ha->num_pend_mbx_stage1, 0);
2965     atomic_set(&ha->num_pend_mbx_stage2, 0);
2966     atomic_set(&ha->num_pend_mbx_stage3, 0);
2967     atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
2968     ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
2969 
2970     /* Assign ISP specific operations. */
2971     if (IS_QLA2100(ha)) {
2972         ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
2973         ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
2974         req_length = REQUEST_ENTRY_CNT_2100;
2975         rsp_length = RESPONSE_ENTRY_CNT_2100;
2976         ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
2977         ha->gid_list_info_size = 4;
2978         ha->flash_conf_off = ~0;
2979         ha->flash_data_off = ~0;
2980         ha->nvram_conf_off = ~0;
2981         ha->nvram_data_off = ~0;
2982         ha->isp_ops = &qla2100_isp_ops;
2983     } else if (IS_QLA2200(ha)) {
2984         ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
2985         ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
2986         req_length = REQUEST_ENTRY_CNT_2200;
2987         rsp_length = RESPONSE_ENTRY_CNT_2100;
2988         ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
2989         ha->gid_list_info_size = 4;
2990         ha->flash_conf_off = ~0;
2991         ha->flash_data_off = ~0;
2992         ha->nvram_conf_off = ~0;
2993         ha->nvram_data_off = ~0;
2994         ha->isp_ops = &qla2100_isp_ops;
2995     } else if (IS_QLA23XX(ha)) {
2996         ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
2997         ha->mbx_count = MAILBOX_REGISTER_COUNT;
2998         req_length = REQUEST_ENTRY_CNT_2200;
2999         rsp_length = RESPONSE_ENTRY_CNT_2300;
3000         ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3001         ha->gid_list_info_size = 6;
3002         if (IS_QLA2322(ha) || IS_QLA6322(ha))
3003             ha->optrom_size = OPTROM_SIZE_2322;
3004         ha->flash_conf_off = ~0;
3005         ha->flash_data_off = ~0;
3006         ha->nvram_conf_off = ~0;
3007         ha->nvram_data_off = ~0;
3008         ha->isp_ops = &qla2300_isp_ops;
3009     } else if (IS_QLA24XX_TYPE(ha)) {
3010         ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3011         ha->mbx_count = MAILBOX_REGISTER_COUNT;
3012         req_length = REQUEST_ENTRY_CNT_24XX;
3013         rsp_length = RESPONSE_ENTRY_CNT_2300;
3014         ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3015         ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3016         ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
3017         ha->gid_list_info_size = 8;
3018         ha->optrom_size = OPTROM_SIZE_24XX;
3019         ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA24XX;
3020         ha->isp_ops = &qla24xx_isp_ops;
3021         ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3022         ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3023         ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3024         ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3025     } else if (IS_QLA25XX(ha)) {
3026         ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3027         ha->mbx_count = MAILBOX_REGISTER_COUNT;
3028         req_length = REQUEST_ENTRY_CNT_24XX;
3029         rsp_length = RESPONSE_ENTRY_CNT_2300;
3030         ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3031         ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3032         ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
3033         ha->gid_list_info_size = 8;
3034         ha->optrom_size = OPTROM_SIZE_25XX;
3035         ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3036         ha->isp_ops = &qla25xx_isp_ops;
3037         ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3038         ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3039         ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3040         ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3041     } else if (IS_QLA81XX(ha)) {
3042         ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3043         ha->mbx_count = MAILBOX_REGISTER_COUNT;
3044         req_length = REQUEST_ENTRY_CNT_24XX;
3045         rsp_length = RESPONSE_ENTRY_CNT_2300;
3046         ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3047         ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3048         ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3049         ha->gid_list_info_size = 8;
3050         ha->optrom_size = OPTROM_SIZE_81XX;
3051         ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3052         ha->isp_ops = &qla81xx_isp_ops;
3053         ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
3054         ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
3055         ha->nvram_conf_off = ~0;
3056         ha->nvram_data_off = ~0;
3057     } else if (IS_QLA82XX(ha)) {
3058         ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3059         ha->mbx_count = MAILBOX_REGISTER_COUNT;
3060         req_length = REQUEST_ENTRY_CNT_82XX;
3061         rsp_length = RESPONSE_ENTRY_CNT_82XX;
3062         ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3063         ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3064         ha->gid_list_info_size = 8;
3065         ha->optrom_size = OPTROM_SIZE_82XX;
3066         ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3067         ha->isp_ops = &qla82xx_isp_ops;
3068         ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3069         ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3070         ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3071         ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3072     } else if (IS_QLA8044(ha)) {
3073         ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3074         ha->mbx_count = MAILBOX_REGISTER_COUNT;
3075         req_length = REQUEST_ENTRY_CNT_82XX;
3076         rsp_length = RESPONSE_ENTRY_CNT_82XX;
3077         ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3078         ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3079         ha->gid_list_info_size = 8;
3080         ha->optrom_size = OPTROM_SIZE_83XX;
3081         ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3082         ha->isp_ops = &qla8044_isp_ops;
3083         ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
3084         ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
3085         ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
3086         ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
3087     } else if (IS_QLA83XX(ha)) {
3088         ha->portnum = PCI_FUNC(ha->pdev->devfn);
3089         ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3090         ha->mbx_count = MAILBOX_REGISTER_COUNT;
3091         req_length = REQUEST_ENTRY_CNT_83XX;
3092         rsp_length = RESPONSE_ENTRY_CNT_83XX;
3093         ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3094         ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3095         ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3096         ha->gid_list_info_size = 8;
3097         ha->optrom_size = OPTROM_SIZE_83XX;
3098         ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3099         ha->isp_ops = &qla83xx_isp_ops;
3100         ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
3101         ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
3102         ha->nvram_conf_off = ~0;
3103         ha->nvram_data_off = ~0;
3104     } else if (IS_QLAFX00(ha)) {
3105         ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
3106         ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
3107         ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
3108         req_length = REQUEST_ENTRY_CNT_FX00;
3109         rsp_length = RESPONSE_ENTRY_CNT_FX00;
3110         ha->isp_ops = &qlafx00_isp_ops;
3111         ha->port_down_retry_count = 30; /* default value */
3112         ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
3113         ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
3114         ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
3115         ha->mr.fw_hbt_en = 1;
3116         ha->mr.host_info_resend = false;
3117         ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL;
3118     } else if (IS_QLA27XX(ha)) {
3119         ha->portnum = PCI_FUNC(ha->pdev->devfn);
3120         ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3121         ha->mbx_count = MAILBOX_REGISTER_COUNT;
3122         req_length = REQUEST_ENTRY_CNT_83XX;
3123         rsp_length = RESPONSE_ENTRY_CNT_83XX;
3124         ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3125         ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3126         ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3127         ha->gid_list_info_size = 8;
3128         ha->optrom_size = OPTROM_SIZE_83XX;
3129         ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3130         ha->isp_ops = &qla27xx_isp_ops;
3131         ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
3132         ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
3133         ha->nvram_conf_off = ~0;
3134         ha->nvram_data_off = ~0;
3135     } else if (IS_QLA28XX(ha)) {
3136         ha->portnum = PCI_FUNC(ha->pdev->devfn);
3137         ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
3138         ha->mbx_count = MAILBOX_REGISTER_COUNT;
3139         req_length = REQUEST_ENTRY_CNT_83XX;
3140         rsp_length = RESPONSE_ENTRY_CNT_83XX;
3141         ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
3142         ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
3143         ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
3144         ha->gid_list_info_size = 8;
3145         ha->optrom_size = OPTROM_SIZE_28XX;
3146         ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
3147         ha->isp_ops = &qla27xx_isp_ops;
3148         ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX;
3149         ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX;
3150         ha->nvram_conf_off = ~0;
3151         ha->nvram_data_off = ~0;
3152     }
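    /*
     * Editor's note -- illustrative sketch, not part of the driver: the
     * chain above is in effect a per-ISP parameter table keyed on the
     * IS_QLAxxxx() predicates.  Under that assumption it could be made
     * table-driven (the isp_defaults type below is hypothetical):
     *
     *	struct isp_defaults {
     *		u16 req_length;
     *		u16 rsp_length;
     *		uint16_t max_loop_id;
     *		struct isp_operations *isp_ops;
     *	};
     *
     * with one row per family, e.g. { REQUEST_ENTRY_CNT_24XX,
     * RESPONSE_ENTRY_CNT_2300, SNS_LAST_LOOP_ID_2300, &qla24xx_isp_ops }
     * for ISP24xx.  The in-tree code keeps the explicit if/else chain,
     * which makes the per-chip differences (init_cb size, flash/NVRAM
     * offsets, optrom size) easy to audit side by side.
     */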
3153 
3154     ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
3155         "mbx_count=%d, req_length=%d, "
3156         "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
3157         "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
3158         "max_fibre_devices=%d.\n",
3159         ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
3160         ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
3161         ha->nvram_npiv_size, ha->max_fibre_devices);
3162     ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
3163         "isp_ops=%p, flash_conf_off=%d, "
3164         "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
3165         ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
3166         ha->nvram_conf_off, ha->nvram_data_off);
3167 
3168     /* Configure PCI I/O space */
3169     ret = ha->isp_ops->iospace_config(ha);
3170     if (ret)
3171         goto iospace_config_failed;
3172 
3173     ql_log_pci(ql_log_info, pdev, 0x001d,
3174         "Found an ISP%04X irq %d iobase 0x%p.\n",
3175         pdev->device, pdev->irq, ha->iobase);
3176     mutex_init(&ha->vport_lock);
3177     mutex_init(&ha->mq_lock);
3178     init_completion(&ha->mbx_cmd_comp);
3179     complete(&ha->mbx_cmd_comp);
3180     init_completion(&ha->mbx_intr_comp);
3181     init_completion(&ha->dcbx_comp);
3182     init_completion(&ha->lb_portup_comp);
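    /*
     * Editor's note (hedged -- inferred from the init-then-complete
     * idiom; the mailbox code itself lives in qla_mbx.c): completing
     * mbx_cmd_comp immediately after init_completion() leaves it in the
     * "mailbox available" state, so the first mailbox command does not
     * block; each command then re-arms and re-completes it.
     */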
3183 
3184     set_bit(0, (unsigned long *) ha->vp_idx_map);
3185 
3186     qla2x00_config_dma_addressing(ha);
3187     ql_dbg_pci(ql_dbg_init, pdev, 0x0020,
3188         "64 Bit addressing is %s.\n",
3189         ha->flags.enable_64bit_addressing ? "enabled" :
3190         "disabled");
3191     ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp);
3192     if (ret) {
3193         ql_log_pci(ql_log_fatal, pdev, 0x0031,
3194             "Failed to allocate memory for adapter, aborting.\n");
3195 
3196         goto probe_hw_failed;
3197     }
3198 
3199     req->max_q_depth = MAX_Q_DEPTH;
3200     if (ql2xmaxqdepth != 0 && ql2xmaxqdepth <= 0xffffU)
3201         req->max_q_depth = ql2xmaxqdepth;
3202 
3203 
3204     base_vha = qla2x00_create_host(sht, ha);
3205     if (!base_vha) {
3206         ret = -ENOMEM;
3207         goto probe_hw_failed;
3208     }
3209 
3210     pci_set_drvdata(pdev, base_vha);
3211     set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
3212 
3213     host = base_vha->host;
3214     base_vha->req = req;
3215     if (IS_QLA2XXX_MIDTYPE(ha))
3216         base_vha->mgmt_svr_loop_id =
3217             qla2x00_reserve_mgmt_server_loop_id(base_vha);
3218     else
3219         base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
3220                         base_vha->vp_idx;
3221 
3222     /* Setup fcport template structure. */
3223     ha->mr.fcport.vha = base_vha;
3224     ha->mr.fcport.port_type = FCT_UNKNOWN;
3225     ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
3226     qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
3227     ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
3228     ha->mr.fcport.scan_state = 1;
3229 
3230     qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN |
3231                 QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT |
3232                 QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN);
3233 
3234     /* Set the SG table size based on ISP type */
3235     if (!IS_FWI2_CAPABLE(ha)) {
3236         if (IS_QLA2100(ha))
3237             host->sg_tablesize = 32;
3238     } else {
3239         if (!IS_QLA82XX(ha))
3240             host->sg_tablesize = QLA_SG_ALL;
3241     }
3242     host->max_id = ha->max_fibre_devices;
3243     host->cmd_per_lun = 3;
3244     host->unique_id = host->host_no;
3245     if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
3246         host->max_cmd_len = 32;
3247     else
3248         host->max_cmd_len = MAX_CMDSZ;
3249     host->max_channel = MAX_BUSES - 1;
3250     /* Older HBAs support only 16-bit LUNs */
3251     if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
3252         ql2xmaxlun > 0xffff)
3253         host->max_lun = 0xffff;
3254     else
3255         host->max_lun = ql2xmaxlun;
3256     host->transportt = qla2xxx_transport_template;
3257     sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
3258 
3259     ql_dbg(ql_dbg_init, base_vha, 0x0033,
3260         "max_id=%d this_id=%d "
3261         "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
3262         "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
3263         host->this_id, host->cmd_per_lun, host->unique_id,
3264         host->max_cmd_len, host->max_channel, host->max_lun,
3265         host->transportt, sht->vendor_id);
3266 
3267     INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
3268     INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn);
3269 
3270     /* Set up the irqs */
3271     ret = qla2x00_request_irqs(ha, rsp);
3272     if (ret)
3273         goto probe_failed;
3274 
3275     /* Alloc arrays of request and response ring ptrs */
3276     ret = qla2x00_alloc_queues(ha, req, rsp);
3277     if (ret) {
3278         ql_log(ql_log_fatal, base_vha, 0x003d,
3279             "Failed to allocate memory for queue pointers..."
3280             "aborting.\n");
3281         ret = -ENODEV;
3282         goto probe_failed;
3283     }
3284 
3285     if (ha->mqenable) {
3286         /* number of hardware queues supported by blk/scsi-mq */
3287         host->nr_hw_queues = ha->max_qpairs;
3288 
3289         ql_dbg(ql_dbg_init, base_vha, 0x0192,
3290             "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues);
3291     } else {
3292         if (ql2xnvmeenable) {
3293             host->nr_hw_queues = ha->max_qpairs;
3294             ql_dbg(ql_dbg_init, base_vha, 0x0194,
3295                 "FC-NVMe support is enabled, HW queues=%d\n",
3296                 host->nr_hw_queues);
3297         } else {
3298             ql_dbg(ql_dbg_init, base_vha, 0x0193,
3299                 "blk/scsi-mq disabled.\n");
3300         }
3301     }
3302 
3303     qlt_probe_one_stage1(base_vha, ha);
3304 
3305     pci_save_state(pdev);
3306 
3307     /* Assign back pointers */
3308     rsp->req = req;
3309     req->rsp = rsp;
3310 
3311     if (IS_QLAFX00(ha)) {
3312         ha->rsp_q_map[0] = rsp;
3313         ha->req_q_map[0] = req;
3314         set_bit(0, ha->req_qid_map);
3315         set_bit(0, ha->rsp_qid_map);
3316     }
3317 
3318     /* FWI2-capable only. */
3319     req->req_q_in = &ha->iobase->isp24.req_q_in;
3320     req->req_q_out = &ha->iobase->isp24.req_q_out;
3321     rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
3322     rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
3323     if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3324         IS_QLA28XX(ha)) {
3325         req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
3326         req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
3327         rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
3328         rsp->rsp_q_out = &ha->mqiobase->isp25mq.rsp_q_out;
3329     }
3330 
3331     if (IS_QLAFX00(ha)) {
3332         req->req_q_in = &ha->iobase->ispfx00.req_q_in;
3333         req->req_q_out = &ha->iobase->ispfx00.req_q_out;
3334         rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
3335         rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
3336     }
3337 
3338     if (IS_P3P_TYPE(ha)) {
3339         req->req_q_out = &ha->iobase->isp82.req_q_out[0];
3340         rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
3341         rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
3342     }
3343 
3344     ql_dbg(ql_dbg_multiq, base_vha, 0xc009,
3345         "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
3346         ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
3347     ql_dbg(ql_dbg_multiq, base_vha, 0xc00a,
3348         "req->req_q_in=%p req->req_q_out=%p "
3349         "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
3350         req->req_q_in, req->req_q_out,
3351         rsp->rsp_q_in, rsp->rsp_q_out);
3352     ql_dbg(ql_dbg_init, base_vha, 0x003e,
3353         "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n",
3354         ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp);
3355     ql_dbg(ql_dbg_init, base_vha, 0x003f,
3356         "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
3357         req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
3358 
3359     ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0);
3360     if (unlikely(!ha->wq)) {
3361         ret = -ENOMEM;
3362         goto probe_failed;
3363     }
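    /*
     * Editor's note: WQ_MEM_RECLAIM gives the workqueue a rescuer
     * thread, so items queued from the I/O path can still make forward
     * progress under memory pressure.  Usage elsewhere in the driver is
     * the ordinary pattern (sketch; "some_work" is hypothetical):
     *
     *	INIT_WORK(&some_work, some_work_fn);
     *	queue_work(ha->wq, &some_work);
     */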
3364 
3365     if (ha->isp_ops->initialize_adapter(base_vha)) {
3366         ql_log(ql_log_fatal, base_vha, 0x00d6,
3367             "Failed to initialize adapter - Adapter flags %x.\n",
3368             base_vha->device_flags);
3369 
3370         if (IS_QLA82XX(ha)) {
3371             qla82xx_idc_lock(ha);
3372             qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3373                 QLA8XXX_DEV_FAILED);
3374             qla82xx_idc_unlock(ha);
3375             ql_log(ql_log_fatal, base_vha, 0x00d7,
3376                 "HW State: FAILED.\n");
3377         } else if (IS_QLA8044(ha)) {
3378             qla8044_idc_lock(ha);
3379             qla8044_wr_direct(base_vha,
3380                 QLA8044_CRB_DEV_STATE_INDEX,
3381                 QLA8XXX_DEV_FAILED);
3382             qla8044_idc_unlock(ha);
3383             ql_log(ql_log_fatal, base_vha, 0x0150,
3384                 "HW State: FAILED.\n");
3385         }
3386 
3387         ret = -ENODEV;
3388         goto probe_failed;
3389     }
3390 
3391     if (IS_QLAFX00(ha))
3392         host->can_queue = QLAFX00_MAX_CANQUEUE;
3393     else
3394         host->can_queue = req->num_outstanding_cmds - 10;
3395 
3396     ql_dbg(ql_dbg_init, base_vha, 0x0032,
3397         "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
3398         host->can_queue, base_vha->req,
3399         base_vha->mgmt_svr_loop_id, host->sg_tablesize);
3400 
3401     /* Check if FW supports MQ or not for ISP25xx */
3402     if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6))
3403         ha->mqenable = 0;
3404 
3405     if (ha->mqenable) {
3406         bool startit = false;
3407 
3408         if (QLA_TGT_MODE_ENABLED())
3409             startit = false;
3410 
3411         if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
3412             startit = true;
3413 
3414         /* Create start of day qpairs for Block MQ */
3415         for (i = 0; i < ha->max_qpairs; i++)
3416             qla2xxx_create_qpair(base_vha, 5, 0, startit);
3417     }
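    /*
     * Editor's note (hedged reading of qla2xxx_create_qpair(vha, qos,
     * vp_idx, startit)): one qpair -- a request/response ring pair with
     * its own interrupt vector -- is created per hardware queue
     * advertised to blk-mq above, so host->nr_hw_queues and
     * ha->max_qpairs match one to one.  "startit" stays false when
     * target mode owns bring-up, letting the target code start the
     * qpairs itself.
     */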
3418     qla_init_iocb_limit(base_vha);
3419 
3420     if (ha->flags.running_gold_fw)
3421         goto skip_dpc;
3422 
3423     /*
3424      * Startup the kernel thread for this host adapter
3425      */
3426     ha->dpc_thread = kthread_create(qla2x00_do_dpc, ha,
3427         "%s_dpc", base_vha->host_str);
3428     if (IS_ERR(ha->dpc_thread)) {
3429         ql_log(ql_log_fatal, base_vha, 0x00ed,
3430             "Failed to start DPC thread.\n");
3431         ret = PTR_ERR(ha->dpc_thread);
3432         ha->dpc_thread = NULL;
3433         goto probe_failed;
3434     }
3435     ql_dbg(ql_dbg_init, base_vha, 0x00ee,
3436         "DPC thread started successfully.\n");
3437 
3438     /*
3439      * If we're not coming up in initiator mode, we might sit for
3440      * a while without waking up the dpc thread, which leads to a
3441      * stuck process warning.  So just kick the dpc once here and
3442      * let the kthread start (and go back to sleep in qla2x00_do_dpc).
3443      */
3444     qla2xxx_wake_dpc(base_vha);
3445 
3446     INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
3447 
3448     if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
3449         sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);
3450         ha->dpc_lp_wq = create_singlethread_workqueue(wq_name);
3451         INIT_WORK(&ha->idc_aen, qla83xx_service_idc_aen);
3452 
3453         sprintf(wq_name, "qla2xxx_%lu_dpc_hp_wq", base_vha->host_no);
3454         ha->dpc_hp_wq = create_singlethread_workqueue(wq_name);
3455         INIT_WORK(&ha->nic_core_reset, qla83xx_nic_core_reset_work);
3456         INIT_WORK(&ha->idc_state_handler,
3457             qla83xx_idc_state_handler_work);
3458         INIT_WORK(&ha->nic_core_unrecoverable,
3459             qla83xx_nic_core_unrecoverable_work);
3460     }
3461 
3462 skip_dpc:
3463     list_add_tail(&base_vha->list, &ha->vp_list);
3464     base_vha->host->irq = ha->pdev->irq;
3465 
3466     /* Initialize the timer */
3467     qla2x00_start_timer(base_vha, WATCH_INTERVAL);
3468     ql_dbg(ql_dbg_init, base_vha, 0x00ef,
3469         "Started qla2x00_timer with "
3470         "interval=%d.\n", WATCH_INTERVAL);
3471     ql_dbg(ql_dbg_init, base_vha, 0x00f0,
3472         "Detected hba at address=%p.\n",
3473         ha);
3474 
3475     if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
3476         if (ha->fw_attributes & BIT_4) {
3477             int prot = 0, guard;
3478 
3479             base_vha->flags.difdix_supported = 1;
3480             ql_dbg(ql_dbg_init, base_vha, 0x00f1,
3481                 "Registering for DIF/DIX type 1 and 3 protection.\n");
3482             if (ql2xenabledif == 1)
3483                 prot = SHOST_DIX_TYPE0_PROTECTION;
3484             if (ql2xprotmask)
3485                 scsi_host_set_prot(host, ql2xprotmask);
3486             else
3487                 scsi_host_set_prot(host,
3488                     prot | SHOST_DIF_TYPE1_PROTECTION
3489                     | SHOST_DIF_TYPE2_PROTECTION
3490                     | SHOST_DIF_TYPE3_PROTECTION
3491                     | SHOST_DIX_TYPE1_PROTECTION
3492                     | SHOST_DIX_TYPE2_PROTECTION
3493                     | SHOST_DIX_TYPE3_PROTECTION);
3494 
3495             guard = SHOST_DIX_GUARD_CRC;
3496 
3497             if (IS_PI_IPGUARD_CAPABLE(ha) &&
3498                 (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha)))
3499                 guard |= SHOST_DIX_GUARD_IP;
3500 
3501             if (ql2xprotguard)
3502                 scsi_host_set_guard(host, ql2xprotguard);
3503             else
3504                 scsi_host_set_guard(host, guard);
3505         } else
3506             base_vha->flags.difdix_supported = 0;
3507     }
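    /*
     * Editor's note -- worked example of the protection mask above:
     * with ql2xenabledif == 1 and no ql2xprotmask override, the host
     * registers DIX type 0 plus DIF/DIX types 1-3:
     *
     *	scsi_host_set_prot(host, SHOST_DIX_TYPE0_PROTECTION |
     *	    SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
     *	    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
     *	    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION);
     *
     * scsi_host_set_prot()/scsi_host_set_guard() only record capability
     * bits in the Scsi_Host; the block layer consults them when it
     * attaches an integrity profile to a request queue.
     */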
3508 
3509     ha->isp_ops->enable_intrs(ha);
3510 
3511     if (IS_QLAFX00(ha)) {
3512         ret = qlafx00_fx_disc(base_vha,
3513             &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
3514         host->sg_tablesize = (ha->mr.extended_io_enabled) ?
3515             QLA_SG_ALL : 128;
3516     }
3517 
3518     ret = scsi_add_host(host, &pdev->dev);
3519     if (ret)
3520         goto probe_failed;
3521 
3522     base_vha->flags.init_done = 1;
3523     base_vha->flags.online = 1;
3524     ha->prev_minidump_failed = 0;
3525 
3526     ql_dbg(ql_dbg_init, base_vha, 0x00f2,
3527         "Init done and hba is online.\n");
3528 
3529     if (qla_ini_mode_enabled(base_vha) ||
3530         qla_dual_mode_enabled(base_vha))
3531         scsi_scan_host(host);
3532     else
3533         ql_dbg(ql_dbg_init, base_vha, 0x0122,
3534             "skipping scsi_scan_host() for non-initiator port\n");
3535 
3536     qla2x00_alloc_sysfs_attr(base_vha);
3537 
3538     if (IS_QLAFX00(ha)) {
3539         ret = qlafx00_fx_disc(base_vha,
3540             &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
3541 
3542         /* Register system information */
3543         ret = qlafx00_fx_disc(base_vha,
3544             &base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
3545     }
3546 
3547     qla2x00_init_host_attr(base_vha);
3548 
3549     qla2x00_dfs_setup(base_vha);
3550 
3551     ql_log(ql_log_info, base_vha, 0x00fb,
3552         "QLogic %s - %s.\n", ha->model_number, ha->model_desc);
3553     ql_log(ql_log_info, base_vha, 0x00fc,
3554         "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n",
3555         pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info,
3556                                sizeof(pci_info)),
3557         pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
3558         base_vha->host_no,
3559         ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
3560 
3561     qlt_add_target(ha, base_vha);
3562 
3563     clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
3564 
3565     if (test_bit(UNLOADING, &base_vha->dpc_flags))
3566         return -ENODEV;
3567 
3568     return 0;
3569 
3570 probe_failed:
3571     qla_enode_stop(base_vha);
3572     qla_edb_stop(base_vha);
3573     if (base_vha->gnl.l) {
3574         dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
3575                 base_vha->gnl.l, base_vha->gnl.ldma);
3576         base_vha->gnl.l = NULL;
3577     }
3578 
3579     if (base_vha->timer_active)
3580         qla2x00_stop_timer(base_vha);
3581     base_vha->flags.online = 0;
3582     if (ha->dpc_thread) {
3583         struct task_struct *t = ha->dpc_thread;
3584 
3585         ha->dpc_thread = NULL;
3586         kthread_stop(t);
3587     }
3588 
3589     qla2x00_free_device(base_vha);
3590     scsi_host_put(base_vha->host);
3591     /*
3592      * Need to NULL out local req/rsp after
3593      * qla2x00_free_device => qla2x00_free_queues frees
3594      * what these are pointing to. Or else we'll
3595      * fall over below in qla2x00_free_req/rsp_que.
3596      */
3597     req = NULL;
3598     rsp = NULL;
3599 
3600 probe_hw_failed:
3601     qla2x00_mem_free(ha);
3602     qla2x00_free_req_que(ha, req);
3603     qla2x00_free_rsp_que(ha, rsp);
3604     qla2x00_clear_drv_active(ha);
3605 
3606 iospace_config_failed:
3607     if (IS_P3P_TYPE(ha)) {
3608         if (ha->nx_pcibase)
3609             iounmap((device_reg_t *)ha->nx_pcibase);
3610         if (!ql2xdbwr)
3611             iounmap((device_reg_t *)ha->nxdb_wr_ptr);
3612     } else {
3613         if (ha->iobase)
3614             iounmap(ha->iobase);
3615         if (ha->cregbase)
3616             iounmap(ha->cregbase);
3617     }
3618     pci_release_selected_regions(ha->pdev, ha->bars);
3619     kfree(ha);
3620 
3621 disable_device:
3622     pci_disable_device(pdev);
3623     return ret;
3624 }
3625 
3626 static void __qla_set_remove_flag(scsi_qla_host_t *base_vha)
3627 {
3628     scsi_qla_host_t *vp;
3629     unsigned long flags;
3630     struct qla_hw_data *ha;
3631 
3632     if (!base_vha)
3633         return;
3634 
3635     ha = base_vha->hw;
3636 
3637     spin_lock_irqsave(&ha->vport_slock, flags);
3638     list_for_each_entry(vp, &ha->vp_list, list)
3639         set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags);
3640 
3641     /*
3642      * Indicate device removal to prevent future board_disable
3643      * and wait until any pending board_disable has completed.
3644      */
3645     set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
3646     spin_unlock_irqrestore(&ha->vport_slock, flags);
3647 }
3648 
3649 static void
3650 qla2x00_shutdown(struct pci_dev *pdev)
3651 {
3652     scsi_qla_host_t *vha;
3653     struct qla_hw_data  *ha;
3654 
3655     vha = pci_get_drvdata(pdev);
3656     ha = vha->hw;
3657 
3658     ql_log(ql_log_info, vha, 0xfffa,
3659         "Adapter shutdown\n");
3660 
3661     /*
3662      * Prevent future board_disable and wait
3663      * until any pending board_disable has completed.
3664      */
3665     __qla_set_remove_flag(vha);
3666     cancel_work_sync(&ha->board_disable);
3667 
3668     if (!atomic_read(&pdev->enable_cnt))
3669         return;
3670 
3671     /* Notify ISPFX00 firmware */
3672     if (IS_QLAFX00(ha))
3673         qlafx00_driver_shutdown(vha, 20);
3674 
3675     /* Turn-off FCE trace */
3676     if (ha->flags.fce_enabled) {
3677         qla2x00_disable_fce_trace(vha, NULL, NULL);
3678         ha->flags.fce_enabled = 0;
3679     }
3680 
3681     /* Turn-off EFT trace */
3682     if (ha->eft)
3683         qla2x00_disable_eft_trace(vha);
3684 
3685     if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
3686         IS_QLA28XX(ha)) {
3687         if (ha->flags.fw_started)
3688             qla2x00_abort_isp_cleanup(vha);
3689     } else {
3690         /* Stop currently executing firmware. */
3691         qla2x00_try_to_stop_firmware(vha);
3692     }
3693 
3694     /* Disable timer */
3695     if (vha->timer_active)
3696         qla2x00_stop_timer(vha);
3697 
3698     /* Turn adapter off line */
3699     vha->flags.online = 0;
3700 
3701     /* turn-off interrupts on the card */
3702     if (ha->interrupts_on) {
3703         vha->flags.init_done = 0;
3704         ha->isp_ops->disable_intrs(ha);
3705     }
3706 
3707     qla2x00_free_irqs(vha);
3708 
3709     qla2x00_free_fw_dump(ha);
3710 
3711     pci_disable_device(pdev);
3712     ql_log(ql_log_info, vha, 0xfffe,
3713         "Adapter shutdown successfully.\n");
3714 }
3715 
3716 /* Deletes all the virtual ports for a given ha */
3717 static void
3718 qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
3719 {
3720     scsi_qla_host_t *vha;
3721     unsigned long flags;
3722 
3723     mutex_lock(&ha->vport_lock);
3724     while (ha->cur_vport_count) {
3725         spin_lock_irqsave(&ha->vport_slock, flags);
3726 
3727         BUG_ON(base_vha->list.next == &ha->vp_list);
3728         /* This assumes first entry in ha->vp_list is always base vha */
3729         vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
3730         scsi_host_get(vha->host);
3731 
3732         spin_unlock_irqrestore(&ha->vport_slock, flags);
3733         mutex_unlock(&ha->vport_lock);
3734 
3735         qla_nvme_delete(vha);
3736 
3737         fc_vport_terminate(vha->fc_vport);
3738         scsi_host_put(vha->host);
3739 
3740         mutex_lock(&ha->vport_lock);
3741     }
3742     mutex_unlock(&ha->vport_lock);
3743 }
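/*
 * Editor's note: the scsi_host_get()/scsi_host_put() pair above pins the
 * vha across the window where both vport_slock and vport_lock are
 * dropped -- fc_vport_terminate() can sleep, so neither lock may be held
 * around it, and the reference keeps the host from disappearing first.
 */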
3744 
3745 /* Stops all deferred work threads */
3746 static void
3747 qla2x00_destroy_deferred_work(struct qla_hw_data *ha)
3748 {
3749     /* Cancel all work and destroy DPC workqueues */
3750     if (ha->dpc_lp_wq) {
3751         cancel_work_sync(&ha->idc_aen);
3752         destroy_workqueue(ha->dpc_lp_wq);
3753         ha->dpc_lp_wq = NULL;
3754     }
3755 
3756     if (ha->dpc_hp_wq) {
3757         cancel_work_sync(&ha->nic_core_reset);
3758         cancel_work_sync(&ha->idc_state_handler);
3759         cancel_work_sync(&ha->nic_core_unrecoverable);
3760         destroy_workqueue(ha->dpc_hp_wq);
3761         ha->dpc_hp_wq = NULL;
3762     }
3763 
3764     /* Kill the kernel thread for this host */
3765     if (ha->dpc_thread) {
3766         struct task_struct *t = ha->dpc_thread;
3767 
3768         /*
3769          * qla2xxx_wake_dpc checks for ->dpc_thread
3770          * so we need to zero it out.
3771          */
3772         ha->dpc_thread = NULL;
3773         kthread_stop(t);
3774     }
3775 }
3776 
3777 static void
3778 qla2x00_unmap_iobases(struct qla_hw_data *ha)
3779 {
3780     if (IS_QLA82XX(ha)) {
3781 
3782         iounmap((device_reg_t *)ha->nx_pcibase);
3783         if (!ql2xdbwr)
3784             iounmap((device_reg_t *)ha->nxdb_wr_ptr);
3785     } else {
3786         if (ha->iobase)
3787             iounmap(ha->iobase);
3788 
3789         if (ha->cregbase)
3790             iounmap(ha->cregbase);
3791 
3792         if (ha->mqiobase)
3793             iounmap(ha->mqiobase);
3794 
3795         if (ha->msixbase)
3796             iounmap(ha->msixbase);
3797     }
3798 }
3799 
3800 static void
3801 qla2x00_clear_drv_active(struct qla_hw_data *ha)
3802 {
3803     if (IS_QLA8044(ha)) {
3804         qla8044_idc_lock(ha);
3805         qla8044_clear_drv_active(ha);
3806         qla8044_idc_unlock(ha);
3807     } else if (IS_QLA82XX(ha)) {
3808         qla82xx_idc_lock(ha);
3809         qla82xx_clear_drv_active(ha);
3810         qla82xx_idc_unlock(ha);
3811     }
3812 }
3813 
3814 static void
3815 qla2x00_remove_one(struct pci_dev *pdev)
3816 {
3817     scsi_qla_host_t *base_vha;
3818     struct qla_hw_data  *ha;
3819 
3820     base_vha = pci_get_drvdata(pdev);
3821     ha = base_vha->hw;
3822     ql_log(ql_log_info, base_vha, 0xb079,
3823         "Removing driver\n");
3824     __qla_set_remove_flag(base_vha);
3825     cancel_work_sync(&ha->board_disable);
3826 
3827     /*
3828      * If the PCI device is disabled then there was a PCI-disconnect and
3829      * qla2x00_disable_board_on_pci_error has taken care of most of the
3830      * resources.
3831      */
3832     if (!atomic_read(&pdev->enable_cnt)) {
3833         dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
3834             base_vha->gnl.l, base_vha->gnl.ldma);
3835         base_vha->gnl.l = NULL;
3836         scsi_host_put(base_vha->host);
3837         kfree(ha);
3838         pci_set_drvdata(pdev, NULL);
3839         return;
3840     }
3841     qla2x00_wait_for_hba_ready(base_vha);
3842 
3843     /*
3844      * if UNLOADING flag is already set, then continue unload,
3845      * where it was set first.
3846      */
3847     if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
3848         return;
3849 
3850     if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
3851         IS_QLA28XX(ha)) {
3852         if (ha->flags.fw_started)
3853             qla2x00_abort_isp_cleanup(base_vha);
3854     } else if (!IS_QLAFX00(ha)) {
3855         if (IS_QLA8031(ha)) {
3856             ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
3857                 "Clearing fcoe driver presence.\n");
3858             if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
3859                 ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
3860                     "Error while clearing DRV-Presence.\n");
3861         }
3862 
3863         qla2x00_try_to_stop_firmware(base_vha);
3864     }
3865 
3866     qla2x00_wait_for_sess_deletion(base_vha);
3867 
3868     qla_nvme_delete(base_vha);
3869 
3870     dma_free_coherent(&ha->pdev->dev,
3871         base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
3872 
3873     base_vha->gnl.l = NULL;
3874     qla_enode_stop(base_vha);
3875     qla_edb_stop(base_vha);
3876 
3877     vfree(base_vha->scan.l);
3878 
3879     if (IS_QLAFX00(ha))
3880         qlafx00_driver_shutdown(base_vha, 20);
3881 
3882     qla2x00_delete_all_vps(ha, base_vha);
3883 
3884     qla2x00_dfs_remove(base_vha);
3885 
3886     qla84xx_put_chip(base_vha);
3887 
3888     /* Disable timer */
3889     if (base_vha->timer_active)
3890         qla2x00_stop_timer(base_vha);
3891 
3892     base_vha->flags.online = 0;
3893 
3894     /* Free extended login DMA buffer */
3895     if (ha->exlogin_buf)
3896         qla2x00_free_exlogin_buffer(ha);
3897 
3898     /* Free exchange offload DMA buffer */
3899     if (ha->exchoffld_buf)
3900         qla2x00_free_exchoffld_buffer(ha);
3901 
3902     qla2x00_destroy_deferred_work(ha);
3903 
3904     qlt_remove_target(ha, base_vha);
3905 
3906     qla2x00_free_sysfs_attr(base_vha, true);
3907 
3908     fc_remove_host(base_vha->host);
3909 
3910     scsi_remove_host(base_vha->host);
3911 
3912     qla2x00_free_device(base_vha);
3913 
3914     qla2x00_clear_drv_active(ha);
3915 
3916     scsi_host_put(base_vha->host);
3917 
3918     qla2x00_unmap_iobases(ha);
3919 
3920     pci_release_selected_regions(ha->pdev, ha->bars);
3921     kfree(ha);
3922 
3923     pci_disable_pcie_error_reporting(pdev);
3924 
3925     pci_disable_device(pdev);
3926 }
3927 
3928 static inline void
3929 qla24xx_free_purex_list(struct purex_list *list)
3930 {
3931     struct purex_item *item, *next;
3932     ulong flags;
3933 
3934     spin_lock_irqsave(&list->lock, flags);
3935     list_for_each_entry_safe(item, next, &list->head, list) {
3936         list_del(&item->list);
3937         if (item == &item->vha->default_item)
3938             continue;
3939         kfree(item);
3940     }
3941     spin_unlock_irqrestore(&list->lock, flags);
3942 }
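/*
 * Editor's note: list_for_each_entry_safe() is required above because
 * the loop body may kfree() the current node; the _safe variant caches
 * the next pointer before the body runs.  default_item is embedded in
 * the vha itself, which is why it is unlinked but never freed.
 */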
3943 
3944 static void
3945 qla2x00_free_device(scsi_qla_host_t *vha)
3946 {
3947     struct qla_hw_data *ha = vha->hw;
3948 
3949     qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
3950 
3951     /* Disable timer */
3952     if (vha->timer_active)
3953         qla2x00_stop_timer(vha);
3954 
3955     qla25xx_delete_queues(vha);
3956     vha->flags.online = 0;
3957 
3958     /* turn-off interrupts on the card */
3959     if (ha->interrupts_on) {
3960         vha->flags.init_done = 0;
3961         ha->isp_ops->disable_intrs(ha);
3962     }
3963 
3964     qla2x00_free_fcports(vha);
3965 
3966     qla2x00_free_irqs(vha);
3967 
3968     /* Flush the work queue and remove it */
3969     if (ha->wq) {
3970         destroy_workqueue(ha->wq);
3971         ha->wq = NULL;
3972     }
3973 
3974 
3975     qla24xx_free_purex_list(&vha->purex_list);
3976 
3977     qla2x00_mem_free(ha);
3978 
3979     qla82xx_md_free(vha);
3980 
3981     qla_edif_sadb_release_free_pool(ha);
3982     qla_edif_sadb_release(ha);
3983 
3984     qla2x00_free_queues(ha);
3985 }
3986 
3987 void qla2x00_free_fcports(struct scsi_qla_host *vha)
3988 {
3989     fc_port_t *fcport, *tfcport;
3990 
3991     list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list)
3992         qla2x00_free_fcport(fcport);
3993 }
3994 
3995 static inline void
3996 qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport)
3997 {
3998     int now;
3999 
4000     if (!fcport->rport)
4001         return;
4002 
4003     ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
4004         "%s %8phN. rport %p roles %x\n",
4005         __func__, fcport->port_name, fcport->rport,
4006         fcport->rport->roles);
4007     fc_remote_port_delete(fcport->rport);
4010     qlt_do_generation_tick(vha, &now);
4011 }
4012 
4013 /*
4014  * qla2x00_mark_device_lost - Update fcport state when a device goes offline.
4015  *
4016  * Input: vha = adapter block pointer.  fcport = port structure pointer.
4017  *
4018  * Return: None.
4021  */
4022 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
4023     int do_login)
4024 {
4025     if (IS_QLAFX00(vha->hw)) {
4026         qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
4027         qla2x00_schedule_rport_del(vha, fcport);
4028         return;
4029     }
4030 
4031     if (atomic_read(&fcport->state) == FCS_ONLINE &&
4032         vha->vp_idx == fcport->vha->vp_idx) {
4033         qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
4034         qla2x00_schedule_rport_del(vha, fcport);
4035     }
4036 
4037     /*
4038      * We may need to retry the login, so don't change the state of the
4039      * port but do the retries.
4040      */
4041     if (atomic_read(&fcport->state) != FCS_DEVICE_DEAD)
4042         qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
4043 
4044     if (!do_login)
4045         return;
4046 
4047     set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
4048 }
4049 
4050 void
4051 qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha)
4052 {
4053     fc_port_t *fcport;
4054 
4055     ql_dbg(ql_dbg_disc, vha, 0x20f1,
4056         "Mark all dev lost\n");
4057 
4058     list_for_each_entry(fcport, &vha->vp_fcports, list) {
4059         if (fcport->loop_id != FC_NO_LOOP_ID &&
4060             (fcport->flags & FCF_FCP2_DEVICE) &&
4061             fcport->port_type == FCT_TARGET &&
4062             !qla2x00_reset_active(vha)) {
4063             ql_dbg(ql_dbg_disc, vha, 0x211a,
4064                    "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC",
4065                    fcport->flags, fcport->port_type,
4066                    fcport->d_id.b24, fcport->port_name);
4067             continue;
4068         }
4069         fcport->scan_state = 0;
4070         qlt_schedule_sess_for_deletion(fcport);
4071     }
4072 }
4073 
4074 static void qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
4075 {
4076     int i;
4077 
4078     if (IS_FWI2_CAPABLE(ha))
4079         return;
4080 
4081     for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
4082         set_bit(i, ha->loop_id_map);
4083     set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
4084     set_bit(BROADCAST, ha->loop_id_map);
4085 }
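/*
 * Editor's note -- how the reservations are consumed (sketch of the
 * usual bitmap-allocator pattern; the driver's real allocator lives
 * elsewhere, e.g. qla2x00_find_new_loop_id()):
 *
 *	id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
 *	if (id >= LOOPID_MAP_SIZE)
 *		return -ENOSPC;		/- no loop ID left -/
 *	set_bit(id, ha->loop_id_map);	/- reserved IDs above are skipped -/
 */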
4086 
4087 /*
4088  * qla2x00_mem_alloc
4089  *      Allocates adapter memory.
4090  *
4091  * Returns:
4092  *      0  = success.
4093  *      !0  = failure.
4094  */
4095 static int
4096 qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
4097     struct req_que **req, struct rsp_que **rsp)
4098 {
4099     char    name[16];
4100     int rc;
4101 
4102     ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
4103         &ha->init_cb_dma, GFP_KERNEL);
4104     if (!ha->init_cb)
4105         goto fail;
4106 
4107     rc = btree_init32(&ha->host_map);
4108     if (rc)
4109         goto fail_free_init_cb;
4110 
4111     if (qlt_mem_alloc(ha) < 0)
4112         goto fail_free_btree;
4113 
4114     ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
4115         qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
4116     if (!ha->gid_list)
4117         goto fail_free_tgt_mem;
4118 
4119     ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
4120     if (!ha->srb_mempool)
4121         goto fail_free_gid_list;
4122 
4123     if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) {
4124         /* Allocate cache for CT6 Ctx. */
4125         if (!ctx_cachep) {
4126             ctx_cachep = kmem_cache_create("qla2xxx_ctx",
4127                 sizeof(struct ct6_dsd), 0,
4128                 SLAB_HWCACHE_ALIGN, NULL);
4129             if (!ctx_cachep)
4130                 goto fail_free_srb_mempool;
4131         }
4132         ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
4133             ctx_cachep);
4134         if (!ha->ctx_mempool)
4135             goto fail_free_srb_mempool;
4136         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021,
4137             "ctx_cachep=%p ctx_mempool=%p.\n",
4138             ctx_cachep, ha->ctx_mempool);
4139     }
4140 
4141     /* Get memory for cached NVRAM */
4142     ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
4143     if (!ha->nvram)
4144         goto fail_free_ctx_mempool;
4145 
4146     snprintf(name, sizeof(name), "%s_%d", QLA2XXX_DRIVER_NAME,
4147         ha->pdev->device);
4148     ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4149         DMA_POOL_SIZE, 8, 0);
4150     if (!ha->s_dma_pool)
4151         goto fail_free_nvram;
4152 
4153     ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022,
4154         "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
4155         ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
4156 
4157     if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) {
4158         ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4159             DSD_LIST_DMA_POOL_SIZE, 8, 0);
4160         if (!ha->dl_dma_pool) {
4161             ql_log_pci(ql_log_fatal, ha->pdev, 0x0023,
4162                 "Failed to allocate memory for dl_dma_pool.\n");
4163             goto fail_s_dma_pool;
4164         }
4165 
4166         ha->fcp_cmnd_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4167             FCP_CMND_DMA_POOL_SIZE, 8, 0);
4168         if (!ha->fcp_cmnd_dma_pool) {
4169             ql_log_pci(ql_log_fatal, ha->pdev, 0x0024,
4170                 "Failed to allocate memory for fcp_cmnd_dma_pool.\n");
4171             goto fail_dl_dma_pool;
4172         }
4173 
4174         if (ql2xenabledif) {
4175             u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE;
4176             struct dsd_dma *dsd, *nxt;
4177             uint i;
4178             /* Create a DMA pool of buffers for DIF bundling */
4179             ha->dif_bundl_pool = dma_pool_create(name,
4180                 &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0);
4181             if (!ha->dif_bundl_pool) {
4182                 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
4183                     "%s: failed create dif_bundl_pool\n",
4184                     __func__);
4185                 goto fail_dif_bundl_dma_pool;
4186             }
4187 
4188             INIT_LIST_HEAD(&ha->pool.good.head);
4189             INIT_LIST_HEAD(&ha->pool.unusable.head);
4190             ha->pool.good.count = 0;
4191             ha->pool.unusable.count = 0;
4192             for (i = 0; i < 128; i++) {
4193                 dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC);
4194                 if (!dsd) {
4195                     ql_dbg_pci(ql_dbg_init, ha->pdev,
4196                         0xe0ee, "%s: failed alloc dsd\n",
4197                         __func__);
4198                     return -ENOMEM;
4199                 }
4200                 ha->dif_bundle_kallocs++;
4201 
4202                 dsd->dsd_addr = dma_pool_alloc(
4203                     ha->dif_bundl_pool, GFP_ATOMIC,
4204                     &dsd->dsd_list_dma);
4205                 if (!dsd->dsd_addr) {
4206                     ql_dbg_pci(ql_dbg_init, ha->pdev,
4207                         0xe0ee,
4208                         "%s: failed alloc ->dsd_addr\n",
4209                         __func__);
4210                     kfree(dsd);
4211                     ha->dif_bundle_kallocs--;
4212                     continue;
4213                 }
4214                 ha->dif_bundle_dma_allocs++;
4215 
4216                 /*
4217                  * if DMA buffer crosses 4G boundary,
4218                  * put it on bad list
4219                  */
4220                 if (MSD(dsd->dsd_list_dma) ^
4221                     MSD(dsd->dsd_list_dma + bufsize)) {
4222                     list_add_tail(&dsd->list,
4223                         &ha->pool.unusable.head);
4224                     ha->pool.unusable.count++;
4225                 } else {
4226                     list_add_tail(&dsd->list,
4227                         &ha->pool.good.head);
4228                     ha->pool.good.count++;
4229                 }
4230             }
4231 
4232             /* return the good ones back to the pool */
4233             list_for_each_entry_safe(dsd, nxt,
4234                 &ha->pool.good.head, list) {
4235                 list_del(&dsd->list);
4236                 dma_pool_free(ha->dif_bundl_pool,
4237                     dsd->dsd_addr, dsd->dsd_list_dma);
4238                 ha->dif_bundle_dma_allocs--;
4239                 kfree(dsd);
4240                 ha->dif_bundle_kallocs--;
4241             }
4242 
4243             ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024,
4244                 "%s: dif dma pool (good=%u unusable=%u)\n",
4245                 __func__, ha->pool.good.count,
4246                 ha->pool.unusable.count);
4247         }
4248 
4249         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025,
4250             "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n",
4251             ha->dl_dma_pool, ha->fcp_cmnd_dma_pool,
4252             ha->dif_bundl_pool);
4253     }
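    /*
     * Editor's note -- why the MSD() test above works: MSD() takes the
     * upper 32 bits of a dma_addr_t, so
     *
     *	MSD(dma) ^ MSD(dma + bufsize)
     *
     * is non-zero exactly when the buffer's start and end fall in
     * different 4 GiB windows, i.e. the allocation straddles a 4 GiB
     * boundary the DIF DMA engine cannot cross.  Worked example (with a
     * hypothetical 0x2000-byte buffer): dma = 0xfffff000 ends at
     * 0x1_00001000; MSD() goes from 0x0 to 0x1, so the buffer is parked
     * on the unusable list instead of being returned to the pool.
     */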
4254 
4255     /* Allocate memory for SNS commands */
4256     if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4257     /* Get consistent memory allocated for SNS commands */
4258         ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
4259         sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
4260         if (!ha->sns_cmd)
4261             goto fail_dma_pool;
4262         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
4263             "sns_cmd: %p.\n", ha->sns_cmd);
4264     } else {
4265     /* Get consistent memory allocated for MS IOCB */
4266         ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
4267             &ha->ms_iocb_dma);
4268         if (!ha->ms_iocb)
4269             goto fail_dma_pool;
4270     /* Get consistent memory allocated for CT SNS commands */
4271         ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
4272             sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
4273         if (!ha->ct_sns)
4274             goto fail_free_ms_iocb;
4275         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027,
4276             "ms_iocb=%p ct_sns=%p.\n",
4277             ha->ms_iocb, ha->ct_sns);
4278     }
4279 
4280     /* Allocate memory for request ring */
4281     *req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
4282     if (!*req) {
4283         ql_log_pci(ql_log_fatal, ha->pdev, 0x0028,
4284             "Failed to allocate memory for req.\n");
4285         goto fail_req;
4286     }
4287     (*req)->length = req_len;
4288     (*req)->ring = dma_alloc_coherent(&ha->pdev->dev,
4289         ((*req)->length + 1) * sizeof(request_t),
4290         &(*req)->dma, GFP_KERNEL);
4291     if (!(*req)->ring) {
4292         ql_log_pci(ql_log_fatal, ha->pdev, 0x0029,
4293             "Failed to allocate memory for req_ring.\n");
4294         goto fail_req_ring;
4295     }
4296     /* Allocate memory for response ring */
4297     *rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
4298     if (!*rsp) {
4299         ql_log_pci(ql_log_fatal, ha->pdev, 0x002a,
4300             "Failed to allocate memory for rsp.\n");
4301         goto fail_rsp;
4302     }
4303     (*rsp)->hw = ha;
4304     (*rsp)->length = rsp_len;
4305     (*rsp)->ring = dma_alloc_coherent(&ha->pdev->dev,
4306         ((*rsp)->length + 1) * sizeof(response_t),
4307         &(*rsp)->dma, GFP_KERNEL);
4308     if (!(*rsp)->ring) {
4309         ql_log_pci(ql_log_fatal, ha->pdev, 0x002b,
4310             "Failed to allocate memory for rsp_ring.\n");
4311         goto fail_rsp_ring;
4312     }
4313     (*req)->rsp = *rsp;
4314     (*rsp)->req = *req;
4315     ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c,
4316         "req=%p req->length=%d req->ring=%p rsp=%p "
4317         "rsp->length=%d rsp->ring=%p.\n",
4318         *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length,
4319         (*rsp)->ring);
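    /*
     * Editor's note -- sizing arithmetic: both rings are allocated with
     * one entry more than their nominal length, so a request ring of
     * req_len entries costs (req_len + 1) * sizeof(request_t) bytes of
     * coherent DMA memory.  (Hedged: the extra slot looks like the usual
     * ring-buffer trick of keeping one entry unused so a completely full
     * ring can be distinguished from an empty one.)
     */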
4320     /* Allocate memory for NVRAM data for vports */
4321     if (ha->nvram_npiv_size) {
4322         ha->npiv_info = kcalloc(ha->nvram_npiv_size,
4323                     sizeof(struct qla_npiv_entry),
4324                     GFP_KERNEL);
4325         if (!ha->npiv_info) {
4326             ql_log_pci(ql_log_fatal, ha->pdev, 0x002d,
4327                 "Failed to allocate memory for npiv_info.\n");
4328             goto fail_npiv_info;
4329         }
4330     } else
4331         ha->npiv_info = NULL;
4332 
4333     /* Get consistent memory allocated for EX-INIT-CB. */
4334     if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
4335         IS_QLA28XX(ha)) {
4336         ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
4337             &ha->ex_init_cb_dma);
4338         if (!ha->ex_init_cb)
4339             goto fail_ex_init_cb;
4340         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e,
4341             "ex_init_cb=%p.\n", ha->ex_init_cb);
4342     }
4343 
4344     /* Get consistent memory allocated for Special Features-CB. */
4345     if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4346         ha->sf_init_cb = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL,
4347                         &ha->sf_init_cb_dma);
4348         if (!ha->sf_init_cb)
4349             goto fail_sf_init_cb;
4350         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
4351                "sf_init_cb=%p.\n", ha->sf_init_cb);
4352     }
4353 
4354     INIT_LIST_HEAD(&ha->gbl_dsd_list);
4355 
4356     /* Get consistent memory allocated for Async Port-Database. */
4357     if (!IS_FWI2_CAPABLE(ha)) {
4358         ha->async_pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
4359             &ha->async_pd_dma);
4360         if (!ha->async_pd)
4361             goto fail_async_pd;
4362         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f,
4363             "async_pd=%p.\n", ha->async_pd);
4364     }
4365 
4366     INIT_LIST_HEAD(&ha->vp_list);
4367 
4368     /* Allocate memory for our loop_id bitmap */
4369     ha->loop_id_map = kcalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE),
4370                   sizeof(long),
4371                   GFP_KERNEL);
4372     if (!ha->loop_id_map)
4373         goto fail_loop_id_map;
4374 
4375     qla2x00_set_reserved_loop_ids(ha);
4376     ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
4377         "loop_id_map=%p.\n", ha->loop_id_map);
4379 
4380     ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev,
4381         SFP_DEV_SIZE, &ha->sfp_data_dma, GFP_KERNEL);
4382     if (!ha->sfp_data) {
4383         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
4384             "Unable to allocate memory for SFP read-data.\n");
4385         goto fail_sfp_data;
4386     }
4387 
4388     ha->flt = dma_alloc_coherent(&ha->pdev->dev,
4389         sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma,
4390         GFP_KERNEL);
4391     if (!ha->flt) {
4392         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
4393             "Unable to allocate memory for FLT.\n");
4394         goto fail_flt_buffer;
4395     }
4396 
4397     /* allocate the purex dma pool */
4398     ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev,
4399         ELS_MAX_PAYLOAD, 8, 0);
4400 
4401     if (!ha->purex_dma_pool) {
4402         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
4403             "Unable to allocate purex_dma_pool.\n");
4404         goto fail_flt;
4405     }
4406 
4407     ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
4408     ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
4409         ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
4410 
4411     if (!ha->elsrej.c) {
4412         ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
4413             "Alloc failed for els reject cmd.\n");
4414         goto fail_elsrej;
4415     }
4416     ha->elsrej.c->er_cmd = ELS_LS_RJT;
4417     ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
4418     ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
4419     return 0;
4420 
4421 fail_elsrej:
4422     dma_pool_destroy(ha->purex_dma_pool);
4423 fail_flt:
4424     dma_free_coherent(&ha->pdev->dev, sizeof(struct qla_flt_header) +
4425         FLT_REGIONS_SIZE, ha->flt, ha->flt_dma);
4426 
4427 fail_flt_buffer:
4428     dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE,
4429         ha->sfp_data, ha->sfp_data_dma);
4430 fail_sfp_data:
4431     kfree(ha->loop_id_map);
4432 fail_loop_id_map:
4433     dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
4434 fail_async_pd:
4435     dma_pool_free(ha->s_dma_pool, ha->sf_init_cb, ha->sf_init_cb_dma);
4436 fail_sf_init_cb:
4437     dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
4438 fail_ex_init_cb:
4439     kfree(ha->npiv_info);
4440 fail_npiv_info:
4441     dma_free_coherent(&ha->pdev->dev, ((*rsp)->length + 1) *
4442         sizeof(response_t), (*rsp)->ring, (*rsp)->dma);
4443     (*rsp)->ring = NULL;
4444     (*rsp)->dma = 0;
4445 fail_rsp_ring:
4446     kfree(*rsp);
4447     *rsp = NULL;
4448 fail_rsp:
4449     dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
4450         sizeof(request_t), (*req)->ring, (*req)->dma);
4451     (*req)->ring = NULL;
4452     (*req)->dma = 0;
4453 fail_req_ring:
4454     kfree(*req);
4455     *req = NULL;
4456 fail_req:
4457     dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
4458         ha->ct_sns, ha->ct_sns_dma);
4459     ha->ct_sns = NULL;
4460     ha->ct_sns_dma = 0;
4461 fail_free_ms_iocb:
4462     dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
4463     ha->ms_iocb = NULL;
4464     ha->ms_iocb_dma = 0;
4465 
4466     if (ha->sns_cmd)
4467         dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
4468             ha->sns_cmd, ha->sns_cmd_dma);
4469 fail_dma_pool:
4470     if (ql2xenabledif) {
4471         struct dsd_dma *dsd, *nxt;
4472 
4473         list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
4474             list) {
4475             list_del(&dsd->list);
4476             dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
4477                 dsd->dsd_list_dma);
4478             ha->dif_bundle_dma_allocs--;
4479             kfree(dsd);
4480             ha->dif_bundle_kallocs--;
4481             ha->pool.unusable.count--;
4482         }
4483         dma_pool_destroy(ha->dif_bundl_pool);
4484         ha->dif_bundl_pool = NULL;
4485     }
4486 
4487 fail_dif_bundl_dma_pool:
4488     if (IS_QLA82XX(ha) || ql2xenabledif) {
4489         dma_pool_destroy(ha->fcp_cmnd_dma_pool);
4490         ha->fcp_cmnd_dma_pool = NULL;
4491     }
4492 fail_dl_dma_pool:
4493     if (IS_QLA82XX(ha) || ql2xenabledif) {
4494         dma_pool_destroy(ha->dl_dma_pool);
4495         ha->dl_dma_pool = NULL;
4496     }
4497 fail_s_dma_pool:
4498     dma_pool_destroy(ha->s_dma_pool);
4499     ha->s_dma_pool = NULL;
4500 fail_free_nvram:
4501     kfree(ha->nvram);
4502     ha->nvram = NULL;
4503 fail_free_ctx_mempool:
4504     mempool_destroy(ha->ctx_mempool);
4505     ha->ctx_mempool = NULL;
4506 fail_free_srb_mempool:
4507     mempool_destroy(ha->srb_mempool);
4508     ha->srb_mempool = NULL;
4509 fail_free_gid_list:
4510     dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
4511     ha->gid_list,
4512     ha->gid_list_dma);
4513     ha->gid_list = NULL;
4514     ha->gid_list_dma = 0;
4515 fail_free_tgt_mem:
4516     qlt_mem_free(ha);
4517 fail_free_btree:
4518     btree_destroy32(&ha->host_map);
4519 fail_free_init_cb:
4520     dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
4521     ha->init_cb_dma);
4522     ha->init_cb = NULL;
4523     ha->init_cb_dma = 0;
4524 fail:
4525     ql_log(ql_log_fatal, NULL, 0x0030,
4526         "Memory allocation failure.\n");
4527     return -ENOMEM;
4528 }
4529 
4530 int
4531 qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
4532 {
4533     int rval;
4534     uint16_t    size, max_cnt;
4535     uint32_t temp;
4536     struct qla_hw_data *ha = vha->hw;
4537 
4538     /* Return if we don't need to allocate any extended logins */
4539     if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
4540         return QLA_SUCCESS;
4541 
4542     if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
4543         return QLA_SUCCESS;
4544 
4545     ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
4546     max_cnt = 0;
4547     rval = qla_get_exlogin_status(vha, &size, &max_cnt);
4548     if (rval != QLA_SUCCESS) {
4549         ql_log_pci(ql_log_fatal, ha->pdev, 0xd029,
4550             "Failed to get exlogin status.\n");
4551         return rval;
4552     }
4553 
4554     temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
4555     temp *= size;
4556 
4557     if (temp != ha->exlogin_size) {
4558         qla2x00_free_exlogin_buffer(ha);
4559         ha->exlogin_size = temp;
4560 
4561         ql_log(ql_log_info, vha, 0xd024,
4562             "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
4563             max_cnt, size, temp);
4564 
4565         ql_log(ql_log_info, vha, 0xd025,
4566             "EXLOGIN: requested size=0x%x\n", ha->exlogin_size);
4567 
4568         /* Get consistent memory for extended logins */
4569         ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
4570             ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
4571         if (!ha->exlogin_buf) {
4572             ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
4573             "Failed to allocate memory for exlogin_buf_dma.\n");
4574             return -ENOMEM;
4575         }
4576     }
4577 
4578     /* Now configure the dma buffer */
4579     rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
4580     if (rval) {
4581         ql_log(ql_log_fatal, vha, 0xd033,
4582             "Setup extended login buffer  ****FAILED****.\n");
4583         qla2x00_free_exlogin_buffer(ha);
4584     }
4585 
4586     return rval;
4587 }
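
/*
 * The sizing logic above reduces to the following sketch (illustrative
 * only; "size" and "max_cnt" come from qla_get_exlogin_status()):
 *
 *	cnt   = min(ql2xexlogins, max_cnt);	// clamp to FW-reported max
 *	bytes = cnt * size;			// per-login port database size
 *
 * The DMA buffer is reallocated only when "bytes" differs from the
 * current ha->exlogin_size, so repeated calls with unchanged
 * parameters leave the existing buffer in place.
 */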
4588 
4589 /*
4590 * qla2x00_free_exlogin_buffer
4591 *
4592 * Input:
4593 *   ha = adapter block pointer
4594 */
4595 void
4596 qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
4597 {
4598     if (ha->exlogin_buf) {
4599         dma_free_coherent(&ha->pdev->dev, ha->exlogin_size,
4600             ha->exlogin_buf, ha->exlogin_buf_dma);
4601         ha->exlogin_buf = NULL;
4602         ha->exlogin_size = 0;
4603     }
4604 }
4605 
4606 static void
4607 qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
4608 {
4609     u32 temp;
4610     struct init_cb_81xx *icb = (struct init_cb_81xx *)vha->hw->init_cb;
4611     *ret_cnt = FW_DEF_EXCHANGES_CNT;
4612 
4613     if (max_cnt > vha->hw->max_exchg)
4614         max_cnt = vha->hw->max_exchg;
4615 
4616     if (qla_ini_mode_enabled(vha)) {
4617         if (vha->ql2xiniexchg > max_cnt)
4618             vha->ql2xiniexchg = max_cnt;
4619 
4620         if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
4621             *ret_cnt = vha->ql2xiniexchg;
4622 
4623     } else if (qla_tgt_mode_enabled(vha)) {
4624         if (vha->ql2xexchoffld > max_cnt) {
4625             vha->ql2xexchoffld = max_cnt;
4626             icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4627         }
4628 
4629         if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
4630             *ret_cnt = vha->ql2xexchoffld;
4631     } else if (qla_dual_mode_enabled(vha)) {
4632         temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
4633         if (temp > max_cnt) {
4634             vha->ql2xiniexchg -= (temp - max_cnt)/2;
4635             vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
4636             temp = max_cnt;
4637             icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4638         }
4639 
4640         if (temp > FW_DEF_EXCHANGES_CNT)
4641             *ret_cnt = temp;
4642     }
4643 }
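
/*
 * Summary of the mode handling above: the requested exchange count is
 * clamped to the FW-reported maximum first. Initiator mode uses only
 * ql2xiniexchg, target mode only ql2xexchoffld, and dual mode sums the
 * two and splits any overflow roughly evenly between them. In all
 * cases *ret_cnt is raised above FW_DEF_EXCHANGES_CNT only when the
 * computed count actually exceeds the firmware default.
 */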
4644 
4645 int
4646 qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
4647 {
4648     int rval;
4649     u16 size, max_cnt;
4650     u32 actual_cnt, totsz;
4651     struct qla_hw_data *ha = vha->hw;
4652 
4653     if (!ha->flags.exchoffld_enabled)
4654         return QLA_SUCCESS;
4655 
4656     if (!IS_EXCHG_OFFLD_CAPABLE(ha))
4657         return QLA_SUCCESS;
4658 
4659     max_cnt = 0;
4660     rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
4661     if (rval != QLA_SUCCESS) {
4662         ql_log_pci(ql_log_fatal, ha->pdev, 0xd012,
4663             "Failed to get exlogin status.\n");
4664         return rval;
4665     }
4666 
4667     qla2x00_number_of_exch(vha, &actual_cnt, max_cnt);
4668     ql_log(ql_log_info, vha, 0xd014,
4669         "Actual exchange offload count: %d.\n", actual_cnt);
4670 
4671     totsz = actual_cnt * size;
4672 
4673     if (totsz != ha->exchoffld_size) {
4674         qla2x00_free_exchoffld_buffer(ha);
4675         if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
4676             ha->exchoffld_size = 0;
4677             ha->flags.exchoffld_enabled = 0;
4678             return QLA_SUCCESS;
4679         }
4680 
4681         ha->exchoffld_size = totsz;
4682 
4683         ql_log(ql_log_info, vha, 0xd016,
4684             "Exchange offload: max_count=%d, actual count=%d entry sz=0x%x, total sz=0x%x\n",
4685             max_cnt, actual_cnt, size, totsz);
4686 
4687         ql_log(ql_log_info, vha, 0xd017,
4688             "Exchange Buffers requested size = 0x%x\n",
4689             ha->exchoffld_size);
4690 
4691         /* Get consistent memory for exchange offload */
4692         ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
4693             ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
4694         if (!ha->exchoffld_buf) {
4695             ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
4696             "Failed to allocate memory for Exchange Offload.\n");
4697 
4698             if (ha->max_exchg >
4699                 (FW_DEF_EXCHANGES_CNT + REDUCE_EXCHANGES_CNT)) {
4700                 ha->max_exchg -= REDUCE_EXCHANGES_CNT;
4701             } else if (ha->max_exchg >
4702                 (FW_DEF_EXCHANGES_CNT + 512)) {
4703                 ha->max_exchg -= 512;
4704             } else {
4705                 ha->flags.exchoffld_enabled = 0;
4706                 ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
4707                     "Disabling Exchange offload due to lack of memory\n");
4708             }
4709             ha->exchoffld_size = 0;
4710 
4711             return -ENOMEM;
4712         }
4713     } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
4714         /* pathological case */
4715         qla2x00_free_exchoffld_buffer(ha);
4716         ha->exchoffld_size = 0;
4717         ha->flags.exchoffld_enabled = 0;
4718         ql_log(ql_log_info, vha, 0xd016,
4719             "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
4720             ha->exchoffld_size, actual_cnt, size, totsz);
4721         return 0;
4722     }
4723 
4724     /* Now configure the dma buffer */
4725     rval = qla_set_exchoffld_mem_cfg(vha);
4726     if (rval) {
4727         ql_log(ql_log_fatal, vha, 0xd02e,
4728             "Setup exchange offload buffer ****FAILED****.\n");
4729         qla2x00_free_exchoffld_buffer(ha);
4730     } else {
4731         /* re-adjust number of target exchange */
4732         struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb;
4733 
4734         if (qla_ini_mode_enabled(vha))
4735             icb->exchange_count = 0;
4736         else
4737             icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
4738     }
4739 
4740     return rval;
4741 }
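
/*
 * Note the degradation strategy on allocation failure above: the
 * driver first backs ha->max_exchg off by REDUCE_EXCHANGES_CNT, then
 * by 512, and disables exchange offload entirely only when no further
 * reduction is possible; a subsequent call then retries with the
 * smaller exchange budget.
 */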
4742 
4743 /*
4744 * qla2x00_free_exchoffld_buffer
4745 *
4746 * Input:
4747 *   ha = adapter block pointer
4748 */
4749 void
4750 qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha)
4751 {
4752     if (ha->exchoffld_buf) {
4753         dma_free_coherent(&ha->pdev->dev, ha->exchoffld_size,
4754             ha->exchoffld_buf, ha->exchoffld_buf_dma);
4755         ha->exchoffld_buf = NULL;
4756         ha->exchoffld_size = 0;
4757     }
4758 }
4759 
4760 /*
4761 * qla2x00_free_fw_dump
4762  *   Frees firmware dump resources: FCE/EFT buffers, the dump itself, and the dump templates.
4763 *
4764 * Input:
4765 *   ha = adapter block pointer
4766 */
4767 static void
4768 qla2x00_free_fw_dump(struct qla_hw_data *ha)
4769 {
4770     struct fwdt *fwdt = ha->fwdt;
4771     uint j;
4772 
4773     if (ha->fce)
4774         dma_free_coherent(&ha->pdev->dev,
4775             FCE_SIZE, ha->fce, ha->fce_dma);
4776 
4777     if (ha->eft)
4778         dma_free_coherent(&ha->pdev->dev,
4779             EFT_SIZE, ha->eft, ha->eft_dma);
4780 
4781     vfree(ha->fw_dump);
4782 
4783     ha->fce = NULL;
4784     ha->fce_dma = 0;
4785     ha->flags.fce_enabled = 0;
4786     ha->eft = NULL;
4787     ha->eft_dma = 0;
4788     ha->fw_dumped = false;
4789     ha->fw_dump_cap_flags = 0;
4790     ha->fw_dump_reading = 0;
4791     ha->fw_dump = NULL;
4792     ha->fw_dump_len = 0;
4793 
4794     for (j = 0; j < 2; j++, fwdt++) {
4795         vfree(fwdt->template);
4796         fwdt->template = NULL;
4797         fwdt->length = 0;
4798     }
4799 }
4800 
4801 /*
4802 * qla2x00_mem_free
4803 *      Frees all adapter allocated memory.
4804 *
4805 * Input:
4806 *      ha = adapter block pointer.
4807 */
4808 static void
4809 qla2x00_mem_free(struct qla_hw_data *ha)
4810 {
4811     qla2x00_free_fw_dump(ha);
4812 
4813     if (ha->mctp_dump)
4814         dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
4815             ha->mctp_dump_dma);
4816     ha->mctp_dump = NULL;
4817 
4818     mempool_destroy(ha->srb_mempool);
4819     ha->srb_mempool = NULL;
4820 
4821     if (ha->dcbx_tlv)
4822         dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
4823             ha->dcbx_tlv, ha->dcbx_tlv_dma);
4824     ha->dcbx_tlv = NULL;
4825 
4826     if (ha->xgmac_data)
4827         dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
4828             ha->xgmac_data, ha->xgmac_data_dma);
4829     ha->xgmac_data = NULL;
4830 
4831     if (ha->sns_cmd)
4832         dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
4833         ha->sns_cmd, ha->sns_cmd_dma);
4834     ha->sns_cmd = NULL;
4835     ha->sns_cmd_dma = 0;
4836 
4837     if (ha->ct_sns)
4838         dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
4839         ha->ct_sns, ha->ct_sns_dma);
4840     ha->ct_sns = NULL;
4841     ha->ct_sns_dma = 0;
4842 
4843     if (ha->sfp_data)
4844         dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data,
4845             ha->sfp_data_dma);
4846     ha->sfp_data = NULL;
4847 
4848     if (ha->flt)
4849         dma_free_coherent(&ha->pdev->dev,
4850             sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE,
4851             ha->flt, ha->flt_dma);
4852     ha->flt = NULL;
4853     ha->flt_dma = 0;
4854 
4855     if (ha->ms_iocb)
4856         dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
4857     ha->ms_iocb = NULL;
4858     ha->ms_iocb_dma = 0;
4859 
4860     if (ha->sf_init_cb)
4861         dma_pool_free(ha->s_dma_pool,
4862                   ha->sf_init_cb, ha->sf_init_cb_dma);
4863 
4864     if (ha->ex_init_cb)
4865         dma_pool_free(ha->s_dma_pool,
4866             ha->ex_init_cb, ha->ex_init_cb_dma);
4867     ha->ex_init_cb = NULL;
4868     ha->ex_init_cb_dma = 0;
4869 
4870     if (ha->async_pd)
4871         dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
4872     ha->async_pd = NULL;
4873     ha->async_pd_dma = 0;
4874 
4875     dma_pool_destroy(ha->s_dma_pool);
4876     ha->s_dma_pool = NULL;
4877 
4878     if (ha->gid_list)
4879         dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
4880         ha->gid_list, ha->gid_list_dma);
4881     ha->gid_list = NULL;
4882     ha->gid_list_dma = 0;
4883 
4884     if (IS_QLA82XX(ha)) {
4885         if (!list_empty(&ha->gbl_dsd_list)) {
4886             struct dsd_dma *dsd_ptr, *tdsd_ptr;
4887 
4888             /* clean up allocated prev pool */
4889             list_for_each_entry_safe(dsd_ptr,
4890                 tdsd_ptr, &ha->gbl_dsd_list, list) {
4891                 dma_pool_free(ha->dl_dma_pool,
4892                 dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
4893                 list_del(&dsd_ptr->list);
4894                 kfree(dsd_ptr);
4895             }
4896         }
4897     }
4898 
4899     dma_pool_destroy(ha->dl_dma_pool);
4900     ha->dl_dma_pool = NULL;
4901 
4902     dma_pool_destroy(ha->fcp_cmnd_dma_pool);
4903     ha->fcp_cmnd_dma_pool = NULL;
4904 
4905     mempool_destroy(ha->ctx_mempool);
4906     ha->ctx_mempool = NULL;
4907 
4908     if (ql2xenabledif && ha->dif_bundl_pool) {
4909         struct dsd_dma *dsd, *nxt;
4910 
4911         list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
4912                      list) {
4913             list_del(&dsd->list);
4914             dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
4915                       dsd->dsd_list_dma);
4916             ha->dif_bundle_dma_allocs--;
4917             kfree(dsd);
4918             ha->dif_bundle_kallocs--;
4919             ha->pool.unusable.count--;
4920         }
4921         list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
4922             list_del(&dsd->list);
4923             dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr,
4924                       dsd->dsd_list_dma);
4925             ha->dif_bundle_dma_allocs--;
4926             kfree(dsd);
4927             ha->dif_bundle_kallocs--;
4928         }
4929     }
4930 
4931     dma_pool_destroy(ha->dif_bundl_pool);
4932     ha->dif_bundl_pool = NULL;
4933 
4934     qlt_mem_free(ha);
4935     qla_remove_hostmap(ha);
4936 
4937     if (ha->init_cb)
4938         dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
4939             ha->init_cb, ha->init_cb_dma);
4940 
4941     dma_pool_destroy(ha->purex_dma_pool);
4942     ha->purex_dma_pool = NULL;
4943 
4944     if (ha->elsrej.c) {
4945         dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
4946             ha->elsrej.c, ha->elsrej.cdma);
4947         ha->elsrej.c = NULL;
4948     }
4949 
4950     ha->init_cb = NULL;
4951     ha->init_cb_dma = 0;
4952 
4953     vfree(ha->optrom_buffer);
4954     ha->optrom_buffer = NULL;
4955     kfree(ha->nvram);
4956     ha->nvram = NULL;
4957     kfree(ha->npiv_info);
4958     ha->npiv_info = NULL;
4959     kfree(ha->swl);
4960     ha->swl = NULL;
4961     kfree(ha->loop_id_map);
4962     ha->sf_init_cb = NULL;
4963     ha->sf_init_cb_dma = 0;
4964     ha->loop_id_map = NULL;
4965 }
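
/*
 * qla2x00_mem_free() is the teardown mirror of the allocation path:
 * the coherent-DMA frees are guarded by pointer checks, the pool and
 * mempool destructors tolerate NULL, and every pointer/DMA handle is
 * cleared afterwards, so the function is safe to call on a partially
 * initialized qla_hw_data.
 */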
4966 
4967 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4968                         struct qla_hw_data *ha)
4969 {
4970     struct Scsi_Host *host;
4971     struct scsi_qla_host *vha = NULL;
4972 
4973     host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
4974     if (!host) {
4975         ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
4976             "Failed to allocate host from the scsi layer, aborting.\n");
4977         return NULL;
4978     }
4979 
4980     /* Clear our data area */
4981     vha = shost_priv(host);
4982     memset(vha, 0, sizeof(scsi_qla_host_t));
4983 
4984     vha->host = host;
4985     vha->host_no = host->host_no;
4986     vha->hw = ha;
4987 
4988     vha->qlini_mode = ql2x_ini_mode;
4989     vha->ql2xexchoffld = ql2xexchoffld;
4990     vha->ql2xiniexchg = ql2xiniexchg;
4991 
4992     INIT_LIST_HEAD(&vha->vp_fcports);
4993     INIT_LIST_HEAD(&vha->work_list);
4994     INIT_LIST_HEAD(&vha->list);
4995     INIT_LIST_HEAD(&vha->qla_cmd_list);
4996     INIT_LIST_HEAD(&vha->logo_list);
4997     INIT_LIST_HEAD(&vha->plogi_ack_list);
4998     INIT_LIST_HEAD(&vha->qp_list);
4999     INIT_LIST_HEAD(&vha->gnl.fcports);
5000     INIT_LIST_HEAD(&vha->gpnid_list);
5001     INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
5002 
5003     INIT_LIST_HEAD(&vha->purex_list.head);
5004     spin_lock_init(&vha->purex_list.lock);
5005 
5006     spin_lock_init(&vha->work_lock);
5007     spin_lock_init(&vha->cmd_list_lock);
5008     init_waitqueue_head(&vha->fcport_waitQ);
5009     init_waitqueue_head(&vha->vref_waitq);
5010     qla_enode_init(vha);
5011     qla_edb_init(vha);
5012 
5013 
5014     vha->gnl.size = sizeof(struct get_name_list_extended) *
5015             (ha->max_loop_id + 1);
5016     vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
5017         vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
5018     if (!vha->gnl.l) {
5019         ql_log(ql_log_fatal, vha, 0xd04a,
5020             "Alloc failed for name list.\n");
5021         scsi_host_put(vha->host);
5022         return NULL;
5023     }
5024 
5025     /* todo: what about ext login? */
5026     vha->scan.size = ha->max_fibre_devices * sizeof(struct fab_scan_rp);
5027     vha->scan.l = vmalloc(vha->scan.size);
5028     if (!vha->scan.l) {
5029         ql_log(ql_log_fatal, vha, 0xd04a,
5030             "Alloc failed for scan database.\n");
5031         dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
5032             vha->gnl.l, vha->gnl.ldma);
5033         vha->gnl.l = NULL;
5034         scsi_host_put(vha->host);
5035         return NULL;
5036     }
5037     INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
5038 
5039     sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
5040     ql_dbg(ql_dbg_init, vha, 0x0041,
5041         "Allocated the host=%p hw=%p vha=%p dev_name=%s",
5042         vha->host, vha->hw, vha,
5043         dev_name(&(ha->pdev->dev)));
5044 
5045     return vha;
5046 }
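
/*
 * scsi_host_alloc() above reserves sizeof(scsi_qla_host_t) bytes of
 * private data directly behind struct Scsi_Host, and shost_priv()
 * returns a pointer into that area; conceptually:
 *
 *	vha = (scsi_qla_host_t *)host->hostdata;
 *
 * This is why the vha is released implicitly via scsi_host_put() on
 * the error paths rather than by an explicit kfree().
 */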
5047 
5048 struct qla_work_evt *
5049 qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
5050 {
5051     struct qla_work_evt *e;
5052     uint8_t bail;
5053 
5054     if (test_bit(UNLOADING, &vha->dpc_flags))
5055         return NULL;
5056 
5057     QLA_VHA_MARK_BUSY(vha, bail);
5058     if (bail)
5059         return NULL;
5060 
5061     e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
5062     if (!e) {
5063         QLA_VHA_MARK_NOT_BUSY(vha);
5064         return NULL;
5065     }
5066 
5067     INIT_LIST_HEAD(&e->list);
5068     e->type = type;
5069     e->flags = QLA_EVT_FLAG_FREE;
5070     return e;
5071 }
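
/*
 * QLA_VHA_MARK_BUSY() takes a reference on the vha for the lifetime of
 * the work event; qla2x00_do_work() drops it via
 * QLA_VHA_MARK_NOT_BUSY() after the event is processed. GFP_ATOMIC is
 * used so work can be allocated from atomic (e.g. interrupt) context.
 */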
5072 
5073 int
5074 qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
5075 {
5076     unsigned long flags;
5077     bool q = false;
5078 
5079     spin_lock_irqsave(&vha->work_lock, flags);
5080     list_add_tail(&e->list, &vha->work_list);
5081 
5082     if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
5083         q = true;
5084 
5085     spin_unlock_irqrestore(&vha->work_lock, flags);
5086 
5087     if (q)
5088         queue_work(vha->hw->wq, &vha->iocb_work);
5089 
5090     return QLA_SUCCESS;
5091 }
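
/*
 * IOCB_WORK_ACTIVE acts as a queue-once latch: only the caller that
 * flips the bit from clear to set issues queue_work(), so concurrent
 * posters append to work_list without redundant scheduling.
 */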
5092 
5093 int
5094 qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
5095     u32 data)
5096 {
5097     struct qla_work_evt *e;
5098 
5099     e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
5100     if (!e)
5101         return QLA_FUNCTION_FAILED;
5102 
5103     e->u.aen.code = code;
5104     e->u.aen.data = data;
5105     return qla2x00_post_work(vha, e);
5106 }
5107 
5108 int
5109 qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
5110 {
5111     struct qla_work_evt *e;
5112 
5113     e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
5114     if (!e)
5115         return QLA_FUNCTION_FAILED;
5116 
5117     memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
5118     return qla2x00_post_work(vha, e);
5119 }
5120 
5121 #define qla2x00_post_async_work(name, type) \
5122 int qla2x00_post_async_##name##_work(       \
5123     struct scsi_qla_host *vha,          \
5124     fc_port_t *fcport, uint16_t *data)      \
5125 {                       \
5126     struct qla_work_evt *e;         \
5127                         \
5128     e = qla2x00_alloc_work(vha, type);  \
5129     if (!e)                 \
5130         return QLA_FUNCTION_FAILED; \
5131                         \
5132     e->u.logio.fcport = fcport;     \
5133     if (data) {             \
5134         e->u.logio.data[0] = data[0];   \
5135         e->u.logio.data[1] = data[1];   \
5136     }                   \
5137     fcport->flags |= FCF_ASYNC_ACTIVE;  \
5138     return qla2x00_post_work(vha, e);   \
5139 }
5140 
5141 qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
5142 qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
5143 qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
5144 qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
5145 qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
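
/*
 * Each invocation above stamps out one posting helper. As a sketch,
 * the login line expands to:
 *
 *	int qla2x00_post_async_login_work(struct scsi_qla_host *vha,
 *	    fc_port_t *fcport, uint16_t *data)
 *	{
 *		struct qla_work_evt *e;
 *
 *		e = qla2x00_alloc_work(vha, QLA_EVT_ASYNC_LOGIN);
 *		...
 *	}
 *
 * i.e. one QLA_EVT_ASYNC_* event per async operation, all funneled
 * through qla2x00_post_work().
 */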
5146 
5147 int
5148 qla2x00_post_uevent_work(struct scsi_qla_host *vha, u32 code)
5149 {
5150     struct qla_work_evt *e;
5151 
5152     e = qla2x00_alloc_work(vha, QLA_EVT_UEVENT);
5153     if (!e)
5154         return QLA_FUNCTION_FAILED;
5155 
5156     e->u.uevent.code = code;
5157     return qla2x00_post_work(vha, e);
5158 }
5159 
5160 static void
5161 qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
5162 {
5163     char event_string[40];
5164     char *envp[] = { event_string, NULL };
5165 
5166     switch (code) {
5167     case QLA_UEVENT_CODE_FW_DUMP:
5168         snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
5169             vha->host_no);
5170         break;
5171     default:
5172         /* do nothing */
5173         break;
5174     }
5175     kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
5176 }
5177 
5178 int
5179 qlafx00_post_aenfx_work(struct scsi_qla_host *vha,  uint32_t evtcode,
5180             uint32_t *data, int cnt)
5181 {
5182     struct qla_work_evt *e;
5183 
5184     e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
5185     if (!e)
5186         return QLA_FUNCTION_FAILED;
5187 
5188     e->u.aenfx.evtcode = evtcode;
5189     e->u.aenfx.count = cnt;
5190     memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
5191     return qla2x00_post_work(vha, e);
5192 }
5193 
5194 void qla24xx_sched_upd_fcport(fc_port_t *fcport)
5195 {
5196     unsigned long flags;
5197 
5198     if (IS_SW_RESV_ADDR(fcport->d_id))
5199         return;
5200 
5201     spin_lock_irqsave(&fcport->vha->work_lock, flags);
5202     if (fcport->disc_state == DSC_UPD_FCPORT) {
5203         spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
5204         return;
5205     }
5206     fcport->jiffies_at_registration = jiffies;
5207     fcport->sec_since_registration = 0;
5208     fcport->next_disc_state = DSC_DELETED;
5209     qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
5210     spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
5211 
5212     queue_work(system_unbound_wq, &fcport->reg_work);
5213 }
5214 
5215 static
5216 void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
5217 {
5218     unsigned long flags;
5219     fc_port_t *fcport =  NULL, *tfcp;
5220     struct qlt_plogi_ack_t *pla =
5221         (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
5222     uint8_t free_fcport = 0;
5223 
5224     ql_dbg(ql_dbg_disc, vha, 0xffff,
5225         "%s %d %8phC enter\n",
5226         __func__, __LINE__, e->u.new_sess.port_name);
5227 
5228     spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5229     fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
5230     if (fcport) {
5231         fcport->d_id = e->u.new_sess.id;
5232         if (pla) {
5233             fcport->fw_login_state = DSC_LS_PLOGI_PEND;
5234             memcpy(fcport->node_name,
5235                 pla->iocb.u.isp24.u.plogi.node_name,
5236                 WWN_SIZE);
5237             qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
5238             /* we took an extra ref_count to prevent PLOGI ACK when
5239              * fcport/sess has not been created.
5240              */
5241             pla->ref_count--;
5242         }
5243     } else {
5244         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5245         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5246         if (fcport) {
5247             fcport->d_id = e->u.new_sess.id;
5248             fcport->flags |= FCF_FABRIC_DEVICE;
5249             fcport->fw_login_state = DSC_LS_PLOGI_PEND;
5250             fcport->tgt_short_link_down_cnt = 0;
5251 
5252             memcpy(fcport->port_name, e->u.new_sess.port_name,
5253                 WWN_SIZE);
5254 
5255             fcport->fc4_type = e->u.new_sess.fc4_type;
5256             if (NVME_PRIORITY(vha->hw, fcport))
5257                 fcport->do_prli_nvme = 1;
5258             else
5259                 fcport->do_prli_nvme = 0;
5260 
5261             if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
5262                 fcport->dm_login_expire = jiffies +
5263                     QLA_N2N_WAIT_TIME * HZ;
5264                 fcport->fc4_type = FS_FC4TYPE_FCP;
5265                 fcport->n2n_flag = 1;
5266                 if (vha->flags.nvme_enabled)
5267                     fcport->fc4_type |= FS_FC4TYPE_NVME;
5268             }
5269 
5270         } else {
5271             ql_dbg(ql_dbg_disc, vha, 0xffff,
5272                    "%s %8phC mem alloc fail.\n",
5273                    __func__, e->u.new_sess.port_name);
5274 
5275             if (pla) {
5276                 list_del(&pla->list);
5277                 kmem_cache_free(qla_tgt_plogi_cachep, pla);
5278             }
5279             return;
5280         }
5281 
5282         spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5283         /* search again to make sure no one else got ahead */
5284         tfcp = qla2x00_find_fcport_by_wwpn(vha,
5285             e->u.new_sess.port_name, 1);
5286         if (tfcp) {
5287             /* should rarely happen */
5288             ql_dbg(ql_dbg_disc, vha, 0xffff,
5289                 "%s %8phC found existing fcport b4 add. DS %d LS %d\n",
5290                 __func__, tfcp->port_name, tfcp->disc_state,
5291                 tfcp->fw_login_state);
5292 
5293             free_fcport = 1;
5294         } else {
5295             list_add_tail(&fcport->list, &vha->vp_fcports);
5296 
5297         }
5298         if (pla) {
5299             qlt_plogi_ack_link(vha, pla, fcport,
5300                 QLT_PLOGI_LINK_SAME_WWN);
5301             pla->ref_count--;
5302         }
5303     }
5304     spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5305 
5306     if (fcport) {
5307         fcport->id_changed = 1;
5308         fcport->scan_state = QLA_FCPORT_FOUND;
5309         fcport->chip_reset = vha->hw->base_qpair->chip_reset;
5310         memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
5311 
5312         if (pla) {
5313             if (pla->iocb.u.isp24.status_subcode == ELS_PRLI) {
5314                 u16 wd3_lo;
5315 
5316                 fcport->fw_login_state = DSC_LS_PRLI_PEND;
5317                 fcport->local = 0;
5318                 fcport->loop_id =
5319                     le16_to_cpu(
5320                         pla->iocb.u.isp24.nport_handle);
5321                 fcport->fw_login_state = DSC_LS_PRLI_PEND;
5322                 wd3_lo =
5323                     le16_to_cpu(
5324                     pla->iocb.u.isp24.u.prli.wd3_lo);
5325 
5326                 if (wd3_lo & BIT_7)
5327                     fcport->conf_compl_supported = 1;
5328 
5329                 if ((wd3_lo & BIT_4) == 0)
5330                     fcport->port_type = FCT_INITIATOR;
5331                 else
5332                     fcport->port_type = FCT_TARGET;
5333             }
5334             qlt_plogi_ack_unref(vha, pla);
5335         } else {
5336             fc_port_t *dfcp = NULL;
5337 
5338             spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5339             tfcp = qla2x00_find_fcport_by_nportid(vha,
5340                 &e->u.new_sess.id, 1);
5341             if (tfcp && (tfcp != fcport)) {
5342                 /*
5343                  * We have a conflict fcport with same NportID.
5344                  */
5345                 ql_dbg(ql_dbg_disc, vha, 0xffff,
5346                     "%s %8phC found conflict b4 add. DS %d LS %d\n",
5347                     __func__, tfcp->port_name, tfcp->disc_state,
5348                     tfcp->fw_login_state);
5349 
5350                 switch (tfcp->disc_state) {
5351                 case DSC_DELETED:
5352                     break;
5353                 case DSC_DELETE_PEND:
5354                     fcport->login_pause = 1;
5355                     tfcp->conflict = fcport;
5356                     break;
5357                 default:
5358                     fcport->login_pause = 1;
5359                     tfcp->conflict = fcport;
5360                     dfcp = tfcp;
5361                     break;
5362                 }
5363             }
5364             spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5365             if (dfcp)
5366                 qlt_schedule_sess_for_deletion(tfcp);
5367 
5368             if (N2N_TOPO(vha->hw)) {
5369                 fcport->flags &= ~FCF_FABRIC_DEVICE;
5370                 fcport->keep_nport_handle = 1;
5371                 if (vha->flags.nvme_enabled) {
5372                     fcport->fc4_type =
5373                         (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP);
5374                     fcport->n2n_flag = 1;
5375                 }
5376                 fcport->fw_login_state = 0;
5377 
5378                 schedule_delayed_work(&vha->scan.scan_work, 5);
5379             } else {
5380                 qla24xx_fcport_handle_login(vha, fcport);
5381             }
5382         }
5383     }
5384 
5385     if (free_fcport) {
5386         qla2x00_free_fcport(fcport);
5387         if (pla) {
5388             list_del(&pla->list);
5389             kmem_cache_free(qla_tgt_plogi_cachep, pla);
5390         }
5391     }
5392 }
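
/*
 * Note the lock/drop/relock pattern above: the initial fcport lookup
 * runs under tgt.sess_lock, but the GFP_KERNEL allocation must not, so
 * after allocating, the lock is re-acquired and the WWPN is searched
 * again in case another context created the same session in the
 * meantime (the free_fcport path disposes of the loser's allocation).
 */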
5393 
5394 static void qla_sp_retry(struct scsi_qla_host *vha, struct qla_work_evt *e)
5395 {
5396     struct srb *sp = e->u.iosb.sp;
5397     int rval;
5398 
5399     rval = qla2x00_start_sp(sp);
5400     if (rval != QLA_SUCCESS) {
5401         ql_dbg(ql_dbg_disc, vha, 0x2043,
5402             "%s: %s: Re-issue IOCB failed (%d).\n",
5403             __func__, sp->name, rval);
5404         qla24xx_sp_unmap(vha, sp);
5405     }
5406 }
5407 
5408 void
5409 qla2x00_do_work(struct scsi_qla_host *vha)
5410 {
5411     struct qla_work_evt *e, *tmp;
5412     unsigned long flags;
5413     LIST_HEAD(work);
5414     int rc;
5415 
5416     spin_lock_irqsave(&vha->work_lock, flags);
5417     list_splice_init(&vha->work_list, &work);
5418     spin_unlock_irqrestore(&vha->work_lock, flags);
5419 
5420     list_for_each_entry_safe(e, tmp, &work, list) {
5421         rc = QLA_SUCCESS;
5422         switch (e->type) {
5423         case QLA_EVT_AEN:
5424             fc_host_post_event(vha->host, fc_get_event_number(),
5425                 e->u.aen.code, e->u.aen.data);
5426             break;
5427         case QLA_EVT_IDC_ACK:
5428             qla81xx_idc_ack(vha, e->u.idc_ack.mb);
5429             break;
5430         case QLA_EVT_ASYNC_LOGIN:
5431             qla2x00_async_login(vha, e->u.logio.fcport,
5432                 e->u.logio.data);
5433             break;
5434         case QLA_EVT_ASYNC_LOGOUT:
5435             rc = qla2x00_async_logout(vha, e->u.logio.fcport);
5436             break;
5437         case QLA_EVT_ASYNC_ADISC:
5438             qla2x00_async_adisc(vha, e->u.logio.fcport,
5439                 e->u.logio.data);
5440             break;
5441         case QLA_EVT_UEVENT:
5442             qla2x00_uevent_emit(vha, e->u.uevent.code);
5443             break;
5444         case QLA_EVT_AENFX:
5445             qlafx00_process_aen(vha, e);
5446             break;
5447         case QLA_EVT_GPNID:
5448             qla24xx_async_gpnid(vha, &e->u.gpnid.id);
5449             break;
5450         case QLA_EVT_UNMAP:
5451             qla24xx_sp_unmap(vha, e->u.iosb.sp);
5452             break;
5453         case QLA_EVT_RELOGIN:
5454             qla2x00_relogin(vha);
5455             break;
5456         case QLA_EVT_NEW_SESS:
5457             qla24xx_create_new_sess(vha, e);
5458             break;
5459         case QLA_EVT_GPDB:
5460             qla24xx_async_gpdb(vha, e->u.fcport.fcport,
5461                 e->u.fcport.opt);
5462             break;
5463         case QLA_EVT_PRLI:
5464             qla24xx_async_prli(vha, e->u.fcport.fcport);
5465             break;
5466         case QLA_EVT_GPSC:
5467             qla24xx_async_gpsc(vha, e->u.fcport.fcport);
5468             break;
5469         case QLA_EVT_GNL:
5470             qla24xx_async_gnl(vha, e->u.fcport.fcport);
5471             break;
5472         case QLA_EVT_NACK:
5473             qla24xx_do_nack_work(vha, e);
5474             break;
5475         case QLA_EVT_ASYNC_PRLO:
5476             rc = qla2x00_async_prlo(vha, e->u.logio.fcport);
5477             break;
5478         case QLA_EVT_ASYNC_PRLO_DONE:
5479             qla2x00_async_prlo_done(vha, e->u.logio.fcport,
5480                 e->u.logio.data);
5481             break;
5482         case QLA_EVT_GPNFT:
5483             qla24xx_async_gpnft(vha, e->u.gpnft.fc4_type,
5484                 e->u.gpnft.sp);
5485             break;
5486         case QLA_EVT_GPNFT_DONE:
5487             qla24xx_async_gpnft_done(vha, e->u.iosb.sp);
5488             break;
5489         case QLA_EVT_GNNFT_DONE:
5490             qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
5491             break;
5492         case QLA_EVT_GNNID:
5493             qla24xx_async_gnnid(vha, e->u.fcport.fcport);
5494             break;
5495         case QLA_EVT_GFPNID:
5496             qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
5497             break;
5498         case QLA_EVT_SP_RETRY:
5499             qla_sp_retry(vha, e);
5500             break;
5501         case QLA_EVT_IIDMA:
5502             qla_do_iidma_work(vha, e->u.fcport.fcport);
5503             break;
5504         case QLA_EVT_ELS_PLOGI:
5505             qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
5506                 e->u.fcport.fcport, false);
5507             break;
5508         case QLA_EVT_SA_REPLACE:
5509             rc = qla24xx_issue_sa_replace_iocb(vha, e);
5510             break;
5511         }
5512 
5513         if (rc == EAGAIN) {
5514             /* put 'work' at head of 'vha->work_list' */
5515             spin_lock_irqsave(&vha->work_lock, flags);
5516             list_splice(&work, &vha->work_list);
5517             spin_unlock_irqrestore(&vha->work_lock, flags);
5518             break;
5519         }
5520         list_del_init(&e->list);
5521         if (e->flags & QLA_EVT_FLAG_FREE)
5522             kfree(e);
5523 
5524         /* For each completed work item, decrement the vha ref count */
5525         QLA_VHA_MARK_NOT_BUSY(vha);
5526     }
5527 }
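
/*
 * The splice-then-process pattern above keeps work_lock hold times
 * short: the entire pending list is moved to a local list under the
 * lock and then processed without it. A handler returning EAGAIN
 * splices the remaining events back onto the head of vha->work_list,
 * preserving their order for the retry.
 */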
5528 
5529 int qla24xx_post_relogin_work(struct scsi_qla_host *vha)
5530 {
5531     struct qla_work_evt *e;
5532 
5533     e = qla2x00_alloc_work(vha, QLA_EVT_RELOGIN);
5534 
5535     if (!e) {
5536         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5537         return QLA_FUNCTION_FAILED;
5538     }
5539 
5540     return qla2x00_post_work(vha, e);
5541 }
5542 
5543 /* Relogin all the fcports of a vport.
5544  * Context: dpc thread
5545  */
5546 void qla2x00_relogin(struct scsi_qla_host *vha)
5547 {
5548     fc_port_t       *fcport;
5549     int status, relogin_needed = 0;
5550     struct event_arg ea;
5551 
5552     list_for_each_entry(fcport, &vha->vp_fcports, list) {
5553         /*
5554          * If the port is not ONLINE then try to login
5555          * to it if we haven't run out of retries.
5556          */
5557         if (atomic_read(&fcport->state) != FCS_ONLINE &&
5558             fcport->login_retry) {
5559             if (fcport->scan_state != QLA_FCPORT_FOUND ||
5560                 fcport->disc_state == DSC_LOGIN_AUTH_PEND ||
5561                 fcport->disc_state == DSC_LOGIN_COMPLETE)
5562                 continue;
5563 
5564             if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) ||
5565                 fcport->disc_state == DSC_DELETE_PEND) {
5566                 relogin_needed = 1;
5567             } else {
5568                 if (vha->hw->current_topology != ISP_CFG_NL) {
5569                     memset(&ea, 0, sizeof(ea));
5570                     ea.fcport = fcport;
5571                     qla24xx_handle_relogin_event(vha, &ea);
5572                 } else if (vha->hw->current_topology ==
5573                      ISP_CFG_NL &&
5574                     IS_QLA2XXX_MIDTYPE(vha->hw)) {
5575                     (void)qla24xx_fcport_handle_login(vha,
5576                                     fcport);
5577                 } else if (vha->hw->current_topology ==
5578                     ISP_CFG_NL) {
5579                     fcport->login_retry--;
5580                     status =
5581                         qla2x00_local_device_login(vha,
5582                         fcport);
5583                     if (status == QLA_SUCCESS) {
5584                         fcport->old_loop_id =
5585                             fcport->loop_id;
5586                         ql_dbg(ql_dbg_disc, vha, 0x2003,
5587                             "Port login OK: logged in ID 0x%x.\n",
5588                             fcport->loop_id);
5589                         qla2x00_update_fcport
5590                             (vha, fcport);
5591                     } else if (status == 1) {
5592                         set_bit(RELOGIN_NEEDED,
5593                             &vha->dpc_flags);
5594                         /* retry the login again */
5595                         ql_dbg(ql_dbg_disc, vha, 0x2007,
5596                             "Retrying %d login again loop_id 0x%x.\n",
5597                             fcport->login_retry,
5598                             fcport->loop_id);
5599                     } else {
5600                         fcport->login_retry = 0;
5601                     }
5602 
5603                     if (fcport->login_retry == 0 &&
5604                         status != QLA_SUCCESS)
5605                         qla2x00_clear_loop_id(fcport);
5606                 }
5607             }
5608         }
5609         if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5610             break;
5611     }
5612 
5613     if (relogin_needed)
5614         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
5615 
5616     ql_dbg(ql_dbg_disc, vha, 0x400e,
5617         "Relogin end.\n");
5618 }
5619 
5620 /* Schedule work on any of the dpc-workqueues */
5621 void
5622 qla83xx_schedule_work(scsi_qla_host_t *base_vha, int work_code)
5623 {
5624     struct qla_hw_data *ha = base_vha->hw;
5625 
5626     switch (work_code) {
5627     case MBA_IDC_AEN: /* 0x8200 */
5628         if (ha->dpc_lp_wq)
5629             queue_work(ha->dpc_lp_wq, &ha->idc_aen);
5630         break;
5631 
5632     case QLA83XX_NIC_CORE_RESET: /* 0x1 */
5633         if (!ha->flags.nic_core_reset_hdlr_active) {
5634             if (ha->dpc_hp_wq)
5635                 queue_work(ha->dpc_hp_wq, &ha->nic_core_reset);
5636         } else
5637             ql_dbg(ql_dbg_p3p, base_vha, 0xb05e,
5638                 "NIC Core reset is already active. Skip "
5639                 "scheduling it again.\n");
5640         break;
5641     case QLA83XX_IDC_STATE_HANDLER: /* 0x2 */
5642         if (ha->dpc_hp_wq)
5643             queue_work(ha->dpc_hp_wq, &ha->idc_state_handler);
5644         break;
5645     case QLA83XX_NIC_CORE_UNRECOVERABLE: /* 0x3 */
5646         if (ha->dpc_hp_wq)
5647             queue_work(ha->dpc_hp_wq, &ha->nic_core_unrecoverable);
5648         break;
5649     default:
5650         ql_log(ql_log_warn, base_vha, 0xb05f,
5651             "Unknown work-code=0x%x.\n", work_code);
5652     }
5653 
5654     return;
5655 }
5656 
5657 /* Work: Perform NIC Core Unrecoverable state handling */
5658 void
5659 qla83xx_nic_core_unrecoverable_work(struct work_struct *work)
5660 {
5661     struct qla_hw_data *ha =
5662         container_of(work, struct qla_hw_data, nic_core_unrecoverable);
5663     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5664     uint32_t dev_state = 0;
5665 
5666     qla83xx_idc_lock(base_vha, 0);
5667     qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5668     qla83xx_reset_ownership(base_vha);
5669     if (ha->flags.nic_core_reset_owner) {
5670         ha->flags.nic_core_reset_owner = 0;
5671         qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
5672             QLA8XXX_DEV_FAILED);
5673         ql_log(ql_log_info, base_vha, 0xb060, "HW State: FAILED.\n");
5674         qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
5675     }
5676     qla83xx_idc_unlock(base_vha, 0);
5677 }
5678 
5679 /* Work: Execute IDC state handler */
5680 void
5681 qla83xx_idc_state_handler_work(struct work_struct *work)
5682 {
5683     struct qla_hw_data *ha =
5684         container_of(work, struct qla_hw_data, idc_state_handler);
5685     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5686     uint32_t dev_state = 0;
5687 
5688     qla83xx_idc_lock(base_vha, 0);
5689     qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5690     if (dev_state == QLA8XXX_DEV_FAILED ||
5691             dev_state == QLA8XXX_DEV_NEED_QUIESCENT)
5692         qla83xx_idc_state_handler(base_vha);
5693     qla83xx_idc_unlock(base_vha, 0);
5694 }
5695 
5696 static int
5697 qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
5698 {
5699     int rval = QLA_SUCCESS;
5700     unsigned long heart_beat_wait = jiffies + (1 * HZ);
5701     uint32_t heart_beat_counter1, heart_beat_counter2;
5702 
5703     do {
5704         if (time_after(jiffies, heart_beat_wait)) {
5705             ql_dbg(ql_dbg_p3p, base_vha, 0xb07c,
5706                 "Nic Core f/w is not alive.\n");
5707             rval = QLA_FUNCTION_FAILED;
5708             break;
5709         }
5710 
5711         qla83xx_idc_lock(base_vha, 0);
5712         qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
5713             &heart_beat_counter1);
5714         qla83xx_idc_unlock(base_vha, 0);
5715         msleep(100);
5716         qla83xx_idc_lock(base_vha, 0);
5717         qla83xx_rd_reg(base_vha, QLA83XX_FW_HEARTBEAT,
5718             &heart_beat_counter2);
5719         qla83xx_idc_unlock(base_vha, 0);
5720     } while (heart_beat_counter1 == heart_beat_counter2);
5721 
5722     return rval;
5723 }
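
/*
 * Liveness is inferred purely from forward progress: the firmware
 * heartbeat register is sampled twice, 100 ms apart, under the IDC
 * lock, and the NIC core is declared dead only if the counter fails to
 * advance within the one-second window.
 */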
5724 
5725 /* Work: Perform NIC Core Reset handling */
5726 void
5727 qla83xx_nic_core_reset_work(struct work_struct *work)
5728 {
5729     struct qla_hw_data *ha =
5730         container_of(work, struct qla_hw_data, nic_core_reset);
5731     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5732     uint32_t dev_state = 0;
5733 
5734     if (IS_QLA2031(ha)) {
5735         if (qla2xxx_mctp_dump(base_vha) != QLA_SUCCESS)
5736             ql_log(ql_log_warn, base_vha, 0xb081,
5737                 "Failed to dump mctp\n");
5738         return;
5739     }
5740 
5741     if (!ha->flags.nic_core_reset_hdlr_active) {
5742         if (qla83xx_check_nic_core_fw_alive(base_vha) == QLA_SUCCESS) {
5743             qla83xx_idc_lock(base_vha, 0);
5744             qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE,
5745                 &dev_state);
5746             qla83xx_idc_unlock(base_vha, 0);
5747             if (dev_state != QLA8XXX_DEV_NEED_RESET) {
5748                 ql_dbg(ql_dbg_p3p, base_vha, 0xb07a,
5749                     "Nic Core f/w is alive.\n");
5750                 return;
5751             }
5752         }
5753 
5754         ha->flags.nic_core_reset_hdlr_active = 1;
5755         if (qla83xx_nic_core_reset(base_vha)) {
5756             /* NIC Core reset failed. */
5757             ql_dbg(ql_dbg_p3p, base_vha, 0xb061,
5758                 "NIC Core reset failed.\n");
5759         }
5760         ha->flags.nic_core_reset_hdlr_active = 0;
5761     }
5762 }
5763 
5764 /* Work: Handle 8200 IDC aens */
5765 void
5766 qla83xx_service_idc_aen(struct work_struct *work)
5767 {
5768     struct qla_hw_data *ha =
5769         container_of(work, struct qla_hw_data, idc_aen);
5770     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5771     uint32_t dev_state, idc_control;
5772 
5773     qla83xx_idc_lock(base_vha, 0);
5774     qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
5775     qla83xx_rd_reg(base_vha, QLA83XX_IDC_CONTROL, &idc_control);
5776     qla83xx_idc_unlock(base_vha, 0);
5777     if (dev_state == QLA8XXX_DEV_NEED_RESET) {
5778         if (idc_control & QLA83XX_IDC_GRACEFUL_RESET) {
5779             ql_dbg(ql_dbg_p3p, base_vha, 0xb062,
5780                 "Application requested NIC Core Reset.\n");
5781             qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
5782         } else if (qla83xx_check_nic_core_fw_alive(base_vha) ==
5783             QLA_SUCCESS) {
5784             ql_dbg(ql_dbg_p3p, base_vha, 0xb07b,
5785                 "Other protocol driver requested NIC Core Reset.\n");
5786             qla83xx_schedule_work(base_vha, QLA83XX_NIC_CORE_RESET);
5787         }
5788     } else if (dev_state == QLA8XXX_DEV_FAILED ||
5789             dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
5790         qla83xx_schedule_work(base_vha, QLA83XX_IDC_STATE_HANDLER);
5791     }
5792 }
5793 
5794 /*
5795  * Control the frequency of IDC lock retries
5796  */
5797 #define QLA83XX_WAIT_LOGIC_MS   100
5798 
5799 static int
5800 qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
5801 {
5802     int rval;
5803     uint32_t data;
5804     uint32_t idc_lck_rcvry_stage_mask = 0x3;
5805     uint32_t idc_lck_rcvry_owner_mask = 0x3c;
5806     struct qla_hw_data *ha = base_vha->hw;
5807 
5808     ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
5809         "Trying force recovery of the IDC lock.\n");
5810 
5811     rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
5812     if (rval)
5813         return rval;
5814 
5815     if ((data & idc_lck_rcvry_stage_mask) > 0) {
5816         return QLA_SUCCESS;
5817     } else {
5818         data = (IDC_LOCK_RECOVERY_STAGE1) | (ha->portnum << 2);
5819         rval = qla83xx_wr_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
5820             data);
5821         if (rval)
5822             return rval;
5823 
5824         msleep(200);
5825 
5826         rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY,
5827             &data);
5828         if (rval)
5829             return rval;
5830 
5831         if (((data & idc_lck_rcvry_owner_mask) >> 2) == ha->portnum) {
5832             data &= (IDC_LOCK_RECOVERY_STAGE2 |
5833                     ~(idc_lck_rcvry_stage_mask));
5834             rval = qla83xx_wr_reg(base_vha,
5835                 QLA83XX_IDC_LOCK_RECOVERY, data);
5836             if (rval)
5837                 return rval;
5838 
5839             /* Forcefully perform IDC UnLock */
5840             rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK,
5841                 &data);
5842             if (rval)
5843                 return rval;
5844             /* Clear lock-id by setting 0xff */
5845             rval = qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
5846                 0xff);
5847             if (rval)
5848                 return rval;
5849             /* Clear lock-recovery by setting 0x0 */
5850             rval = qla83xx_wr_reg(base_vha,
5851                 QLA83XX_IDC_LOCK_RECOVERY, 0x0);
5852             if (rval)
5853                 return rval;
5854         } else
5855             return QLA_SUCCESS;
5856     }
5857 
5858     return rval;
5859 }
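
/*
 * Forced recovery is a two-stage protocol on the IDC_LOCK_RECOVERY
 * register: bits 1:0 hold the recovery stage and bits 5:2 the
 * recovering function. A function that sees recovery already in
 * progress (stage != 0) returns and lets the caller retry; otherwise
 * it claims STAGE1, waits 200 ms, and if it still owns the register,
 * advances to STAGE2 and force-releases the driver lock and lock-id.
 */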
5860 
5861 static int
5862 qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
5863 {
5864     int rval = QLA_SUCCESS;
5865     uint32_t o_drv_lockid, n_drv_lockid;
5866     unsigned long lock_recovery_timeout;
5867 
5868     lock_recovery_timeout = jiffies + QLA83XX_MAX_LOCK_RECOVERY_WAIT;
5869 retry_lockid:
5870     rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &o_drv_lockid);
5871     if (rval)
5872         goto exit;
5873 
5874     /* MAX wait time before forcing IDC Lock recovery = 2 secs */
5875     if (time_after_eq(jiffies, lock_recovery_timeout)) {
5876         if (qla83xx_force_lock_recovery(base_vha) == QLA_SUCCESS)
5877             return QLA_SUCCESS;
5878         else
5879             return QLA_FUNCTION_FAILED;
5880     }
5881 
5882     rval = qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &n_drv_lockid);
5883     if (rval)
5884         goto exit;
5885 
5886     if (o_drv_lockid == n_drv_lockid) {
5887         msleep(QLA83XX_WAIT_LOGIC_MS);
5888         goto retry_lockid;
5889     } else
5890         return QLA_SUCCESS;
5891 
5892 exit:
5893     return rval;
5894 }
5895 
5896 /*
5897  * Context: task, can sleep
5898  */
5899 void
5900 qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
5901 {
5902     uint32_t data;
5903     uint32_t lock_owner;
5904     struct qla_hw_data *ha = base_vha->hw;
5905 
5906     might_sleep();
5907 
5908     /* IDC-lock implementation using driver-lock/lock-id remote registers */
5909 retry_lock:
5910     if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCK, &data)
5911         == QLA_SUCCESS) {
5912         if (data) {
5913             /* Setting lock-id to our function-number */
5914             qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
5915                 ha->portnum);
5916         } else {
5917             qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
5918                 &lock_owner);
5919             ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
5920                 "Failed to acquire IDC lock, acquired by %d, "
5921                 "retrying...\n", lock_owner);
5922 
5923             /* Retry/Perform IDC-Lock recovery */
5924             if (qla83xx_idc_lock_recovery(base_vha)
5925                 == QLA_SUCCESS) {
5926                 msleep(QLA83XX_WAIT_LOGIC_MS);
5927                 goto retry_lock;
5928             } else
5929                 ql_log(ql_log_warn, base_vha, 0xb075,
5930                     "IDC Lock recovery FAILED.\n");
5931         }
5932 
5933     }
5934 
5935     return;
5936 }
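
/*
 * QLA83XX_DRIVER_LOCK is a read-to-acquire hardware semaphore: a read
 * returning non-zero means the lock is now held, after which the owner
 * records its port number in QLA83XX_DRIVER_LOCKID. On contention the
 * code loops through the lock-recovery path above to break locks left
 * behind by a crashed function.
 */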
5937 
5938 static bool
5939 qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
5940     struct purex_entry_24xx *purex)
5941 {
5942     char fwstr[16];
5943     u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0];
5944     struct port_database_24xx *pdb;
5945 
5946     /* Domain Controller is always logged-out. */
5947     /* if RDP request is not from Domain Controller: */
5948     if (sid != 0xfffc01)
5949         return false;
5950 
5951     ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid);
5952 
5953     pdb = kzalloc(sizeof(*pdb), GFP_KERNEL);
5954     if (!pdb) {
5955         ql_dbg(ql_dbg_init, vha, 0x0181,
5956             "%s: Failed allocate pdb\n", __func__);
5957     } else if (qla24xx_get_port_database(vha,
5958                 le16_to_cpu(purex->nport_handle), pdb)) {
5959         ql_dbg(ql_dbg_init, vha, 0x0181,
5960             "%s: Failed get pdb sid=%x\n", __func__, sid);
5961     } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE &&
5962         pdb->current_login_state != PDS_PRLI_COMPLETE) {
5963         ql_dbg(ql_dbg_init, vha, 0x0181,
5964             "%s: Port not logged in sid=%#x\n", __func__, sid);
5965     } else {
5966         /* RDP request is from logged in port */
5967         kfree(pdb);
5968         return false;
5969     }
5970     kfree(pdb);
5971 
5972     vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr));
5973     fwstr[strcspn(fwstr, " ")] = 0;
5974     /* if FW version allows RDP response length up to 2048 bytes: */
5975     if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0)
5976         return false;
5977 
5978     ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr);
5979 
5980     /* RDP response length is to be reduced to a maximum of 256 bytes */
5981     return true;
5982 }
5983 
5984 /*
5985  * Function Name: qla24xx_process_purex_rdp
5986  *
5987  * Description:
5988  * Prepare an RDP response and send it to the fabric switch
5989  *
5990  * PARAMETERS:
5991  * vha: SCSI qla host
5992  * item: purex list item carrying the RDP request received by the HBA
5993  */
5994 void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
5995                    struct purex_item *item)
5996 {
5997     struct qla_hw_data *ha = vha->hw;
5998     struct purex_entry_24xx *purex =
5999         (struct purex_entry_24xx *)&item->iocb;
6000     dma_addr_t rsp_els_dma;
6001     dma_addr_t rsp_payload_dma;
6002     dma_addr_t stat_dma;
6003     dma_addr_t sfp_dma;
6004     struct els_entry_24xx *rsp_els = NULL;
6005     struct rdp_rsp_payload *rsp_payload = NULL;
6006     struct link_statistics *stat = NULL;
6007     uint8_t *sfp = NULL;
6008     uint16_t sfp_flags = 0;
6009     uint rsp_payload_length = sizeof(*rsp_payload);
6010     int rval;
6011 
6012     ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180,
6013         "%s: Enter\n", __func__);
6014 
6015     ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181,
6016         "-------- ELS REQ -------\n");
6017     ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182,
6018         purex, sizeof(*purex));
6019 
6020     if (qla25xx_rdp_rsp_reduce_size(vha, purex)) {
6021         rsp_payload_length =
6022             offsetof(typeof(*rsp_payload), optical_elmt_desc);
6023         ql_dbg(ql_dbg_init, vha, 0x0181,
6024             "Reducing RSP payload length to %u bytes...\n",
6025             rsp_payload_length);
6026     }
6027 
6028     rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els),
6029         &rsp_els_dma, GFP_KERNEL);
6030     if (!rsp_els) {
6031         ql_log(ql_log_warn, vha, 0x0183,
6032             "Failed allocate dma buffer ELS RSP.\n");
6033         goto dealloc;
6034     }
6035 
6036     rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
6037         &rsp_payload_dma, GFP_KERNEL);
6038     if (!rsp_payload) {
6039         ql_log(ql_log_warn, vha, 0x0184,
6040             "Failed allocate dma buffer ELS RSP payload.\n");
6041         goto dealloc;
6042     }
6043 
6044     sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
6045         &sfp_dma, GFP_KERNEL);
6046 
6047     stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
6048         &stat_dma, GFP_KERNEL);
6049 
6050     /* Prepare Response IOCB */
6051     rsp_els->entry_type = ELS_IOCB_TYPE;
6052     rsp_els->entry_count = 1;
6053     rsp_els->sys_define = 0;
6054     rsp_els->entry_status = 0;
6055     rsp_els->handle = 0;
6056     rsp_els->nport_handle = purex->nport_handle;
6057     rsp_els->tx_dsd_count = cpu_to_le16(1);
6058     rsp_els->vp_index = purex->vp_idx;
6059     rsp_els->sof_type = EST_SOFI3;
6060     rsp_els->rx_xchg_address = purex->rx_xchg_addr;
6061     rsp_els->rx_dsd_count = 0;
6062     rsp_els->opcode = purex->els_frame_payload[0];
6063 
6064     rsp_els->d_id[0] = purex->s_id[0];
6065     rsp_els->d_id[1] = purex->s_id[1];
6066     rsp_els->d_id[2] = purex->s_id[2];
6067 
6068     rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
6069     rsp_els->rx_byte_count = 0;
6070     rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length);
6071 
6072     put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address);
6073     rsp_els->tx_len = rsp_els->tx_byte_count;
6074 
6075     rsp_els->rx_address = 0;
6076     rsp_els->rx_len = 0;
6077 
6078     /* Prepare Response Payload */
6079     rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */
6080     rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) -
6081                        sizeof(rsp_payload->hdr));
6082 
6083     /* Link service Request Info Descriptor */
6084     rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1);
6085     rsp_payload->ls_req_info_desc.desc_len =
6086         cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc));
6087     rsp_payload->ls_req_info_desc.req_payload_word_0 =
6088         cpu_to_be32p((uint32_t *)purex->els_frame_payload);
6089 
6090     /* Link service Request Info Descriptor 2 */
6091     rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1);
6092     rsp_payload->ls_req_info_desc2.desc_len =
6093         cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2));
6094     rsp_payload->ls_req_info_desc2.req_payload_word_0 =
6095         cpu_to_be32p((uint32_t *)purex->els_frame_payload);
6096 
6097 
6098     rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000);
6099     rsp_payload->sfp_diag_desc.desc_len =
6100         cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc));
6101 
6102     if (sfp) {
6103         /* SFP Flags */
6104         memset(sfp, 0, SFP_RTDI_LEN);
6105         rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0);
6106         if (!rval) {
6107             /* SFP Flags bits 3-0: Port Tx Laser Type */
6108             if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5))
6109                 sfp_flags |= BIT_0; /* short wave */
6110             else if (sfp[0] & BIT_1)
6111                 sfp_flags |= BIT_1; /* long wave 1310nm */
6112             else if (sfp[1] & BIT_4)
6113                 sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */
6114         }
6115 
6116         /* SFP Type */
6117         memset(sfp, 0, SFP_RTDI_LEN);
6118         rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0);
6119         if (!rval) {
6120             sfp_flags |= BIT_4; /* optical */
6121             if (sfp[0] == 0x3)
6122                 sfp_flags |= BIT_6; /* sfp+ */
6123         }
6124 
6125         rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags);
6126 
6127         /* SFP Diagnostics */
6128         memset(sfp, 0, SFP_RTDI_LEN);
6129         rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0);
6130         if (!rval) {
6131             __be16 *trx = (__force __be16 *)sfp; /* already be16 */
6132             rsp_payload->sfp_diag_desc.temperature = trx[0];
6133             rsp_payload->sfp_diag_desc.vcc = trx[1];
6134             rsp_payload->sfp_diag_desc.tx_bias = trx[2];
6135             rsp_payload->sfp_diag_desc.tx_power = trx[3];
6136             rsp_payload->sfp_diag_desc.rx_power = trx[4];
6137         }
6138     }
6139 
6140     /* Port Speed Descriptor */
6141     rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001);
6142     rsp_payload->port_speed_desc.desc_len =
6143         cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
6144     rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
6145         qla25xx_fdmi_port_speed_capability(ha));
6146     rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
6147         qla25xx_fdmi_port_speed_currently(ha));
6148 
6149     /* Link Error Status Descriptor */
6150     rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
6151     rsp_payload->ls_err_desc.desc_len =
6152         cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc));
6153 
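    /* The firmware returns these counters little-endian; they are
     * swapped here into the big-endian format the RDP descriptor
     * carries on the wire. */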
6154     if (stat) {
6155         rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0);
6156         if (!rval) {
6157             rsp_payload->ls_err_desc.link_fail_cnt =
6158                 cpu_to_be32(le32_to_cpu(stat->link_fail_cnt));
6159             rsp_payload->ls_err_desc.loss_sync_cnt =
6160                 cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt));
6161             rsp_payload->ls_err_desc.loss_sig_cnt =
6162                 cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt));
6163             rsp_payload->ls_err_desc.prim_seq_err_cnt =
6164                 cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt));
6165             rsp_payload->ls_err_desc.inval_xmit_word_cnt =
6166                 cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt));
6167             rsp_payload->ls_err_desc.inval_crc_cnt =
6168                 cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt));
6169             rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
6170         }
6171     }
6172 
6173     /* Portname Descriptor */
6174     rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
6175     rsp_payload->port_name_diag_desc.desc_len =
6176         cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
6177     memcpy(rsp_payload->port_name_diag_desc.WWNN,
6178         vha->node_name,
6179         sizeof(rsp_payload->port_name_diag_desc.WWNN));
6180     memcpy(rsp_payload->port_name_diag_desc.WWPN,
6181         vha->port_name,
6182         sizeof(rsp_payload->port_name_diag_desc.WWPN));
6183 
6184     /* F-Port Portname Descriptor */
6185     rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
6186     rsp_payload->port_name_direct_desc.desc_len =
6187         cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
6188     memcpy(rsp_payload->port_name_direct_desc.WWNN,
6189         vha->fabric_node_name,
6190         sizeof(rsp_payload->port_name_direct_desc.WWNN));
6191     memcpy(rsp_payload->port_name_direct_desc.WWPN,
6192         vha->fabric_port_name,
6193         sizeof(rsp_payload->port_name_direct_desc.WWPN));
6194 
6195     /* Buffer Credit Descriptor */
6196     rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
6197     rsp_payload->buffer_credit_desc.desc_len =
6198         cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
6199     rsp_payload->buffer_credit_desc.fcport_b2b = 0;
6200     rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
6201     rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
6202 
6203     if (ha->flags.plogi_template_valid) {
6204         uint32_t tmp =
6205         be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
6206         rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
6207     }
6208 
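    /* A short-form response payload cannot carry the optional optical
     * element and product descriptors below; send the mandatory
     * descriptors as-is. */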
6209     if (rsp_payload_length < sizeof(*rsp_payload))
6210         goto send;
6211 
6212     /* Optical Element Descriptor, Temperature */
6213     rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
6214     rsp_payload->optical_elmt_desc[0].desc_len =
6215         cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6216     /* Optical Element Descriptor, Voltage */
6217     rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
6218     rsp_payload->optical_elmt_desc[1].desc_len =
6219         cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6220     /* Optical Element Descriptor, Tx Bias Current */
6221     rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
6222     rsp_payload->optical_elmt_desc[2].desc_len =
6223         cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6224     /* Optical Element Descriptor, Tx Power */
6225     rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
6226     rsp_payload->optical_elmt_desc[3].desc_len =
6227         cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6228     /* Optical Element Descriptor, Rx Power */
6229     rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
6230     rsp_payload->optical_elmt_desc[4].desc_len =
6231         cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
6232 
6233     if (sfp) {
6234         memset(sfp, 0, SFP_RTDI_LEN);
6235         rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
6236         if (!rval) {
6237             __be16 *trx = (__force __be16 *)sfp; /* already be16 */
6238 
6239             /* Optical Element Descriptor, Temperature */
6240             rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
6241             rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
6242             rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
6243             rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
6244             rsp_payload->optical_elmt_desc[0].element_flags =
6245                 cpu_to_be32(1 << 28);
6246 
6247             /* Optical Element Descriptor, Voltage */
6248             rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
6249             rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
6250             rsp_payload->optical_elmt_desc[1].high_warn = trx[6];
6251             rsp_payload->optical_elmt_desc[1].low_warn = trx[7];
6252             rsp_payload->optical_elmt_desc[1].element_flags =
6253                 cpu_to_be32(2 << 28);
6254 
6255             /* Optical Element Descriptor, Tx Bias Current */
6256             rsp_payload->optical_elmt_desc[2].high_alarm = trx[8];
6257             rsp_payload->optical_elmt_desc[2].low_alarm = trx[9];
6258             rsp_payload->optical_elmt_desc[2].high_warn = trx[10];
6259             rsp_payload->optical_elmt_desc[2].low_warn = trx[11];
6260             rsp_payload->optical_elmt_desc[2].element_flags =
6261                 cpu_to_be32(3 << 28);
6262 
6263             /* Optical Element Descriptor, Tx Power */
6264             rsp_payload->optical_elmt_desc[3].high_alarm = trx[12];
6265             rsp_payload->optical_elmt_desc[3].low_alarm = trx[13];
6266             rsp_payload->optical_elmt_desc[3].high_warn = trx[14];
6267             rsp_payload->optical_elmt_desc[3].low_warn = trx[15];
6268             rsp_payload->optical_elmt_desc[3].element_flags =
6269                 cpu_to_be32(4 << 28);
6270 
6271             /* Optical Element Descriptor, Rx Power */
6272             rsp_payload->optical_elmt_desc[4].high_alarm = trx[16];
6273             rsp_payload->optical_elmt_desc[4].low_alarm = trx[17];
6274             rsp_payload->optical_elmt_desc[4].high_warn = trx[18];
6275             rsp_payload->optical_elmt_desc[4].low_warn = trx[19];
6276             rsp_payload->optical_elmt_desc[4].element_flags =
6277                 cpu_to_be32(5 << 28);
6278         }
6279 
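        /* Per SFF-8472, A2h byte 112 carries the alarm flag bits and
         * byte 116 the warning flag bits; repack each sensor's high/low
         * alarm/warn bits into bits 3..0 of its element_flags word. */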
6280         memset(sfp, 0, SFP_RTDI_LEN);
6281         rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0);
6282         if (!rval) {
6283             /* Temperature high/low alarm/warning */
6284             rsp_payload->optical_elmt_desc[0].element_flags |=
6285                 cpu_to_be32(
6286                 (sfp[0] >> 7 & 1) << 3 |
6287                 (sfp[0] >> 6 & 1) << 2 |
6288                 (sfp[4] >> 7 & 1) << 1 |
6289                 (sfp[4] >> 6 & 1) << 0);
6290 
6291             /* Voltage high/low alarm/warning */
6292             rsp_payload->optical_elmt_desc[1].element_flags |=
6293                 cpu_to_be32(
6294                 (sfp[0] >> 5 & 1) << 3 |
6295                 (sfp[0] >> 4 & 1) << 2 |
6296                 (sfp[4] >> 5 & 1) << 1 |
6297                 (sfp[4] >> 4 & 1) << 0);
6298 
6299             /* Tx Bias Current high/low alarm/warning */
6300             rsp_payload->optical_elmt_desc[2].element_flags |=
6301                 cpu_to_be32(
6302                 (sfp[0] >> 3 & 1) << 3 |
6303                 (sfp[0] >> 2 & 1) << 2 |
6304                 (sfp[4] >> 3 & 1) << 1 |
6305                 (sfp[4] >> 2 & 1) << 0);
6306 
6307             /* Tx Power high/low alarm/warning */
6308             rsp_payload->optical_elmt_desc[3].element_flags |=
6309                 cpu_to_be32(
6310                 (sfp[0] >> 1 & 1) << 3 |
6311                 (sfp[0] >> 0 & 1) << 2 |
6312                 (sfp[4] >> 1 & 1) << 1 |
6313                 (sfp[4] >> 0 & 1) << 0);
6314 
6315             /* Rx Power high/low alarm/warning */
6316             rsp_payload->optical_elmt_desc[4].element_flags |=
6317                 cpu_to_be32(
6318                 (sfp[1] >> 7 & 1) << 3 |
6319                 (sfp[1] >> 6 & 1) << 2 |
6320                 (sfp[5] >> 7 & 1) << 1 |
6321                 (sfp[5] >> 6 & 1) << 0);
6322         }
6323     }
6324 
6325     /* Optical Product Data Descriptor */
6326     rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008);
6327     rsp_payload->optical_prod_desc.desc_len =
6328         cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc));
6329 
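    /* Vendor name, part number, revision, serial number and date code
     * all come from the A0h identification page (SFF-8472 bytes 20-91),
     * fetched here in two reads. */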
6330     if (sfp) {
6331         memset(sfp, 0, SFP_RTDI_LEN);
6332         rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0);
6333         if (!rval) {
6334             memcpy(rsp_payload->optical_prod_desc.vendor_name,
6335                 sfp + 0,
6336                 sizeof(rsp_payload->optical_prod_desc.vendor_name));
6337             memcpy(rsp_payload->optical_prod_desc.part_number,
6338                 sfp + 20,
6339                 sizeof(rsp_payload->optical_prod_desc.part_number));
6340             memcpy(rsp_payload->optical_prod_desc.revision,
6341                 sfp + 36,
6342                 sizeof(rsp_payload->optical_prod_desc.revision));
6343             memcpy(rsp_payload->optical_prod_desc.serial_number,
6344                 sfp + 48,
6345                 sizeof(rsp_payload->optical_prod_desc.serial_number));
6346         }
6347 
6348         memset(sfp, 0, SFP_RTDI_LEN);
6349         rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0);
6350         if (!rval) {
6351             memcpy(rsp_payload->optical_prod_desc.date,
6352                 sfp + 0,
6353                 sizeof(rsp_payload->optical_prod_desc.date));
6354         }
6355     }
6356 
6357 send:
6358     ql_dbg(ql_dbg_init, vha, 0x0183,
6359         "Sending ELS Response to RDP Request...\n");
6360     ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
6361         "-------- ELS RSP -------\n");
6362     ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
6363         rsp_els, sizeof(*rsp_els));
6364     ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
6365         "-------- ELS RSP PAYLOAD -------\n");
6366     ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
6367         rsp_payload, rsp_payload_length);
6368 
6369     rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
6370 
6371     if (rval) {
6372         ql_log(ql_log_warn, vha, 0x0188,
6373             "%s: iocb failed to execute -> %x\n", __func__, rval);
6374     } else if (rsp_els->comp_status) {
6375         ql_log(ql_log_warn, vha, 0x0189,
6376             "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
6377             __func__, rsp_els->comp_status,
6378             rsp_els->error_subcode_1, rsp_els->error_subcode_2);
6379     } else {
6380         ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
6381     }
6382 
6383 dealloc:
6384     if (stat)
6385         dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
6386             stat, stat_dma);
6387     if (sfp)
6388         dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
6389             sfp, sfp_dma);
6390     if (rsp_payload)
6391         dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
6392             rsp_payload, rsp_payload_dma);
6393     if (rsp_els)
6394         dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
6395             rsp_els, rsp_els_dma);
6396 }
6397 
6398 void
6399 qla24xx_free_purex_item(struct purex_item *item)
6400 {
6401     if (item == &item->vha->default_item)
6402         memset(&item->vha->default_item, 0, sizeof(struct purex_item));
6403     else
6404         kfree(item);
6405 }
6406 
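/*
 * Drain the PUREX list: splice all queued items onto a local list under
 * the lock, then run each item's handler and free the item without
 * holding the lock.
 */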
6407 void qla24xx_process_purex_list(struct purex_list *list)
6408 {
6409     struct list_head head = LIST_HEAD_INIT(head);
6410     struct purex_item *item, *next;
6411     ulong flags;
6412 
6413     spin_lock_irqsave(&list->lock, flags);
6414     list_splice_init(&list->head, &head);
6415     spin_unlock_irqrestore(&list->lock, flags);
6416 
6417     list_for_each_entry_safe(item, next, &head, list) {
6418         list_del(&item->list);
6419         item->process_item(item->vha, item);
6420         qla24xx_free_purex_item(item);
6421     }
6422 }
6423 
6424 /*
6425  * Context: task, can sleep
6426  */
6427 void
6428 qla83xx_idc_unlock(scsi_qla_host_t *base_vha, uint16_t requester_id)
6429 {
6430 #if 0
6431     uint16_t options = (requester_id << 15) | BIT_7;
6432 #endif
6433     uint16_t retry;
6434     uint32_t data;
6435     struct qla_hw_data *ha = base_vha->hw;
6436 
6437     might_sleep();
6438 
6439     /* IDC-unlock implementation using driver-unlock/lock-id
6440      * remote registers
6441      */
6442     retry = 0;
6443 retry_unlock:
6444     if (qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID, &data)
6445         == QLA_SUCCESS) {
6446         if (data == ha->portnum) {
6447             qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_UNLOCK, &data);
6448             /* Clearing lock-id by setting 0xff */
6449             qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID, 0xff);
6450         } else if (retry < 10) {
6451             /* SV: XXX: IDC unlock retrying needed here? */
6452 
6453             /* Retry for IDC-unlock */
6454             msleep(QLA83XX_WAIT_LOGIC_MS);
6455             retry++;
6456             ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
6457                 "Failed to release IDC lock, retrying=%d\n", retry);
6458             goto retry_unlock;
6459         }
6460     } else if (retry < 10) {
6461         /* Retry for IDC-unlock */
6462         msleep(QLA83XX_WAIT_LOGIC_MS);
6463         retry++;
6464         ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
6465             "Failed to read drv-lockid, retrying=%d\n", retry);
6466         goto retry_unlock;
6467     }
6468 
6469     return;
6470 
6471 #if 0
6472     /* XXX: IDC-unlock implementation using access-control mbx */
6473     retry = 0;
6474 retry_unlock2:
6475     if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) {
6476         if (retry < 10) {
6477             /* Retry for IDC-unlock */
6478             msleep(QLA83XX_WAIT_LOGIC_MS);
6479             retry++;
6480             ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
6481                 "Failed to release IDC lock, retrying=%d\n", retry);
6482             goto retry_unlock2;
6483         }
6484     }
6485 
6486     return;
6487 #endif
6488 }
6489 
6490 int
6491 __qla83xx_set_drv_presence(scsi_qla_host_t *vha)
6492 {
6493     int rval = QLA_SUCCESS;
6494     struct qla_hw_data *ha = vha->hw;
6495     uint32_t drv_presence;
6496 
6497     rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6498     if (rval == QLA_SUCCESS) {
6499         drv_presence |= (1 << ha->portnum);
6500         rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
6501             drv_presence);
6502     }
6503 
6504     return rval;
6505 }
6506 
6507 int
6508 qla83xx_set_drv_presence(scsi_qla_host_t *vha)
6509 {
6510     int rval = QLA_SUCCESS;
6511 
6512     qla83xx_idc_lock(vha, 0);
6513     rval = __qla83xx_set_drv_presence(vha);
6514     qla83xx_idc_unlock(vha, 0);
6515 
6516     return rval;
6517 }
6518 
6519 int
6520 __qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
6521 {
6522     int rval = QLA_SUCCESS;
6523     struct qla_hw_data *ha = vha->hw;
6524     uint32_t drv_presence;
6525 
6526     rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6527     if (rval == QLA_SUCCESS) {
6528         drv_presence &= ~(1 << ha->portnum);
6529         rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
6530             drv_presence);
6531     }
6532 
6533     return rval;
6534 }
6535 
6536 int
6537 qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
6538 {
6539     int rval = QLA_SUCCESS;
6540 
6541     qla83xx_idc_lock(vha, 0);
6542     rval = __qla83xx_clear_drv_presence(vha);
6543     qla83xx_idc_unlock(vha, 0);
6544 
6545     return rval;
6546 }
6547 
6548 static void
6549 qla83xx_need_reset_handler(scsi_qla_host_t *vha)
6550 {
6551     struct qla_hw_data *ha = vha->hw;
6552     uint32_t drv_ack, drv_presence;
6553     unsigned long ack_timeout;
6554 
6555     /* Wait for IDC ACK from all functions (DRV-ACK == DRV-PRESENCE) */
6556     ack_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
6557     while (1) {
6558         qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6559         qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6560         if ((drv_ack & drv_presence) == drv_presence)
6561             break;
6562 
6563         if (time_after_eq(jiffies, ack_timeout)) {
6564             ql_log(ql_log_warn, vha, 0xb067,
6565                 "RESET ACK TIMEOUT! drv_presence=0x%x "
6566                 "drv_ack=0x%x\n", drv_presence, drv_ack);
6567             /*
6568              * The function(s) which did not ack in time are forced
6569              * to withdraw any further participation in the IDC
6570              * reset.
6571              */
6572             if (drv_ack != drv_presence)
6573                 qla83xx_wr_reg(vha, QLA83XX_IDC_DRV_PRESENCE,
6574                     drv_ack);
6575             break;
6576         }
6577 
6578         qla83xx_idc_unlock(vha, 0);
6579         msleep(1000);
6580         qla83xx_idc_lock(vha, 0);
6581     }
6582 
6583     qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_COLD);
6584     ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
6585 }
6586 
6587 static int
6588 qla83xx_device_bootstrap(scsi_qla_host_t *vha)
6589 {
6590     int rval = QLA_SUCCESS;
6591     uint32_t idc_control;
6592 
6593     qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_INITIALIZING);
6594     ql_log(ql_log_info, vha, 0xb069, "HW State: INITIALIZING.\n");
6595 
6596     /* Clearing IDC-Control Graceful-Reset Bit before resetting f/w */
6597     __qla83xx_get_idc_control(vha, &idc_control);
6598     idc_control &= ~QLA83XX_IDC_GRACEFUL_RESET;
6599     __qla83xx_set_idc_control(vha, idc_control);
6600 
6601     qla83xx_idc_unlock(vha, 0);
6602     rval = qla83xx_restart_nic_firmware(vha);
6603     qla83xx_idc_lock(vha, 0);
6604 
6605     if (rval != QLA_SUCCESS) {
6606         ql_log(ql_log_fatal, vha, 0xb06a,
6607             "Failed to restart NIC f/w.\n");
6608         qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_FAILED);
6609         ql_log(ql_log_info, vha, 0xb06b, "HW State: FAILED.\n");
6610     } else {
6611         ql_dbg(ql_dbg_p3p, vha, 0xb06c,
6612             "Success in restarting nic f/w.\n");
6613         qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE, QLA8XXX_DEV_READY);
6614         ql_log(ql_log_info, vha, 0xb06d, "HW State: READY.\n");
6615     }
6616 
6617     return rval;
6618 }
6619 
6620 /* Assumes idc_lock always held on entry */
6621 int
6622 qla83xx_idc_state_handler(scsi_qla_host_t *base_vha)
6623 {
6624     struct qla_hw_data *ha = base_vha->hw;
6625     int rval = QLA_SUCCESS;
6626     unsigned long dev_init_timeout;
6627     uint32_t dev_state;
6628 
6629     /* Wait for MAX-INIT-TIMEOUT for the device to go ready */
6630     dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
6631 
6632     while (1) {
6633 
6634         if (time_after_eq(jiffies, dev_init_timeout)) {
6635             ql_log(ql_log_warn, base_vha, 0xb06e,
6636                 "Initialization TIMEOUT!\n");
6637             /* Init timeout. Disable further NIC Core
6638              * communication.
6639              */
6640             qla83xx_wr_reg(base_vha, QLA83XX_IDC_DEV_STATE,
6641                 QLA8XXX_DEV_FAILED);
6642             ql_log(ql_log_info, base_vha, 0xb06f,
6643                 "HW State: FAILED.\n");
6644         }
6645 
6646         qla83xx_rd_reg(base_vha, QLA83XX_IDC_DEV_STATE, &dev_state);
6647         switch (dev_state) {
6648         case QLA8XXX_DEV_READY:
6649             if (ha->flags.nic_core_reset_owner)
6650                 qla83xx_idc_audit(base_vha,
6651                     IDC_AUDIT_COMPLETION);
6652             ha->flags.nic_core_reset_owner = 0;
6653             ql_dbg(ql_dbg_p3p, base_vha, 0xb070,
6654                 "Reset_owner reset by 0x%x.\n",
6655                 ha->portnum);
6656             goto exit;
6657         case QLA8XXX_DEV_COLD:
6658             if (ha->flags.nic_core_reset_owner)
6659                 rval = qla83xx_device_bootstrap(base_vha);
6660             else {
6661                 /* Wait for AEN to change device-state */
6662                 qla83xx_idc_unlock(base_vha, 0);
6663                 msleep(1000);
6664                 qla83xx_idc_lock(base_vha, 0);
6665             }
6666             break;
6667         case QLA8XXX_DEV_INITIALIZING:
6668             /* Wait for AEN to change device-state */
6669             qla83xx_idc_unlock(base_vha, 0);
6670             msleep(1000);
6671             qla83xx_idc_lock(base_vha, 0);
6672             break;
6673         case QLA8XXX_DEV_NEED_RESET:
6674             if (!ql2xdontresethba && ha->flags.nic_core_reset_owner)
6675                 qla83xx_need_reset_handler(base_vha);
6676             else {
6677                 /* Wait for AEN to change device-state */
6678                 qla83xx_idc_unlock(base_vha, 0);
6679                 msleep(1000);
6680                 qla83xx_idc_lock(base_vha, 0);
6681             }
6682             /* reset timeout value after need reset handler */
6683             dev_init_timeout = jiffies +
6684                 (ha->fcoe_dev_init_timeout * HZ);
6685             break;
6686         case QLA8XXX_DEV_NEED_QUIESCENT:
6687             /* XXX: DEBUG for now */
6688             qla83xx_idc_unlock(base_vha, 0);
6689             msleep(1000);
6690             qla83xx_idc_lock(base_vha, 0);
6691             break;
6692         case QLA8XXX_DEV_QUIESCENT:
6693             /* XXX: DEBUG for now */
6694             if (ha->flags.quiesce_owner)
6695                 goto exit;
6696 
6697             qla83xx_idc_unlock(base_vha, 0);
6698             msleep(1000);
6699             qla83xx_idc_lock(base_vha, 0);
6700             dev_init_timeout = jiffies +
6701                 (ha->fcoe_dev_init_timeout * HZ);
6702             break;
6703         case QLA8XXX_DEV_FAILED:
6704             if (ha->flags.nic_core_reset_owner)
6705                 qla83xx_idc_audit(base_vha,
6706                     IDC_AUDIT_COMPLETION);
6707             ha->flags.nic_core_reset_owner = 0;
6708             __qla83xx_clear_drv_presence(base_vha);
6709             qla83xx_idc_unlock(base_vha, 0);
6710             qla8xxx_dev_failed_handler(base_vha);
6711             rval = QLA_FUNCTION_FAILED;
6712             qla83xx_idc_lock(base_vha, 0);
6713             goto exit;
6714         case QLA8XXX_BAD_VALUE:
6715             qla83xx_idc_unlock(base_vha, 0);
6716             msleep(1000);
6717             qla83xx_idc_lock(base_vha, 0);
6718             break;
6719         default:
6720             ql_log(ql_log_warn, base_vha, 0xb071,
6721                 "Unknown Device State: %x.\n", dev_state);
6722             qla83xx_idc_unlock(base_vha, 0);
6723             qla8xxx_dev_failed_handler(base_vha);
6724             rval = QLA_FUNCTION_FAILED;
6725             qla83xx_idc_lock(base_vha, 0);
6726             goto exit;
6727         }
6728     }
6729 
6730 exit:
6731     return rval;
6732 }
6733 
6734 void
6735 qla2x00_disable_board_on_pci_error(struct work_struct *work)
6736 {
6737     struct qla_hw_data *ha = container_of(work, struct qla_hw_data,
6738         board_disable);
6739     struct pci_dev *pdev = ha->pdev;
6740     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6741 
6742     ql_log(ql_log_warn, base_vha, 0x015b,
6743         "Disabling adapter.\n");
6744 
6745     if (!atomic_read(&pdev->enable_cnt)) {
6746         ql_log(ql_log_info, base_vha, 0xfffc,
6747             "PCI device disabled, no action req for PCI error=%lx\n",
6748             base_vha->pci_flags);
6749         return;
6750     }
6751 
6752     /*
6753      * If the UNLOADING flag is already set, the unload is already in
6754      * progress wherever it was set first; let that path finish it.
6755      */
6756     if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
6757         return;
6758 
6759     qla2x00_wait_for_sess_deletion(base_vha);
6760 
6761     qla2x00_delete_all_vps(ha, base_vha);
6762 
6763     qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
6764 
6765     qla2x00_dfs_remove(base_vha);
6766 
6767     qla84xx_put_chip(base_vha);
6768 
6769     if (base_vha->timer_active)
6770         qla2x00_stop_timer(base_vha);
6771 
6772     base_vha->flags.online = 0;
6773 
6774     qla2x00_destroy_deferred_work(ha);
6775 
6776     /*
6777      * Do not try to stop beacon blink as it will issue a mailbox
6778      * command.
6779      */
6780     qla2x00_free_sysfs_attr(base_vha, false);
6781 
6782     fc_remove_host(base_vha->host);
6783 
6784     scsi_remove_host(base_vha->host);
6785 
6786     base_vha->flags.init_done = 0;
6787     qla25xx_delete_queues(base_vha);
6788     qla2x00_free_fcports(base_vha);
6789     qla2x00_free_irqs(base_vha);
6790     qla2x00_mem_free(ha);
6791     qla82xx_md_free(base_vha);
6792     qla2x00_free_queues(ha);
6793 
6794     qla2x00_unmap_iobases(ha);
6795 
6796     pci_release_selected_regions(ha->pdev, ha->bars);
6797     pci_disable_pcie_error_reporting(pdev);
6798     pci_disable_device(pdev);
6799 
6800     /*
6801      * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
6802      */
6803 }
6804 
6805 /**************************************************************************
6806 * qla2x00_do_dpc
6807 *   This kernel thread is a task that is scheduled by the interrupt handler
6808 *   to perform the background processing for interrupts.
6809 *
6810 * Notes:
6811 * This task always runs in the context of a kernel thread.  It
6812 * is kicked off by the driver's detect code and starts up
6813 * one per adapter. It immediately goes to sleep and waits for
6814 * some fibre event.  When either the interrupt handler or
6815 * the timer routine detects an event, it will set one of the task
6816 * bits and then wake us up.
6817 **************************************************************************/
6818 static int
6819 qla2x00_do_dpc(void *data)
6820 {
6821     scsi_qla_host_t *base_vha;
6822     struct qla_hw_data *ha;
6823     uint32_t online;
6824     struct qla_qpair *qpair;
6825 
6826     ha = (struct qla_hw_data *)data;
6827     base_vha = pci_get_drvdata(ha->pdev);
6828 
6829     set_user_nice(current, MIN_NICE);
6830 
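    /* Classic kthread sleep loop: mark the task TASK_INTERRUPTIBLE
     * before checking kthread_should_stop(), then schedule() until
     * qla2xxx_wake_dpc() wakes us. */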
6831     set_current_state(TASK_INTERRUPTIBLE);
6832     while (!kthread_should_stop()) {
6833         ql_dbg(ql_dbg_dpc, base_vha, 0x4000,
6834             "DPC handler sleeping.\n");
6835 
6836         schedule();
6837 
6838         if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags))
6839             qla_pci_set_eeh_busy(base_vha);
6840 
6841         if (!base_vha->flags.init_done || ha->flags.mbox_busy)
6842             goto end_loop;
6843 
6844         if (ha->flags.eeh_busy) {
6845             ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
6846                 "eeh_busy=%d.\n", ha->flags.eeh_busy);
6847             goto end_loop;
6848         }
6849 
6850         ha->dpc_active = 1;
6851 
6852         ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
6853             "DPC handler waking up, dpc_flags=0x%lx.\n",
6854             base_vha->dpc_flags);
6855 
6856         if (test_bit(UNLOADING, &base_vha->dpc_flags))
6857             break;
6858 
6859         if (IS_P3P_TYPE(ha)) {
6860             if (IS_QLA8044(ha)) {
6861                 if (test_and_clear_bit(ISP_UNRECOVERABLE,
6862                     &base_vha->dpc_flags)) {
6863                     qla8044_idc_lock(ha);
6864                     qla8044_wr_direct(base_vha,
6865                         QLA8044_CRB_DEV_STATE_INDEX,
6866                         QLA8XXX_DEV_FAILED);
6867                     qla8044_idc_unlock(ha);
6868                     ql_log(ql_log_info, base_vha, 0x4004,
6869                         "HW State: FAILED.\n");
6870                     qla8044_device_state_handler(base_vha);
6871                     continue;
6872                 }
6873 
6874             } else {
6875                 if (test_and_clear_bit(ISP_UNRECOVERABLE,
6876                     &base_vha->dpc_flags)) {
6877                     qla82xx_idc_lock(ha);
6878                     qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
6879                         QLA8XXX_DEV_FAILED);
6880                     qla82xx_idc_unlock(ha);
6881                     ql_log(ql_log_info, base_vha, 0x0151,
6882                         "HW State: FAILED.\n");
6883                     qla82xx_device_state_handler(base_vha);
6884                     continue;
6885                 }
6886             }
6887 
6888             if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
6889                 &base_vha->dpc_flags)) {
6890 
6891                 ql_dbg(ql_dbg_dpc, base_vha, 0x4005,
6892                     "FCoE context reset scheduled.\n");
6893                 if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
6894                     &base_vha->dpc_flags))) {
6895                     if (qla82xx_fcoe_ctx_reset(base_vha)) {
6896                         /* FCoE-ctx reset failed.
6897                          * Escalate to chip-reset
6898                          */
6899                         set_bit(ISP_ABORT_NEEDED,
6900                             &base_vha->dpc_flags);
6901                     }
6902                     clear_bit(ABORT_ISP_ACTIVE,
6903                         &base_vha->dpc_flags);
6904                 }
6905 
6906                 ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
6907                     "FCoE context reset end.\n");
6908             }
6909         } else if (IS_QLAFX00(ha)) {
6910             if (test_and_clear_bit(ISP_UNRECOVERABLE,
6911                 &base_vha->dpc_flags)) {
6912                 ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
6913                     "Firmware Reset Recovery\n");
6914                 if (qlafx00_reset_initialize(base_vha)) {
6915                     /* Failed. Abort isp later. */
6916                     if (!test_bit(UNLOADING,
6917                         &base_vha->dpc_flags)) {
6918                         set_bit(ISP_UNRECOVERABLE,
6919                             &base_vha->dpc_flags);
6920                         ql_dbg(ql_dbg_dpc, base_vha,
6921                             0x4021,
6922                             "Reset Recovery Failed\n");
6923                     }
6924                 }
6925             }
6926 
6927             if (test_and_clear_bit(FX00_TARGET_SCAN,
6928                 &base_vha->dpc_flags)) {
6929                 ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
6930                     "ISPFx00 Target Scan scheduled\n");
6931                 if (qlafx00_rescan_isp(base_vha)) {
6932                     if (!test_bit(UNLOADING,
6933                         &base_vha->dpc_flags))
6934                         set_bit(ISP_UNRECOVERABLE,
6935                             &base_vha->dpc_flags);
6936                     ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
6937                         "ISPFx00 Target Scan Failed\n");
6938                 }
6939                 ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
6940                     "ISPFx00 Target Scan End\n");
6941             }
6942             if (test_and_clear_bit(FX00_HOST_INFO_RESEND,
6943                 &base_vha->dpc_flags)) {
6944                 ql_dbg(ql_dbg_dpc, base_vha, 0x4023,
6945                     "ISPFx00 Host Info resend scheduled\n");
6946                 qlafx00_fx_disc(base_vha,
6947                     &base_vha->hw->mr.fcport,
6948                     FXDISC_REG_HOST_INFO);
6949             }
6950         }
6951 
6952         if (test_and_clear_bit(DETECT_SFP_CHANGE,
6953             &base_vha->dpc_flags)) {
6954             /* Semantic:
6955              *  - NO-OP -- await next ISP-ABORT. Preferred method
6956              *             to minimize disruptions that will occur
6957              *             when a forced chip-reset occurs.
6958              *  - Force -- ISP-ABORT scheduled.
6959              */
6960             /* set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); */
6961         }
6962 
6963         if (test_and_clear_bit
6964             (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
6965             !test_bit(UNLOADING, &base_vha->dpc_flags)) {
6966             bool do_reset = true;
6967 
6968             switch (base_vha->qlini_mode) {
6969             case QLA2XXX_INI_MODE_ENABLED:
6970                 break;
6971             case QLA2XXX_INI_MODE_DISABLED:
6972                 if (!qla_tgt_mode_enabled(base_vha) &&
6973                     !ha->flags.fw_started)
6974                     do_reset = false;
6975                 break;
6976             case QLA2XXX_INI_MODE_DUAL:
6977                 if (!qla_dual_mode_enabled(base_vha) &&
6978                     !ha->flags.fw_started)
6979                     do_reset = false;
6980                 break;
6981             default:
6982                 break;
6983             }
6984 
6985             if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
6986                 &base_vha->dpc_flags))) {
6987                 base_vha->flags.online = 1;
6988                 ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
6989                     "ISP abort scheduled.\n");
6990                 if (ha->isp_ops->abort_isp(base_vha)) {
6991                     /* failed. retry later */
6992                     set_bit(ISP_ABORT_NEEDED,
6993                         &base_vha->dpc_flags);
6994                 }
6995                 clear_bit(ABORT_ISP_ACTIVE,
6996                         &base_vha->dpc_flags);
6997                 ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
6998                     "ISP abort end.\n");
6999             }
7000         }
7001 
7002         if (test_bit(PROCESS_PUREX_IOCB, &base_vha->dpc_flags)) {
7003             if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
7004                 qla24xx_process_purex_list
7005                     (&base_vha->purex_list);
7006                 clear_bit(PROCESS_PUREX_IOCB,
7007                     &base_vha->dpc_flags);
7008             }
7009         }
7010 
7011         if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
7012             &base_vha->dpc_flags)) {
7013             qla2x00_update_fcports(base_vha);
7014         }
7015 
7016         if (IS_QLAFX00(ha))
7017             goto loop_resync_check;
7018 
7019         if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
7020             ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
7021                 "Quiescence mode scheduled.\n");
7022             if (IS_P3P_TYPE(ha)) {
7023                 if (IS_QLA82XX(ha))
7024                     qla82xx_device_state_handler(base_vha);
7025                 if (IS_QLA8044(ha))
7026                     qla8044_device_state_handler(base_vha);
7027                 clear_bit(ISP_QUIESCE_NEEDED,
7028                     &base_vha->dpc_flags);
7029                 if (!ha->flags.quiesce_owner) {
7030                     qla2x00_perform_loop_resync(base_vha);
7031                     if (IS_QLA82XX(ha)) {
7032                         qla82xx_idc_lock(ha);
7033                         qla82xx_clear_qsnt_ready(
7034                             base_vha);
7035                         qla82xx_idc_unlock(ha);
7036                     } else if (IS_QLA8044(ha)) {
7037                         qla8044_idc_lock(ha);
7038                         qla8044_clear_qsnt_ready(
7039                             base_vha);
7040                         qla8044_idc_unlock(ha);
7041                     }
7042                 }
7043             } else {
7044                 clear_bit(ISP_QUIESCE_NEEDED,
7045                     &base_vha->dpc_flags);
7046                 qla2x00_quiesce_io(base_vha);
7047             }
7048             ql_dbg(ql_dbg_dpc, base_vha, 0x400a,
7049                 "Quiescence mode end.\n");
7050         }
7051 
7052         if (test_and_clear_bit(RESET_MARKER_NEEDED,
7053                 &base_vha->dpc_flags) &&
7054             (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
7055 
7056             ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
7057                 "Reset marker scheduled.\n");
7058             qla2x00_rst_aen(base_vha);
7059             clear_bit(RESET_ACTIVE, &base_vha->dpc_flags);
7060             ql_dbg(ql_dbg_dpc, base_vha, 0x400c,
7061                 "Reset marker end.\n");
7062         }
7063 
7064         /* Retry each device up to login retry count */
7065         if (test_bit(RELOGIN_NEEDED, &base_vha->dpc_flags) &&
7066             !test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags) &&
7067             atomic_read(&base_vha->loop_state) != LOOP_DOWN) {
7068 
7069             if (!base_vha->relogin_jif ||
7070                 time_after_eq(jiffies, base_vha->relogin_jif)) {
7071                 base_vha->relogin_jif = jiffies + HZ;
7072                 clear_bit(RELOGIN_NEEDED, &base_vha->dpc_flags);
7073 
7074                 ql_dbg(ql_dbg_disc, base_vha, 0x400d,
7075                     "Relogin scheduled.\n");
7076                 qla24xx_post_relogin_work(base_vha);
7077             }
7078         }
7079 loop_resync_check:
7080         if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
7081             &base_vha->dpc_flags)) {
7082 
7083             ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
7084                 "Loop resync scheduled.\n");
7085 
7086             if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
7087                 &base_vha->dpc_flags))) {
7088 
7089                 qla2x00_loop_resync(base_vha);
7090 
7091                 clear_bit(LOOP_RESYNC_ACTIVE,
7092                         &base_vha->dpc_flags);
7093             }
7094 
7095             ql_dbg(ql_dbg_dpc, base_vha, 0x4010,
7096                 "Loop resync end.\n");
7097         }
7098 
7099         if (IS_QLAFX00(ha))
7100             goto intr_on_check;
7101 
7102         if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
7103             atomic_read(&base_vha->loop_state) == LOOP_READY) {
7104             clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
7105             qla2xxx_flash_npiv_conf(base_vha);
7106         }
7107 
7108 intr_on_check:
7109         if (!ha->interrupts_on)
7110             ha->isp_ops->enable_intrs(ha);
7111 
7112         if (test_and_clear_bit(BEACON_BLINK_NEEDED,
7113                     &base_vha->dpc_flags)) {
7114             if (ha->beacon_blink_led == 1)
7115                 ha->isp_ops->beacon_blink(base_vha);
7116         }
7117 
7118         /* qpair online check */
7119         if (test_and_clear_bit(QPAIR_ONLINE_CHECK_NEEDED,
7120             &base_vha->dpc_flags)) {
7121             if (ha->flags.eeh_busy ||
7122                 ha->flags.pci_channel_io_perm_failure)
7123                 online = 0;
7124             else
7125                 online = 1;
7126 
7127             mutex_lock(&ha->mq_lock);
7128             list_for_each_entry(qpair, &base_vha->qp_list,
7129                 qp_list_elem)
7130                 qpair->online = online;
7131             mutex_unlock(&ha->mq_lock);
7132         }
7133 
7134         if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
7135                        &base_vha->dpc_flags)) {
7136             u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold;
7137 
7138             if (threshold > ha->orig_fw_xcb_count)
7139                 threshold = ha->orig_fw_xcb_count;
7140 
7141             ql_log(ql_log_info, base_vha, 0xffffff,
7142                    "SET ZIO Activity exchange threshold to %d.\n",
7143                    threshold);
7144             if (qla27xx_set_zio_threshold(base_vha, threshold)) {
7145                 ql_log(ql_log_info, base_vha, 0xffffff,
7146                        "Unable to SET ZIO Activity exchange threshold to %d.\n",
7147                        threshold);
7148             }
7149         }
7150 
7151         if (!IS_QLAFX00(ha))
7152             qla2x00_do_dpc_all_vps(base_vha);
7153 
7154         if (test_and_clear_bit(N2N_LINK_RESET,
7155             &base_vha->dpc_flags)) {
7156             qla2x00_lip_reset(base_vha);
7157         }
7158 
7159         ha->dpc_active = 0;
7160 end_loop:
7161         set_current_state(TASK_INTERRUPTIBLE);
7162     } /* End of while(1) */
7163     __set_current_state(TASK_RUNNING);
7164 
7165     ql_dbg(ql_dbg_dpc, base_vha, 0x4011,
7166         "DPC handler exiting.\n");
7167 
7168     /*
7169      * Make sure that nobody tries to wake us up again.
7170      */
7171     ha->dpc_active = 0;
7172 
7173     /* Cleanup any residual CTX SRBs. */
7174     qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
7175 
7176     return 0;
7177 }
7178 
7179 void
7180 qla2xxx_wake_dpc(struct scsi_qla_host *vha)
7181 {
7182     struct qla_hw_data *ha = vha->hw;
7183     struct task_struct *t = ha->dpc_thread;
7184 
7185     if (!test_bit(UNLOADING, &vha->dpc_flags) && t)
7186         wake_up_process(t);
7187 }
7188 
7189 /*
7190 *  qla2x00_rst_aen
7191 *      Processes asynchronous reset.
7192 *
7193 * Input:
7194 *      vha = adapter block pointer.
7195 */
7196 static void
7197 qla2x00_rst_aen(scsi_qla_host_t *vha)
7198 {
7199     if (vha->flags.online && !vha->flags.reset_active &&
7200         !atomic_read(&vha->loop_down_timer) &&
7201         !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) {
7202         do {
7203             clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
7204 
7205             /*
7206              * Issue marker command only when we are going to start
7207              * the I/O.
7208              */
7209             vha->marker_needed = 1;
7210         } while (!atomic_read(&vha->loop_down_timer) &&
7211             (test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)));
7212     }
7213 }
7214 
7215 static bool qla_do_heartbeat(struct scsi_qla_host *vha)
7216 {
7217     struct qla_hw_data *ha = vha->hw;
7218     u32 cmpl_cnt;
7219     u16 i;
7220     bool do_heartbeat = false;
7221 
7222     /*
7223      * Allow do_heartbeat only if we don't have any active interrupts,
7224      * but there are still IOs outstanding with firmware.
7225      */
7226     cmpl_cnt = ha->base_qpair->cmd_completion_cnt;
7227     if (cmpl_cnt == ha->base_qpair->prev_completion_cnt &&
7228         cmpl_cnt != ha->base_qpair->cmd_cnt) {
7229         do_heartbeat = true;
7230         goto skip;
7231     }
7232     ha->base_qpair->prev_completion_cnt = cmpl_cnt;
7233 
7234     for (i = 0; i < ha->max_qpairs; i++) {
7235         if (ha->queue_pair_map[i]) {
7236             cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt;
7237             if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt &&
7238                 cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) {
7239                 do_heartbeat = true;
7240                 break;
7241             }
7242             ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt;
7243         }
7244     }
7245 
7246 skip:
7247     return do_heartbeat;
7248 }
7249 
7250 static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started)
7251 {
7252     struct qla_hw_data *ha = vha->hw;
7253 
7254     if (vha->vp_idx)
7255         return;
7256 
7257     if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha))
7258         return;
7259 
7260     /*
7261      * dpc thread cannot run if heartbeat is running at the same time.
7262      * We also do not want to starve heartbeat task. Therefore, do
7263      * heartbeat task at least once every 5 seconds.
7264      */
7265     if (dpc_started &&
7266         time_before(jiffies, ha->last_heartbeat_run_jiffies + 5 * HZ))
7267         return;
7268 
7269     if (qla_do_heartbeat(vha)) {
7270         ha->last_heartbeat_run_jiffies = jiffies;
7271         queue_work(ha->wq, &ha->heartbeat_work);
7272     }
7273 }
7274 
7275 static void qla_wind_down_chip(scsi_qla_host_t *vha)
7276 {
7277     struct qla_hw_data *ha = vha->hw;
7278 
7279     if (!ha->flags.eeh_busy)
7280         return;
7281     if (ha->pci_error_state)
7282         /* system is trying to recover */
7283         return;
7284 
7285     /*
7286      * Current system is not handling PCIE error.  At this point, this is
7287      * best effort to wind down the adapter.
7288      */
7289     if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) &&
7290         !ha->flags.eeh_flush) {
7291         ql_log(ql_log_info, vha, 0x9009,
7292             "PCI Error detected, attempting to reset hardware.\n");
7293 
7294         ha->isp_ops->reset_chip(vha);
7295         ha->isp_ops->disable_intrs(ha);
7296 
7297         ha->flags.eeh_flush = EEH_FLUSH_RDY;
7298         ha->eeh_jif = jiffies;
7299 
7300     } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY &&
7301         time_after_eq(jiffies, ha->eeh_jif +  5 * HZ)) {
7302         pci_clear_master(ha->pdev);
7303 
7304         /* flush all command */
7305         qla2x00_abort_isp_cleanup(vha);
7306         ha->flags.eeh_flush = EEH_FLUSH_DONE;
7307 
7308         ql_log(ql_log_info, vha, 0x900a,
7309             "PCI Error handling complete, all IOs aborted.\n");
7310     }
7311 }
7312 
7313 /**************************************************************************
7314 *   qla2x00_timer
7315 *
7316 * Description:
7317 *   One second timer
7318 *
7319 * Context: Interrupt
7320 ***************************************************************************/
7321 void
7322 qla2x00_timer(struct timer_list *t)
7323 {
7324     scsi_qla_host_t *vha = from_timer(vha, t, timer);
7325     unsigned long   cpu_flags = 0;
7326     int     start_dpc = 0;
7327     int     index;
7328     srb_t       *sp;
7329     uint16_t        w;
7330     struct qla_hw_data *ha = vha->hw;
7331     struct req_que *req;
7332     unsigned long flags;
7333     fc_port_t *fcport = NULL;
7334 
7335     if (ha->flags.eeh_busy) {
7336         qla_wind_down_chip(vha);
7337 
7338         ql_dbg(ql_dbg_timer, vha, 0x6000,
7339             "EEH = %d, restarting timer.\n",
7340             ha->flags.eeh_busy);
7341         qla2x00_restart_timer(vha, WATCH_INTERVAL);
7342         return;
7343     }
7344 
7345     /*
7346      * Hardware read to raise pending EEH errors during mailbox waits. If
7347      * the read returns -1 then disable the board.
7348      */
7349     if (!pci_channel_offline(ha->pdev)) {
7350         pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
7351         qla2x00_check_reg16_for_disconnect(vha, w);
7352     }
7353 
7354     /* Make sure qla82xx_watchdog is run only for physical port */
7355     if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
7356         if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
7357             start_dpc++;
7358         if (IS_QLA82XX(ha))
7359             qla82xx_watchdog(vha);
7360         else if (IS_QLA8044(ha))
7361             qla8044_watchdog(vha);
7362     }
7363 
7364     if (!vha->vp_idx && IS_QLAFX00(ha))
7365         qlafx00_timer_routine(vha);
7366 
7367     if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
7368         vha->link_down_time++;
7369 
7370     spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
7371     list_for_each_entry(fcport, &vha->vp_fcports, list) {
7372         if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
7373             fcport->tgt_link_down_time++;
7374     }
7375     spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
7376 
7377     /* Loop down handler. */
7378     if (atomic_read(&vha->loop_down_timer) > 0 &&
7379         !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
7380         !(test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))
7381         && vha->flags.online) {
7382 
7383         if (atomic_read(&vha->loop_down_timer) ==
7384             vha->loop_down_abort_time) {
7385 
7386             ql_log(ql_log_info, vha, 0x6008,
7387                 "Loop down - aborting the queues before time expires.\n");
7388 
7389             if (!IS_QLA2100(ha) && vha->link_down_timeout)
7390                 atomic_set(&vha->loop_state, LOOP_DEAD);
7391 
7392             /*
7393              * Schedule an ISP abort to return any FCP2-device
7394              * commands.
7395              */
7396             /* NPIV - scan physical port only */
7397             if (!vha->vp_idx) {
7398                 spin_lock_irqsave(&ha->hardware_lock,
7399                     cpu_flags);
7400                 req = ha->req_q_map[0];
7401                 for (index = 1;
7402                     index < req->num_outstanding_cmds;
7403                     index++) {
7404                     fc_port_t *sfcp;
7405 
7406                     sp = req->outstanding_cmds[index];
7407                     if (!sp)
7408                         continue;
7409                     if (sp->cmd_type != TYPE_SRB)
7410                         continue;
7411                     if (sp->type != SRB_SCSI_CMD)
7412                         continue;
7413                     sfcp = sp->fcport;
7414                     if (!(sfcp->flags & FCF_FCP2_DEVICE))
7415                         continue;
7416 
7417                     if (IS_QLA82XX(ha))
7418                         set_bit(FCOE_CTX_RESET_NEEDED,
7419                             &vha->dpc_flags);
7420                     else
7421                         set_bit(ISP_ABORT_NEEDED,
7422                             &vha->dpc_flags);
7423                     break;
7424                 }
7425                 spin_unlock_irqrestore(&ha->hardware_lock,
7426                                 cpu_flags);
7427             }
7428             start_dpc++;
7429         }
7430 
7431         /* if the loop has been down for 4 minutes, reinit adapter */
7432         if (atomic_dec_and_test(&vha->loop_down_timer)) {
7433             if (!(vha->device_flags & DFLG_NO_CABLE)) {
7434                 ql_log(ql_log_warn, vha, 0x6009,
7435                     "Loop down - aborting ISP.\n");
7436 
7437                 if (IS_QLA82XX(ha))
7438                     set_bit(FCOE_CTX_RESET_NEEDED,
7439                         &vha->dpc_flags);
7440                 else
7441                     set_bit(ISP_ABORT_NEEDED,
7442                         &vha->dpc_flags);
7443             }
7444         }
7445         ql_dbg(ql_dbg_timer, vha, 0x600a,
7446             "Loop down - seconds remaining %d.\n",
7447             atomic_read(&vha->loop_down_timer));
7448     }
7449     /* Check if beacon LED needs to be blinked for physical host only */
7450     if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
7451         /* There is no beacon_blink function for ISP82xx */
7452         if (!IS_P3P_TYPE(ha)) {
7453             set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
7454             start_dpc++;
7455         }
7456     }
7457 
7458     /* check if edif running */
7459     if (vha->hw->flags.edif_enabled)
7460         qla_edif_timer(vha);
7461 
7462     /* Process any deferred work. */
7463     if (!list_empty(&vha->work_list)) {
7464         unsigned long flags;
7465         bool q = false;
7466 
7467         spin_lock_irqsave(&vha->work_lock, flags);
7468         if (!test_and_set_bit(IOCB_WORK_ACTIVE, &vha->dpc_flags))
7469             q = true;
7470         spin_unlock_irqrestore(&vha->work_lock, flags);
7471         if (q)
7472             queue_work(vha->hw->wq, &vha->iocb_work);
7473     }
7474 
7475     /*
7476      * FC-NVME
7477      * see if the active AEN count has changed from what was last reported.
7478      */
7479     index = atomic_read(&ha->nvme_active_aen_cnt);
7480     if (!vha->vp_idx &&
7481         (index != ha->nvme_last_rptd_aen) &&
7482         ha->zio_mode == QLA_ZIO_MODE_6 &&
7483         !ha->flags.host_shutting_down) {
7484         ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
7485         ql_log(ql_log_info, vha, 0x3002,
7486             "nvme: Sched: Set ZIO exchange threshold to %d.\n",
7487             ha->nvme_last_rptd_aen);
7488         set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
7489         start_dpc++;
7490     }
7491 
7492     if (!vha->vp_idx &&
7493         atomic_read(&ha->zio_threshold) != ha->last_zio_threshold &&
7494         IS_ZIO_THRESHOLD_CAPABLE(ha)) {
7495         ql_log(ql_log_info, vha, 0x3002,
7496             "Sched: Set ZIO exchange threshold to %d.\n",
7497             ha->last_zio_threshold);
7498         ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
7499         set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
7500         start_dpc++;
7501     }
7502 
7503     /* borrowing w to signify dpc will run */
7504     w = 0;
7505     /* Schedule the DPC routine if needed */
7506     if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
7507         test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
7508         test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
7509         start_dpc ||
7510         test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
7511         test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
7512         test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
7513         test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
7514         test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
7515         test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
7516         test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
7517         ql_dbg(ql_dbg_timer, vha, 0x600b,
7518             "isp_abort_needed=%d loop_resync_needed=%d "
7519             "fcport_update_needed=%d start_dpc=%d "
7520             "reset_marker_needed=%d.\n",
7521             test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
7522             test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
7523             test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
7524             start_dpc,
7525             test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
7526         ql_dbg(ql_dbg_timer, vha, 0x600c,
7527             "beacon_blink_needed=%d isp_unrecoverable=%d "
7528             "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
7529             "relogin_needed=%d, Process_purex_iocb=%d.\n",
7530             test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
7531             test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
7532             test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
7533             test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
7534             test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
7535             test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags));
7536         qla2xxx_wake_dpc(vha);
7537         w = 1;
7538     }
7539 
7540     qla_heart_beat(vha, w);
7541 
7542     qla2x00_restart_timer(vha, WATCH_INTERVAL);
7543 }
7544 
7545 /* Firmware interface routines. */
7546 
7547 #define FW_ISP21XX  0
7548 #define FW_ISP22XX  1
7549 #define FW_ISP2300  2
7550 #define FW_ISP2322  3
7551 #define FW_ISP24XX  4
7552 #define FW_ISP25XX  5
7553 #define FW_ISP81XX  6
7554 #define FW_ISP82XX  7
7555 #define FW_ISP2031  8
7556 #define FW_ISP8031  9
7557 #define FW_ISP27XX  10
7558 #define FW_ISP28XX  11
7559 
7560 #define FW_FILE_ISP21XX "ql2100_fw.bin"
7561 #define FW_FILE_ISP22XX "ql2200_fw.bin"
7562 #define FW_FILE_ISP2300 "ql2300_fw.bin"
7563 #define FW_FILE_ISP2322 "ql2322_fw.bin"
7564 #define FW_FILE_ISP24XX "ql2400_fw.bin"
7565 #define FW_FILE_ISP25XX "ql2500_fw.bin"
7566 #define FW_FILE_ISP81XX "ql8100_fw.bin"
7567 #define FW_FILE_ISP82XX "ql8200_fw.bin"
7568 #define FW_FILE_ISP2031 "ql2600_fw.bin"
7569 #define FW_FILE_ISP8031 "ql8300_fw.bin"
7570 #define FW_FILE_ISP27XX "ql2700_fw.bin"
7571 #define FW_FILE_ISP28XX "ql2800_fw.bin"
7572 
7573 
7574 static DEFINE_MUTEX(qla_fw_lock);
7575 
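/* Firmware images, indexed by the FW_ISP* constants above.  For the
 * older parts, .segs appears to hold the fixed RISC load addresses of
 * the image's segments (zero-terminated); later ISPs describe their
 * load layout inside the image itself. */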
7576 static struct fw_blob qla_fw_blobs[] = {
7577     { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
7578     { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
7579     { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
7580     { .name = FW_FILE_ISP2322, .segs = { 0x800, 0x1c000, 0x1e000, 0 }, },
7581     { .name = FW_FILE_ISP24XX, },
7582     { .name = FW_FILE_ISP25XX, },
7583     { .name = FW_FILE_ISP81XX, },
7584     { .name = FW_FILE_ISP82XX, },
7585     { .name = FW_FILE_ISP2031, },
7586     { .name = FW_FILE_ISP8031, },
7587     { .name = FW_FILE_ISP27XX, },
7588     { .name = FW_FILE_ISP28XX, },
7589     { .name = NULL, },
7590 };
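
/*
 * For the pre-FWI2 parts, .segs above lists the RISC load address of
 * each firmware segment (zero-terminated); FWI2-capable adapters (24xx
 * and later) parse the image themselves, so no segment table is needed.
 */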
7591 
7592 struct fw_blob *
7593 qla2x00_request_firmware(scsi_qla_host_t *vha)
7594 {
7595     struct qla_hw_data *ha = vha->hw;
7596     struct fw_blob *blob;
7597 
7598     if (IS_QLA2100(ha)) {
7599         blob = &qla_fw_blobs[FW_ISP21XX];
7600     } else if (IS_QLA2200(ha)) {
7601         blob = &qla_fw_blobs[FW_ISP22XX];
7602     } else if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
7603         blob = &qla_fw_blobs[FW_ISP2300];
7604     } else if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
7605         blob = &qla_fw_blobs[FW_ISP2322];
7606     } else if (IS_QLA24XX_TYPE(ha)) {
7607         blob = &qla_fw_blobs[FW_ISP24XX];
7608     } else if (IS_QLA25XX(ha)) {
7609         blob = &qla_fw_blobs[FW_ISP25XX];
7610     } else if (IS_QLA81XX(ha)) {
7611         blob = &qla_fw_blobs[FW_ISP81XX];
7612     } else if (IS_QLA82XX(ha)) {
7613         blob = &qla_fw_blobs[FW_ISP82XX];
7614     } else if (IS_QLA2031(ha)) {
7615         blob = &qla_fw_blobs[FW_ISP2031];
7616     } else if (IS_QLA8031(ha)) {
7617         blob = &qla_fw_blobs[FW_ISP8031];
7618     } else if (IS_QLA27XX(ha)) {
7619         blob = &qla_fw_blobs[FW_ISP27XX];
7620     } else if (IS_QLA28XX(ha)) {
7621         blob = &qla_fw_blobs[FW_ISP28XX];
7622     } else {
7623         return NULL;
7624     }
7625 
7626     if (!blob->name)
7627         return NULL;
7628 
7629     mutex_lock(&qla_fw_lock);
7630     if (blob->fw)
7631         goto out;
7632 
7633     if (request_firmware(&blob->fw, blob->name, &ha->pdev->dev)) {
7634         ql_log(ql_log_warn, vha, 0x0063,
7635             "Failed to load firmware image (%s).\n", blob->name);
7636         blob->fw = NULL;
7637         blob = NULL;
7638     }
7639 
7640 out:
7641     mutex_unlock(&qla_fw_lock);
7642     return blob;
7643 }
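
/*
 * Minimal usage sketch (illustration only, not part of the driver):
 * the returned blob is a cached image shared by all callers.  blob->fw
 * is a struct firmware whose ->data/->size describe the image; callers
 * must not call release_firmware() themselves, as the blob is freed
 * once by qla2x00_release_firmware() at module exit.  The names
 * qla_load_risc_sketch() and qla_write_fw_to_adapter() are hypothetical.
 */
#if 0
static int qla_load_risc_sketch(scsi_qla_host_t *vha)
{
    struct fw_blob *blob = qla2x00_request_firmware(vha);

    if (!blob)
        return -ENOENT;

    /* blob->fw->data and blob->fw->size stay valid until module exit */
    return qla_write_fw_to_adapter(vha, blob->fw->data, blob->fw->size);
}
#endif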
7644 
7645 static void
7646 qla2x00_release_firmware(void)
7647 {
7648     struct fw_blob *blob;
7649 
7650     mutex_lock(&qla_fw_lock);
7651     for (blob = qla_fw_blobs; blob->name; blob++)
7652         release_firmware(blob->fw);
7653     mutex_unlock(&qla_fw_lock);
7654 }
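
/*
 * Note: release_firmware() accepts a NULL pointer, so blobs that were
 * never loaded need no check in the loop above.
 */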
7655 
7656 static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
7657 {
7658     struct qla_hw_data *ha = vha->hw;
7659     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
7660     struct qla_qpair *qpair = NULL;
7661     struct scsi_qla_host *vp, *tvp;
7662     fc_port_t *fcport;
7663     int i;
7664     unsigned long flags;
7665 
7666     ql_dbg(ql_dbg_aer, vha, 0x9000,
7667            "%s\n", __func__);
7668     ha->chip_reset++;
7669 
7670     ha->base_qpair->chip_reset = ha->chip_reset;
7671     for (i = 0; i < ha->max_qpairs; i++) {
7672         if (ha->queue_pair_map[i])
7673             ha->queue_pair_map[i]->chip_reset =
7674                 ha->base_qpair->chip_reset;
7675     }
7676 
7677     /*
7678      * Purging the mailbox might take a while; slot reset/chip reset
7679      * will take care of the purge.
7680      */
7681 
7682     mutex_lock(&ha->mq_lock);
7683     ha->base_qpair->online = 0;
7684     list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7685         qpair->online = 0;
7686     wmb();
7687     mutex_unlock(&ha->mq_lock);
7688 
7689     qla2x00_mark_all_devices_lost(vha);
7690 
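    /*
     * Take a vref on each vport so it cannot be freed while vport_slock
     * is dropped around the call into the vport below.
     */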
7691     spin_lock_irqsave(&ha->vport_slock, flags);
7692     list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7693         atomic_inc(&vp->vref_count);
7694         spin_unlock_irqrestore(&ha->vport_slock, flags);
7695         qla2x00_mark_all_devices_lost(vp);
7696         spin_lock_irqsave(&ha->vport_slock, flags);
7697         atomic_dec(&vp->vref_count);
7698     }
7699     spin_unlock_irqrestore(&ha->vport_slock, flags);
7700 
7701     /* Clear all async request states across all VPs. */
7702     list_for_each_entry(fcport, &vha->vp_fcports, list)
7703         fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7704 
7705     spin_lock_irqsave(&ha->vport_slock, flags);
7706     list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7707         atomic_inc(&vp->vref_count);
7708         spin_unlock_irqrestore(&ha->vport_slock, flags);
7709         list_for_each_entry(fcport, &vp->vp_fcports, list)
7710             fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
7711         spin_lock_irqsave(&ha->vport_slock, flags);
7712         atomic_dec(&vp->vref_count);
7713     }
7714     spin_unlock_irqrestore(&ha->vport_slock, flags);
7715 }
7716 
7717 
7718 static pci_ers_result_t
7719 qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
7720 {
7721     scsi_qla_host_t *vha = pci_get_drvdata(pdev);
7722     struct qla_hw_data *ha = vha->hw;
7723     pci_ers_result_t ret = PCI_ERS_RESULT_NEED_RESET;
7724 
7725     ql_log(ql_log_warn, vha, 0x9000,
7726            "PCI error detected, state %x.\n", state);
7727     ha->pci_error_state = QLA_PCI_ERR_DETECTED;
7728 
7729     if (!atomic_read(&pdev->enable_cnt)) {
7730         ql_log(ql_log_info, vha, 0xffff,
7731             "PCI device is disabled, state %x\n", state);
7732         ret = PCI_ERS_RESULT_NEED_RESET;
7733         goto out;
7734     }
7735 
7736     switch (state) {
7737     case pci_channel_io_normal:
7738         qla_pci_set_eeh_busy(vha);
7739         if (ql2xmqsupport || ql2xnvmeenable) {
7740             set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
7741             qla2xxx_wake_dpc(vha);
7742         }
7743         ret = PCI_ERS_RESULT_CAN_RECOVER;
7744         break;
7745     case pci_channel_io_frozen:
7746         qla_pci_set_eeh_busy(vha);
7747         ret = PCI_ERS_RESULT_NEED_RESET;
7748         break;
7749     case pci_channel_io_perm_failure:
7750         ha->flags.pci_channel_io_perm_failure = 1;
7751         qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
7752         if (ql2xmqsupport || ql2xnvmeenable) {
7753             set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags);
7754             qla2xxx_wake_dpc(vha);
7755         }
7756         ret = PCI_ERS_RESULT_DISCONNECT;
7757     }
7758 out:
7759     ql_dbg(ql_dbg_aer, vha, 0x600d,
7760            "PCI error detected returning [%x].\n", ret);
7761     return ret;
7762 }
7763 
7764 static pci_ers_result_t
7765 qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
7766 {
7767     int risc_paused = 0;
7768     uint32_t stat;
7769     unsigned long flags;
7770     scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7771     struct qla_hw_data *ha = base_vha->hw;
7772     struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
7773     struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
7774 
7775     ql_log(ql_log_warn, base_vha, 0x9000,
7776            "mmio enabled\n");
7777 
7778     ha->pci_error_state = QLA_PCI_MMIO_ENABLED;
7779 
7780     if (IS_QLA82XX(ha))
7781         return PCI_ERS_RESULT_RECOVERED;
7782 
7783     if (qla2x00_isp_reg_stat(ha)) {
7784         ql_log(ql_log_info, base_vha, 0x803f,
7785             "During mmio_enabled(), PCI/register disconnect still detected.\n");
7786         goto out;
7787     }
7788 
7789     spin_lock_irqsave(&ha->hardware_lock, flags);
7790     if (IS_QLA2100(ha) || IS_QLA2200(ha)){
7791         stat = rd_reg_word(&reg->hccr);
7792         if (stat & HCCR_RISC_PAUSE)
7793             risc_paused = 1;
7794     } else if (IS_QLA23XX(ha)) {
7795         stat = rd_reg_dword(&reg->u.isp2300.host_status);
7796         if (stat & HSR_RISC_PAUSED)
7797             risc_paused = 1;
7798     } else if (IS_FWI2_CAPABLE(ha)) {
7799         stat = rd_reg_dword(&reg24->host_status);
7800         if (stat & HSRX_RISC_PAUSED)
7801             risc_paused = 1;
7802     }
7803     spin_unlock_irqrestore(&ha->hardware_lock, flags);
7804 
7805     if (risc_paused) {
7806         ql_log(ql_log_info, base_vha, 0x9003,
7807             "RISC paused -- mmio_enabled, Dumping firmware.\n");
7808         qla2xxx_dump_fw(base_vha);
7809     }
7810 out:
7811     /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */
7812     ql_dbg(ql_dbg_aer, base_vha, 0x600d,
7813            "mmio enabled returning.\n");
7814     return PCI_ERS_RESULT_NEED_RESET;
7815 }
7816 
7817 static pci_ers_result_t
7818 qla2xxx_pci_slot_reset(struct pci_dev *pdev)
7819 {
7820     pci_ers_result_t ret = PCI_ERS_RESULT_DISCONNECT;
7821     scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7822     struct qla_hw_data *ha = base_vha->hw;
7823     int rc;
7824     struct qla_qpair *qpair = NULL;
7825 
7826     ql_log(ql_log_warn, base_vha, 0x9004,
7827            "Slot Reset.\n");
7828 
7829     ha->pci_error_state = QLA_PCI_SLOT_RESET;
7830     /* Workaround: the qla2xxx driver accesses hardware early and
7831      * needs the error state to be pci_channel_io_normal.
7832      * Otherwise mailbox commands time out.
7833      */
7834     pdev->error_state = pci_channel_io_normal;
7835 
7836     pci_restore_state(pdev);
7837 
7838     /* pci_restore_state() clears the device's saved_state flag, so
7839      * save the freshly restored state again for any later restore.
7840      */
7841     pci_save_state(pdev);
7842 
7843     if (ha->mem_only)
7844         rc = pci_enable_device_mem(pdev);
7845     else
7846         rc = pci_enable_device(pdev);
7847 
7848     if (rc) {
7849         ql_log(ql_log_warn, base_vha, 0x9005,
7850             "Can't re-enable PCI device after reset.\n");
7851         goto exit_slot_reset;
7852     }
7853 
7854 
7855     if (ha->isp_ops->pci_config(base_vha))
7856         goto exit_slot_reset;
7857 
7858     mutex_lock(&ha->mq_lock);
7859     list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7860         qpair->online = 1;
7861     mutex_unlock(&ha->mq_lock);
7862 
7863     ha->flags.eeh_busy = 0;
7864     base_vha->flags.online = 1;
7865     set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
7866     ha->isp_ops->abort_isp(base_vha);
7867     clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
7868 
7869     if (qla2x00_isp_reg_stat(ha)) {
7870         ha->flags.eeh_busy = 1;
7871         qla_pci_error_cleanup(base_vha);
7872         ql_log(ql_log_warn, base_vha, 0x9005,
7873                "Device unable to recover from PCI error.\n");
7874     } else {
7875         ret =  PCI_ERS_RESULT_RECOVERED;
7876     }
7877 
7878 exit_slot_reset:
7879     ql_dbg(ql_dbg_aer, base_vha, 0x900e,
7880         "Slot Reset returning %x.\n", ret);
7881 
7882     return ret;
7883 }
7884 
7885 static void
7886 qla2xxx_pci_resume(struct pci_dev *pdev)
7887 {
7888     scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7889     struct qla_hw_data *ha = base_vha->hw;
7890     int ret;
7891 
7892     ql_log(ql_log_warn, base_vha, 0x900f,
7893            "PCI resume.\n");
7894 
7895 
7896     ret = qla2x00_wait_for_hba_online(base_vha);
7897     if (ret != QLA_SUCCESS) {
7898         ql_log(ql_log_fatal, base_vha, 0x9002,
7899             "The device failed to resume I/O from slot/link_reset.\n");
7900     }
7901     ha->pci_error_state = QLA_PCI_RESUME;
7902     ql_dbg(ql_dbg_aer, base_vha, 0x600d,
7903            "PCI resume returning.\n");
7904 }
7905 
7906 void qla_pci_set_eeh_busy(struct scsi_qla_host *vha)
7907 {
7908     struct qla_hw_data *ha = vha->hw;
7909     struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7910     bool do_cleanup = false;
7911     unsigned long flags;
7912 
7913     if (ha->flags.eeh_busy)
7914         return;
7915 
7916     spin_lock_irqsave(&base_vha->work_lock, flags);
7917     if (!ha->flags.eeh_busy) {
7918         ha->eeh_jif = jiffies;
7919         ha->flags.eeh_flush = 0;
7920 
7921         ha->flags.eeh_busy = 1;
7922         do_cleanup = true;
7923     }
7924     spin_unlock_irqrestore(&base_vha->work_lock, flags);
7925 
7926     if (do_cleanup)
7927         qla_pci_error_cleanup(base_vha);
7928 }
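
/*
 * Note the double check of eeh_busy above: the unlocked test is a fast
 * path, and the flag is re-tested under work_lock so that only the
 * first caller marks the HBA busy and runs qla_pci_error_cleanup().
 */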
7929 
7930 /*
7931  * This routine schedules a task to pause I/O from interrupt context
7932  * when the caller sees a PCIe error event (register reads return all
7933  * 0xF's).
7934  */
7934 void qla_schedule_eeh_work(struct scsi_qla_host *vha)
7935 {
7936     struct qla_hw_data *ha = vha->hw;
7937     struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7938 
7939     if (ha->flags.eeh_busy)
7940         return;
7941 
7942     set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags);
7943     qla2xxx_wake_dpc(base_vha);
7944 }
7945 
7946 static void
7947 qla_pci_reset_prepare(struct pci_dev *pdev)
7948 {
7949     scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7950     struct qla_hw_data *ha = base_vha->hw;
7951     struct qla_qpair *qpair;
7952 
7953     ql_log(ql_log_warn, base_vha, 0xffff,
7954         "%s.\n", __func__);
7955 
7956     /*
7957      * PCI FLR/function reset is about to reset the
7958      * slot. Stop the chip to stop all DMA access.
7959      * It is assumed that pci_reset_done will be called
7960      * after FLR to resume chip operation.
7961      */
7962     ha->flags.eeh_busy = 1;
7963     mutex_lock(&ha->mq_lock);
7964     list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7965         qpair->online = 0;
7966     mutex_unlock(&ha->mq_lock);
7967 
7968     set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
7969     qla2x00_abort_isp_cleanup(base_vha);
7970     qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
7971 }
7972 
7973 static void
7974 qla_pci_reset_done(struct pci_dev *pdev)
7975 {
7976     scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
7977     struct qla_hw_data *ha = base_vha->hw;
7978     struct qla_qpair *qpair;
7979 
7980     ql_log(ql_log_warn, base_vha, 0xffff,
7981         "%s.\n", __func__);
7982 
7983     /*
7984      * FLR just completed by the PCI layer; resume the adapter.
7985      */
7986     ha->flags.eeh_busy = 0;
7987     mutex_lock(&ha->mq_lock);
7988     list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
7989         qpair->online = 1;
7990     mutex_unlock(&ha->mq_lock);
7991 
7992     base_vha->flags.online = 1;
7993     ha->isp_ops->abort_isp(base_vha);
7994     clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
7995 }
7996 
7997 static int qla2xxx_map_queues(struct Scsi_Host *shost)
7998 {
7999     int rc;
8000     scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
8001     struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
8002 
8003     if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
8004         rc = blk_mq_map_queues(qmap);
8005     else
8006         rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
8007     return rc;
8008 }
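
/*
 * Simplified sketch of what blk_mq_pci_map_queues() does (see
 * block/blk-mq-pci.c for the real implementation): each hardware queue
 * inherits the CPU mask of its MSI-X vector, so block-layer submissions
 * and completions stay on the same CPUs.
 */
#if 0
static int blk_mq_pci_map_queues_sketch(struct blk_mq_queue_map *qmap,
                                        struct pci_dev *pdev, int offset)
{
    unsigned int queue, cpu;

    for (queue = 0; queue < qmap->nr_queues; queue++) {
        const struct cpumask *mask =
            pci_irq_get_affinity(pdev, queue + offset);

        if (!mask)  /* no affinity info: fall back to a generic spread */
            return blk_mq_map_queues(qmap);
        for_each_cpu(cpu, mask)
            qmap->mq_map[cpu] = qmap->queue_offset + queue;
    }
    return 0;
}
#endif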
8009 
8010 struct scsi_host_template qla2xxx_driver_template = {
8011     .module         = THIS_MODULE,
8012     .name           = QLA2XXX_DRIVER_NAME,
8013     .queuecommand       = qla2xxx_queuecommand,
8014 
8015     .eh_timed_out       = fc_eh_timed_out,
8016     .eh_abort_handler   = qla2xxx_eh_abort,
8017     .eh_should_retry_cmd    = fc_eh_should_retry_cmd,
8018     .eh_device_reset_handler = qla2xxx_eh_device_reset,
8019     .eh_target_reset_handler = qla2xxx_eh_target_reset,
8020     .eh_bus_reset_handler   = qla2xxx_eh_bus_reset,
8021     .eh_host_reset_handler  = qla2xxx_eh_host_reset,
8022 
8023     .slave_configure    = qla2xxx_slave_configure,
8024 
8025     .slave_alloc        = qla2xxx_slave_alloc,
8026     .slave_destroy      = qla2xxx_slave_destroy,
8027     .scan_finished      = qla2xxx_scan_finished,
8028     .scan_start     = qla2xxx_scan_start,
8029     .change_queue_depth = scsi_change_queue_depth,
8030     .map_queues             = qla2xxx_map_queues,
8031     .this_id        = -1,
8032     .cmd_per_lun        = 3,
8033     .sg_tablesize       = SG_ALL,
8034 
8035     .max_sectors        = 0xFFFF,
8036     .shost_groups       = qla2x00_host_groups,
8037 
8038     .supported_mode     = MODE_INITIATOR,
8039     .track_queue_depth  = 1,
8040     .cmd_size       = sizeof(srb_t),
8041 };
8042 
8043 static const struct pci_error_handlers qla2xxx_err_handler = {
8044     .error_detected = qla2xxx_pci_error_detected,
8045     .mmio_enabled = qla2xxx_pci_mmio_enabled,
8046     .slot_reset = qla2xxx_pci_slot_reset,
8047     .resume = qla2xxx_pci_resume,
8048     .reset_prepare = qla_pci_reset_prepare,
8049     .reset_done = qla_pci_reset_done,
8050 };
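
/*
 * These callbacks implement the PCI AER recovery sequence described in
 * Documentation/PCI/pci-error-recovery.rst: error_detected() runs
 * first; mmio_enabled() follows if the driver answered CAN_RECOVER;
 * slot_reset() runs after the link/slot reset when NEED_RESET was
 * requested; resume() runs once traffic may restart.  reset_prepare()
 * and reset_done() bracket function-level resets (FLR) initiated from
 * outside the driver.
 */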
8051 
8052 static struct pci_device_id qla2xxx_pci_tbl[] = {
8053     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2100) },
8054     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2200) },
8055     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2300) },
8056     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2312) },
8057     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2322) },
8058     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6312) },
8059     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP6322) },
8060     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2422) },
8061     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2432) },
8062     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8432) },
8063     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
8064     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
8065     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
8066     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
8067     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
8068     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
8069     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
8070     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
8071     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
8072     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
8073     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
8074     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
8075     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
8076     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
8077     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
8078     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
8079     { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
8080     { 0 },
8081 };
8082 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
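
/*
 * MODULE_DEVICE_TABLE() exports the ID table in the module's alias
 * information so userspace (udev/modprobe) can autoload the driver
 * when a matching PCI device appears.
 */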
8083 
8084 static struct pci_driver qla2xxx_pci_driver = {
8085     .name       = QLA2XXX_DRIVER_NAME,
8086     .driver     = {
8087         .owner      = THIS_MODULE,
8088     },
8089     .id_table   = qla2xxx_pci_tbl,
8090     .probe      = qla2x00_probe_one,
8091     .remove     = qla2x00_remove_one,
8092     .shutdown   = qla2x00_shutdown,
8093     .err_handler    = &qla2xxx_err_handler,
8094 };
8095 
8096 static const struct file_operations apidev_fops = {
8097     .owner = THIS_MODULE,
8098     .llseek = noop_llseek,
8099 };
8100 
8101 /**
8102  * qla2x00_module_init - Module initialization.
8103  **/
8104 static int __init
8105 qla2x00_module_init(void)
8106 {
8107     int ret = 0;
8108 
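    /*
     * Compile-time layout checks: the firmware interface structures are
     * fixed-size on the wire (most IOCBs are exactly 64 bytes), so any
     * packing or field change that alters a size fails the build here
     * rather than corrupting DMA at run time.
     */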
8109     BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64);
8110     BUILD_BUG_ON(sizeof(cmd_entry_t) != 64);
8111     BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64);
8112     BUILD_BUG_ON(sizeof(cont_entry_t) != 64);
8113     BUILD_BUG_ON(sizeof(init_cb_t) != 96);
8114     BUILD_BUG_ON(sizeof(mrk_entry_t) != 64);
8115     BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64);
8116     BUILD_BUG_ON(sizeof(request_t) != 64);
8117     BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64);
8118     BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64);
8119     BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64);
8120     BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64);
8121     BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64);
8122     BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64);
8123     BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64);
8124     BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64);
8125     BUILD_BUG_ON(sizeof(struct cmd_type_7) != 64);
8126     BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64);
8127     BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64);
8128     BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64);
8129     BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2604);
8130     BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424);
8131     BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164);
8132     BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260);
8133     BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260);
8134     BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16);
8135     BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64);
8136     BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256);
8137     BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24);
8138     BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256);
8139     BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288);
8140     BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216);
8141     BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64);
8142     BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64);
8143     BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64);
8144     BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64);
8145     BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128);
8146     BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128);
8147     BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64);
8148     BUILD_BUG_ON(sizeof(struct mbx_entry) != 64);
8149     BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252);
8150     BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64);
8151     BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512);
8152     BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512);
8153     BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64);
8154     BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64);
8155     BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64);
8156     BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634);
8157     BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100);
8158     BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976);
8159     BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228);
8160     BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52);
8161     BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172);
8162     BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524);
8163     BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8);
8164     BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12);
8165     BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24);
8166     BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420);
8167     BUILD_BUG_ON(sizeof(struct qla82xx_uri_data_desc) != 28);
8168     BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32);
8169     BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196);
8170     BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE);
8171     BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128);
8172     BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8);
8173     BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16);
8174     BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24);
8175     BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16);
8176     BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336);
8177     BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064);
8178     BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64);
8179     BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64);
8180     BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64);
8181     BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64);
8182     BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52);
8183     BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56);
8184     BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64);
8185     BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64);
8186     BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64);
8187     BUILD_BUG_ON(sizeof(sts21_entry_t) != 64);
8188     BUILD_BUG_ON(sizeof(sts22_entry_t) != 64);
8189     BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64);
8190     BUILD_BUG_ON(sizeof(sts_entry_t) != 64);
8191     BUILD_BUG_ON(sizeof(sw_info_t) != 32);
8192     BUILD_BUG_ON(sizeof(target_id_t) != 2);
8193 
8194     /* Allocate cache for SRBs. */
8195     srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0,
8196         SLAB_HWCACHE_ALIGN, NULL);
8197     if (srb_cachep == NULL) {
8198         ql_log(ql_log_fatal, NULL, 0x0001,
8199             "Unable to allocate SRB cache...Failing load!.\n");
8200         return -ENOMEM;
8201     }
8202 
8203     /* Initialize target kmem_cache and mem_pools */
8204     ret = qlt_init();
8205     if (ret < 0) {
8206         goto destroy_cache;
8207     } else if (ret > 0) {
8208         /*
8209          * If initiator mode is explicitly disabled by qlt_init(),
8210          * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
8211          * performing scsi_scan_target() during LOOP UP event.
8212          */
8213         qla2xxx_transport_functions.disable_target_scan = 1;
8214         qla2xxx_transport_vport_functions.disable_target_scan = 1;
8215     }
8216 
8217     /* Derive version string. */
8218     strcpy(qla2x00_version_str, QLA2XXX_VERSION);
8219     if (ql2xextended_error_logging)
8220         strcat(qla2x00_version_str, "-debug");
8221     if (ql2xextended_error_logging == 1)
8222         ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
8223 
8224     qla2xxx_transport_template =
8225         fc_attach_transport(&qla2xxx_transport_functions);
8226     if (!qla2xxx_transport_template) {
8227         ql_log(ql_log_fatal, NULL, 0x0002,
8228             "fc_attach_transport failed...Failing load!.\n");
8229         ret = -ENODEV;
8230         goto qlt_exit;
8231     }
8232 
8233     apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
8234     if (apidev_major < 0) {
8235         ql_log(ql_log_fatal, NULL, 0x0003,
8236             "Unable to register char device %s.\n", QLA2XXX_APIDEV);
8237     }
8238 
8239     qla2xxx_transport_vport_template =
8240         fc_attach_transport(&qla2xxx_transport_vport_functions);
8241     if (!qla2xxx_transport_vport_template) {
8242         ql_log(ql_log_fatal, NULL, 0x0004,
8243             "fc_attach_transport vport failed...Failing load!.\n");
8244         ret = -ENODEV;
8245         goto unreg_chrdev;
8246     }
8247     ql_log(ql_log_info, NULL, 0x0005,
8248         "QLogic Fibre Channel HBA Driver: %s.\n",
8249         qla2x00_version_str);
8250     ret = pci_register_driver(&qla2xxx_pci_driver);
8251     if (ret) {
8252         ql_log(ql_log_fatal, NULL, 0x0006,
8253             "pci_register_driver failed...ret=%d Failing load!.\n",
8254             ret);
8255         goto release_vport_transport;
8256     }
8257     return ret;
8258 
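    /*
     * Error unwind: each label below releases one resource, in the
     * reverse order of the setup steps above.
     */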
8259 release_vport_transport:
8260     fc_release_transport(qla2xxx_transport_vport_template);
8261 
8262 unreg_chrdev:
8263     if (apidev_major >= 0)
8264         unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
8265     fc_release_transport(qla2xxx_transport_template);
8266 
8267 qlt_exit:
8268     qlt_exit();
8269 
8270 destroy_cache:
8271     kmem_cache_destroy(srb_cachep);
8272     return ret;
8273 }
8274 
8275 /**
8276  * qla2x00_module_exit - Module cleanup.
8277  **/
8278 static void __exit
8279 qla2x00_module_exit(void)
8280 {
8281     pci_unregister_driver(&qla2xxx_pci_driver);
8282     qla2x00_release_firmware();
8283     kmem_cache_destroy(ctx_cachep);
8284     fc_release_transport(qla2xxx_transport_vport_template);
8285     if (apidev_major >= 0)
8286         unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
8287     fc_release_transport(qla2xxx_transport_template);
8288     qlt_exit();
8289     kmem_cache_destroy(srb_cachep);
8290 }
8291 
8292 module_init(qla2x00_module_init);
8293 module_exit(qla2x00_module_exit);
8294 
8295 MODULE_AUTHOR("QLogic Corporation");
8296 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
8297 MODULE_LICENSE("GPL");
8298 MODULE_FIRMWARE(FW_FILE_ISP21XX);
8299 MODULE_FIRMWARE(FW_FILE_ISP22XX);
8300 MODULE_FIRMWARE(FW_FILE_ISP2300);
8301 MODULE_FIRMWARE(FW_FILE_ISP2322);
8302 MODULE_FIRMWARE(FW_FILE_ISP24XX);
8303 MODULE_FIRMWARE(FW_FILE_ISP25XX);