Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
0004  *
0005  *  based on qla2x00t.c code:
0006  *
0007  *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
0008  *  Copyright (C) 2004 - 2005 Leonid Stoljar
0009  *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
0010  *  Copyright (C) 2006 - 2010 ID7 Ltd.
0011  *
0012  *  Forward port and refactoring to modern qla2xxx and target/configfs
0013  *
0014  *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
0015  */
0016 
0017 #include <linux/module.h>
0018 #include <linux/init.h>
0019 #include <linux/types.h>
0020 #include <linux/blkdev.h>
0021 #include <linux/interrupt.h>
0022 #include <linux/pci.h>
0023 #include <linux/delay.h>
0024 #include <linux/list.h>
0025 #include <linux/workqueue.h>
0026 #include <asm/unaligned.h>
0027 #include <scsi/scsi.h>
0028 #include <scsi/scsi_host.h>
0029 #include <scsi/scsi_tcq.h>
0030 
0031 #include "qla_def.h"
0032 #include "qla_target.h"
0033 
/* Enable FC Tape (Sequence Level Error Recovery); writable at runtime via sysfs. */
static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
        "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

/* Initiator-mode policy string; read-only after module load. */
static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
    "Determines when initiator mode will be enabled. Possible values: "
    "\"exclusive\" - initiator mode will be enabled on load, "
    "disabled on enabling target mode and then on disabling target mode "
    "enabled back; "
    "\"disabled\" - initiator mode will never be enabled; "
    "\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
    "when ready "
    "\"enabled\" (default) - initiator mode will always stay enabled.");

/* Let the admin steer IRQ placement via smp_affinity (target-only configs). */
int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
    "User to control IRQ placement via smp_affinity."
    "Valid with qlini_mode=disabled."
    "1(default): enable");

/* Numeric initiator-mode state (QLA2XXX_INI_MODE_* constants). */
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

/*
 * Default SAM statuses used when a command must be bounced back to the
 * initiator: qla_sam_status for driver-internal throttling, tc_sam_status
 * when the target core is saturated.  NOTE(review): usage sites are
 * outside this chunk - confirm.
 */
static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */
0062 
/*
 * From scsi/fc/fc_fcp.h
 * Local mirror of the FCP_RSP_INFO response codes used when building
 * task-management responses.
 */
enum fcp_resp_rsp_codes {
    FCP_TMF_CMPL = 0,
    FCP_DATA_LEN_INVALID = 1,
    FCP_CMND_FIELDS_INVALID = 2,
    FCP_DATA_PARAM_MISMATCH = 3,
    FCP_TMF_REJECTED = 4,
    FCP_TMF_FAILED = 5,
    FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 * Layout of the FCP_CMND priority/task-attribute byte.
 */
#define FCP_PTA_SIMPLE      0   /* simple task attribute */
#define FCP_PTA_HEADQ       1   /* head of queue task attribute */
#define FCP_PTA_ORDERED     2   /* ordered task attribute */
#define FCP_PTA_ACA         4   /* auto. contingent allegiance */
#define FCP_PTA_MASK        7   /* mask for task attribute field */
#define FCP_PRI_SHIFT       3   /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80        /* reserved bits in priority field */
0086 
0087 /*
0088  * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
0089  * must be called under HW lock and could unlock/lock it inside.
0090  * It isn't an issue, since in the current implementation on the time when
0091  * those functions are called:
0092  *
0093  *   - Either context is IRQ and only IRQ handler can modify HW data,
0094  *     including rings related fields,
0095  *
0096  *   - Or access to target mode variables from struct qla_tgt doesn't
0097  *     cross those functions boundaries, except tgt_stop, which
0098  *     additionally protected by irq_cmd_count.
0099  */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
    struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
    response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
    int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
    *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
    struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
    struct imm_ntfy_from_isp *ntfy,
    uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
    uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
    fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
    struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
    uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);

/*
 * Global Variables
 */
/* Slab/mempool backing for management commands and PLOGI-ack bookkeeping. */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
/* Dedicated workqueue for target-mode deferred work. */
static struct workqueue_struct *qla_tgt_wq;
/* qla_tgt_mutex protects qla_tgt_glist, the list of all qla_tgt instances. */
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
0138 
0139 static const char *prot_op_str(u32 prot_op)
0140 {
0141     switch (prot_op) {
0142     case TARGET_PROT_NORMAL:    return "NORMAL";
0143     case TARGET_PROT_DIN_INSERT:    return "DIN_INSERT";
0144     case TARGET_PROT_DOUT_INSERT:   return "DOUT_INSERT";
0145     case TARGET_PROT_DIN_STRIP: return "DIN_STRIP";
0146     case TARGET_PROT_DOUT_STRIP:    return "DOUT_STRIP";
0147     case TARGET_PROT_DIN_PASS:  return "DIN_PASS";
0148     case TARGET_PROT_DOUT_PASS: return "DOUT_PASS";
0149     default:            return "UNKNOWN";
0150     }
0151 }
0152 
0153 /* This API intentionally takes dest as a parameter, rather than returning
0154  * int value to avoid caller forgetting to issue wmb() after the store */
0155 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
0156 {
0157     scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
0158     *dest = atomic_inc_return(&base_vha->generation_tick);
0159     /* memory barrier */
0160     wmb();
0161 }
0162 
0163 /* Might release hw lock, then reaquire!! */
0164 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
0165 {
0166     /* Send marker if required */
0167     if (unlikely(vha->marker_needed != 0)) {
0168         int rc = qla2x00_issue_marker(vha, vha_locked);
0169 
0170         if (rc != QLA_SUCCESS) {
0171             ql_dbg(ql_dbg_tgt, vha, 0xe03d,
0172                 "qla_target(%d): issue_marker() failed\n",
0173                 vha->vp_idx);
0174         }
0175         return rc;
0176     }
0177     return QLA_SUCCESS;
0178 }
0179 
0180 struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
0181                         be_id_t d_id)
0182 {
0183     struct scsi_qla_host *host;
0184     uint32_t key;
0185 
0186     if (vha->d_id.b.area == d_id.area &&
0187         vha->d_id.b.domain == d_id.domain &&
0188         vha->d_id.b.al_pa == d_id.al_pa)
0189         return vha;
0190 
0191     key = be_to_port_id(d_id).b24;
0192 
0193     host = btree_lookup32(&vha->hw->host_map, key);
0194     if (!host)
0195         ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
0196             "Unable to find host %06x\n", key);
0197 
0198     return host;
0199 }
0200 
0201 static inline
0202 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
0203     uint16_t vp_idx)
0204 {
0205     struct qla_hw_data *ha = vha->hw;
0206 
0207     if (vha->vp_idx == vp_idx)
0208         return vha;
0209 
0210     BUG_ON(ha->tgt.tgt_vp_map == NULL);
0211     if (likely(test_bit(vp_idx, ha->vp_idx_map)))
0212         return ha->tgt.tgt_vp_map[vp_idx].vha;
0213 
0214     return NULL;
0215 }
0216 
0217 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
0218 {
0219     unsigned long flags;
0220 
0221     spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
0222 
0223     vha->hw->tgt.num_pend_cmds++;
0224     if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
0225         vha->qla_stats.stat_max_pend_cmds =
0226             vha->hw->tgt.num_pend_cmds;
0227     spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
0228 }
0229 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
0230 {
0231     unsigned long flags;
0232 
0233     spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
0234     vha->hw->tgt.num_pend_cmds--;
0235     spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
0236 }
0237 
0238 
0239 static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
0240     struct atio_from_isp *atio, uint8_t ha_locked)
0241 {
0242     struct qla_tgt_sess_op *u;
0243     struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
0244     unsigned long flags;
0245 
0246     if (tgt->tgt_stop) {
0247         ql_dbg(ql_dbg_async, vha, 0x502c,
0248             "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
0249             vha->vp_idx);
0250         goto out_term;
0251     }
0252 
0253     u = kzalloc(sizeof(*u), GFP_ATOMIC);
0254     if (u == NULL)
0255         goto out_term;
0256 
0257     u->vha = vha;
0258     memcpy(&u->atio, atio, sizeof(*atio));
0259     INIT_LIST_HEAD(&u->cmd_list);
0260 
0261     spin_lock_irqsave(&vha->cmd_list_lock, flags);
0262     list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
0263     spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
0264 
0265     schedule_delayed_work(&vha->unknown_atio_work, 1);
0266 
0267 out:
0268     return;
0269 
0270 out_term:
0271     qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
0272     goto out;
0273 }
0274 
/*
 * Retry delivery of ATIOs that arrived for a then-unknown d_id (queued
 * by qlt_queue_unknown_atio()).  For each queued entry:
 *   - aborted entries are terminated, unlinked and freed;
 *   - entries whose d_id now resolves are re-injected via
 *     qlt_24xx_atio_pkt(), then unlinked and freed;
 *   - if the target is stopping, entries are terminated, unlinked and freed;
 *   - otherwise the entry stays queued and the work is rescheduled
 *     (at most once per pass, gated by 'queued').
 * NOTE(review): the list is walked without cmd_list_lock held; only the
 * unlink itself is done under the lock - confirm this is safe against
 * concurrent queuers.
 */
static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
    uint8_t ha_locked)
{
    struct qla_tgt_sess_op *u, *t;
    scsi_qla_host_t *host;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    unsigned long flags;
    uint8_t queued = 0; /* ensures at most one reschedule per pass */

    list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
        if (u->aborted) {
            ql_dbg(ql_dbg_async, vha, 0x502e,
                "Freeing unknown %s %p, because of Abort\n",
                "ATIO_TYPE7", u);
            qlt_send_term_exchange(vha->hw->base_qpair, NULL,
                &u->atio, ha_locked, 0);
            goto abort;
        }

        host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
        if (host != NULL) {
            /* d_id resolves now - replay the ATIO to its owner. */
            ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
                "Requeuing unknown ATIO_TYPE7 %p\n", u);
            qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
        } else if (tgt->tgt_stop) {
            ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
                "Freeing unknown %s %p, because tgt is being stopped\n",
                "ATIO_TYPE7", u);
            qlt_send_term_exchange(vha->hw->base_qpair, NULL,
                &u->atio, ha_locked, 0);
        } else {
            /* Still unresolved - keep the entry and try again later. */
            ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
                "Reschedule u %p, vha %p, host %p\n", u, vha, host);
            if (!queued) {
                queued = 1;
                schedule_delayed_work(&vha->unknown_atio_work,
                    1);
            }
            continue;
        }

/* Common unlink-and-free path for every handled entry. */
abort:
        spin_lock_irqsave(&vha->cmd_list_lock, flags);
        list_del(&u->cmd_list);
        spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
        kfree(u);
    }
}
0323 
0324 void qlt_unknown_atio_work_fn(struct work_struct *work)
0325 {
0326     struct scsi_qla_host *vha = container_of(to_delayed_work(work),
0327         struct scsi_qla_host, unknown_atio_work);
0328 
0329     qlt_try_to_dequeue_unknown_atios(vha, 0);
0330 }
0331 
/*
 * Route an entry from the ATIO queue to the vha that should handle it,
 * then dispatch by entry type:
 *   ATIO_TYPE7        - resolve by d_id; unresolvable frames are parked
 *                       via qlt_queue_unknown_atio();
 *   IMMED_NOTIFY_TYPE - resolve by vp_index (0xFF/0xFFFF mean "this vha");
 *   VP_RPT_ID_IOCB    - forwarded to qla24xx_report_id_acquisition();
 *   ABTS_RECV_24XX    - handled under hardware_lock (taken here when
 *                       ha_locked == 0).
 * Always returns false.
 */
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
    struct atio_from_isp *atio, uint8_t ha_locked)
{
    ql_dbg(ql_dbg_tgt, vha, 0xe072,
        "%s: qla_target(%d): type %x ox_id %04x\n",
        __func__, vha->vp_idx, atio->u.raw.entry_type,
        be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

    switch (atio->u.raw.entry_type) {
    case ATIO_TYPE7:
    {
        struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
            atio->u.isp24.fcp_hdr.d_id);
        if (unlikely(NULL == host)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe03e,
                "qla_target(%d): Received ATIO_TYPE7 "
                "with unknown d_id %x:%x:%x\n", vha->vp_idx,
                atio->u.isp24.fcp_hdr.d_id.domain,
                atio->u.isp24.fcp_hdr.d_id.area,
                atio->u.isp24.fcp_hdr.d_id.al_pa);


            /* Park the frame; retried by unknown_atio_work. */
            qlt_queue_unknown_atio(vha, atio, ha_locked);
            break;
        }
        /* A resolvable frame may unblock earlier parked ones. */
        if (unlikely(!list_empty(&vha->unknown_atio_list)))
            qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

        qlt_24xx_atio_pkt(host, atio, ha_locked);
        break;
    }

    case IMMED_NOTIFY_TYPE:
    {
        struct scsi_qla_host *host = vha;
        struct imm_ntfy_from_isp *entry =
            (struct imm_ntfy_from_isp *)atio;

        qlt_issue_marker(vha, ha_locked);

        /* 0xFF / 0xFFFF are wildcards meaning "this vha". */
        if ((entry->u.isp24.vp_index != 0xFF) &&
            (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
            host = qlt_find_host_by_vp_idx(vha,
                entry->u.isp24.vp_index);
            if (unlikely(!host)) {
                ql_dbg(ql_dbg_tgt, vha, 0xe03f,
                    "qla_target(%d): Received "
                    "ATIO (IMMED_NOTIFY_TYPE) "
                    "with unknown vp_index %d\n",
                    vha->vp_idx, entry->u.isp24.vp_index);
                break;
            }
        }
        qlt_24xx_atio_pkt(host, atio, ha_locked);
        break;
    }

    case VP_RPT_ID_IOCB_TYPE:
        qla24xx_report_id_acquisition(vha,
            (struct vp_rpt_id_entry_24xx *)atio);
        break;

    case ABTS_RECV_24XX:
    {
        struct abts_recv_from_24xx *entry =
            (struct abts_recv_from_24xx *)atio;
        struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
            entry->vp_index);
        unsigned long flags;

        if (unlikely(!host)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe00a,
                "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
                "received, with unknown vp_index %d\n",
                vha->vp_idx, entry->vp_index);
            break;
        }
        /* ABTS handling requires the hardware lock. */
        if (!ha_locked)
            spin_lock_irqsave(&host->hw->hardware_lock, flags);
        qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
        if (!ha_locked)
            spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
        break;
    }

    /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

    default:
        ql_dbg(ql_dbg_tgt, vha, 0xe040,
            "qla_target(%d): Received unknown ATIO atio "
            "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
        break;
    }

    return false;
}
0428 
0429 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
0430     struct rsp_que *rsp, response_t *pkt)
0431 {
0432     switch (pkt->entry_type) {
0433     case CTIO_CRC2:
0434         ql_dbg(ql_dbg_tgt, vha, 0xe073,
0435             "qla_target(%d):%s: CRC2 Response pkt\n",
0436             vha->vp_idx, __func__);
0437         fallthrough;
0438     case CTIO_TYPE7:
0439     {
0440         struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
0441         struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
0442             entry->vp_index);
0443         if (unlikely(!host)) {
0444             ql_dbg(ql_dbg_tgt, vha, 0xe041,
0445                 "qla_target(%d): Response pkt (CTIO_TYPE7) "
0446                 "received, with unknown vp_index %d\n",
0447                 vha->vp_idx, entry->vp_index);
0448             break;
0449         }
0450         qlt_response_pkt(host, rsp, pkt);
0451         break;
0452     }
0453 
0454     case IMMED_NOTIFY_TYPE:
0455     {
0456         struct scsi_qla_host *host;
0457         struct imm_ntfy_from_isp *entry =
0458             (struct imm_ntfy_from_isp *)pkt;
0459 
0460         host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
0461         if (unlikely(!host)) {
0462             ql_dbg(ql_dbg_tgt, vha, 0xe042,
0463                 "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
0464                 "received, with unknown vp_index %d\n",
0465                 vha->vp_idx, entry->u.isp24.vp_index);
0466             break;
0467         }
0468         qlt_response_pkt(host, rsp, pkt);
0469         break;
0470     }
0471 
0472     case NOTIFY_ACK_TYPE:
0473     {
0474         struct scsi_qla_host *host = vha;
0475         struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
0476 
0477         if (0xFF != entry->u.isp24.vp_index) {
0478             host = qlt_find_host_by_vp_idx(vha,
0479                 entry->u.isp24.vp_index);
0480             if (unlikely(!host)) {
0481                 ql_dbg(ql_dbg_tgt, vha, 0xe043,
0482                     "qla_target(%d): Response "
0483                     "pkt (NOTIFY_ACK_TYPE) "
0484                     "received, with unknown "
0485                     "vp_index %d\n", vha->vp_idx,
0486                     entry->u.isp24.vp_index);
0487                 break;
0488             }
0489         }
0490         qlt_response_pkt(host, rsp, pkt);
0491         break;
0492     }
0493 
0494     case ABTS_RECV_24XX:
0495     {
0496         struct abts_recv_from_24xx *entry =
0497             (struct abts_recv_from_24xx *)pkt;
0498         struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
0499             entry->vp_index);
0500         if (unlikely(!host)) {
0501             ql_dbg(ql_dbg_tgt, vha, 0xe044,
0502                 "qla_target(%d): Response pkt "
0503                 "(ABTS_RECV_24XX) received, with unknown "
0504                 "vp_index %d\n", vha->vp_idx, entry->vp_index);
0505             break;
0506         }
0507         qlt_response_pkt(host, rsp, pkt);
0508         break;
0509     }
0510 
0511     case ABTS_RESP_24XX:
0512     {
0513         struct abts_resp_to_24xx *entry =
0514             (struct abts_resp_to_24xx *)pkt;
0515         struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
0516             entry->vp_index);
0517         if (unlikely(!host)) {
0518             ql_dbg(ql_dbg_tgt, vha, 0xe045,
0519                 "qla_target(%d): Response pkt "
0520                 "(ABTS_RECV_24XX) received, with unknown "
0521                 "vp_index %d\n", vha->vp_idx, entry->vp_index);
0522             break;
0523         }
0524         qlt_response_pkt(host, rsp, pkt);
0525         break;
0526     }
0527     default:
0528         qlt_response_pkt(vha, rsp, pkt);
0529         break;
0530     }
0531 
0532 }
0533 
0534 /*
0535  * All qlt_plogi_ack_t operations are protected by hardware_lock
0536  */
0537 static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
0538     struct imm_ntfy_from_isp *ntfy, int type)
0539 {
0540     struct qla_work_evt *e;
0541 
0542     e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
0543     if (!e)
0544         return QLA_FUNCTION_FAILED;
0545 
0546     e->u.nack.fcport = fcport;
0547     e->u.nack.type = type;
0548     memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
0549     return qla2x00_post_work(vha, e);
0550 }
0551 
/*
 * Completion callback for the async notify-ack SRB.  Updates the fcport
 * login state machine under tgt.sess_lock according to which NACK
 * (PLOGI/PRLI/LOGO) just completed, then drops the SRB reference.
 * NOTE: for a first successful PRLI the lock is dropped around
 * qla24xx_sched_upd_fcport() and reacquired afterwards.
 */
static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
    struct scsi_qla_host *vha = sp->vha;
    unsigned long flags;

    ql_dbg(ql_dbg_disc, vha, 0x20f2,
        "Async done-%s res %x %8phC  type %d\n",
        sp->name, res, sp->fcport->port_name, sp->type);

    spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
    sp->fcport->flags &= ~FCF_ASYNC_SENT;
    sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

    switch (sp->type) {
    case SRB_NACK_PLOGI:
        sp->fcport->login_gen++;
        sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
        sp->fcport->logout_on_delete = 1;
        sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
        sp->fcport->send_els_logo = 0;

        /* FC-SP (security) capable port: hold login until auth completes. */
        if (sp->fcport->flags & FCF_FCSP_DEVICE) {
            ql_dbg(ql_dbg_edif, vha, 0x20ef,
                "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__,
                sp->fcport->port_name);
            qla2x00_set_fcport_disc_state(sp->fcport,
                DSC_LOGIN_AUTH_PEND);
            qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
                sp->fcport->d_id.b24);
            qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24,
                0, sp->fcport);
        }
        break;

    case SRB_NACK_PRLI:
        sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
        sp->fcport->deleted = 0;
        sp->fcport->send_els_logo = 0;

        if (!sp->fcport->login_succ &&
            !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
            /* First successful login for this port. */
            sp->fcport->login_succ = 1;

            vha->fcport_count++;
            /* sched_upd_fcport must not run under sess_lock. */
            spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
            qla24xx_sched_upd_fcport(sp->fcport);
            spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
        } else {
            sp->fcport->login_retry = 0;
            qla2x00_set_fcport_disc_state(sp->fcport,
                DSC_LOGIN_COMPLETE);
            sp->fcport->deleted = 0;
            sp->fcport->logout_on_delete = 1;
        }
        break;

    case SRB_NACK_LOGO:
        sp->fcport->login_gen++;
        sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
        qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
        break;
    }
    spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

    /* Release the SRB reference taken when the command was started. */
    kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
0618 
/*
 * Build and fire an async notify-ack SRB for a PLOGI/PRLI/LOGO
 * immediate notify.  Sets FCF_ASYNC_SENT for the duration of the
 * operation (cleared on any failure path; on success it is cleared by
 * qla2x00_async_nack_sp_done()).  Returns QLA_SUCCESS on submission,
 * QLA_FUNCTION_FAILED otherwise.
 */
int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
    struct imm_ntfy_from_isp *ntfy, int type)
{
    int rval = QLA_FUNCTION_FAILED;
    srb_t *sp;
    char *c = NULL; /* operation name, for the debug message only */

    fcport->flags |= FCF_ASYNC_SENT;
    switch (type) {
    case SRB_NACK_PLOGI:
        fcport->fw_login_state = DSC_LS_PLOGI_PEND;
        c = "PLOGI";
        /* Remember FC-SP capability advertised in the PLOGI. */
        if (vha->hw->flags.edif_enabled &&
            (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP))
            fcport->flags |= FCF_FCSP_DEVICE;
        break;
    case SRB_NACK_PRLI:
        fcport->fw_login_state = DSC_LS_PRLI_PEND;
        fcport->deleted = 0;
        c = "PRLI";
        break;
    case SRB_NACK_LOGO:
        fcport->fw_login_state = DSC_LS_LOGO_PEND;
        c = "LOGO";
        break;
    }

    sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
    if (!sp)
        goto done;

    sp->type = type;
    sp->name = "nack";
    qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
                  qla2x00_async_nack_sp_done);

    /* The SRB keeps a pointer to the caller's IOCB, not a copy. */
    sp->u.iocb_cmd.u.nack.ntfy = ntfy;

    ql_dbg(ql_dbg_disc, vha, 0x20f4,
        "Async-%s %8phC hndl %x %s\n",
        sp->name, fcport->port_name, sp->handle, c);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS)
        goto done_free_sp;

    return rval;

done_free_sp:
    kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
    fcport->flags &= ~FCF_ASYNC_SENT;
    return rval;
}
0673 
0674 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
0675 {
0676     fc_port_t *t;
0677 
0678     switch (e->u.nack.type) {
0679     case SRB_NACK_PRLI:
0680         t = e->u.nack.fcport;
0681         flush_work(&t->del_work);
0682         flush_work(&t->free_work);
0683         mutex_lock(&vha->vha_tgt.tgt_mutex);
0684         t = qlt_create_sess(vha, e->u.nack.fcport, 0);
0685         mutex_unlock(&vha->vha_tgt.tgt_mutex);
0686         if (t) {
0687             ql_log(ql_log_info, vha, 0xd034,
0688                 "%s create sess success %p", __func__, t);
0689             /* create sess has an extra kref */
0690             vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
0691         }
0692         break;
0693     }
0694     qla24xx_async_notify_ack(vha, e->u.nack.fcport,
0695         (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
0696 }
0697 
0698 void qla24xx_delete_sess_fn(struct work_struct *work)
0699 {
0700     fc_port_t *fcport = container_of(work, struct fc_port, del_work);
0701     struct qla_hw_data *ha = NULL;
0702 
0703     if (!fcport || !fcport->vha || !fcport->vha->hw)
0704         return;
0705 
0706     ha = fcport->vha->hw;
0707 
0708     if (fcport->se_sess) {
0709         ha->tgt.tgt_ops->shutdown_sess(fcport);
0710         ha->tgt.tgt_ops->put_sess(fcport);
0711     } else {
0712         qlt_unreg_sess(fcport);
0713     }
0714 }
0715 
0716 /*
0717  * Called from qla2x00_reg_remote_port()
0718  */
/*
 * Notification that an fcport was registered as a remote port.  Creates
 * a target session for the port if none exists (dropping sess_lock
 * around qlt_create_sess(), which takes tgt_mutex), or revives an
 * existing one via update_sess().  No-op if target mode is not set up,
 * the target is stopping, or the port is pending deletion.
 * NOTE: put_sess() at the end balances either the kref from
 * qlt_create_sess() or the kref_get taken in the revive path.
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    struct fc_port *sess = fcport;
    unsigned long flags;

    if (!vha->hw->tgt.tgt_ops)
        return;

    spin_lock_irqsave(&ha->tgt.sess_lock, flags);
    if (tgt->tgt_stop) {
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
        return;
    }

    if (fcport->disc_state == DSC_DELETE_PEND) {
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
        return;
    }

    if (!sess->se_sess) {
        /* No session yet: create one (needs tgt_mutex, not sess_lock). */
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

        mutex_lock(&vha->vha_tgt.tgt_mutex);
        sess = qlt_create_sess(vha, fcport, false);
        mutex_unlock(&vha->vha_tgt.tgt_mutex);

        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
    } else {
        if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
            spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
            return;
        }

        /* Session is being torn down if the kref already hit zero. */
        if (!kref_get_unless_zero(&sess->sess_kref)) {
            ql_dbg(ql_dbg_disc, vha, 0x2107,
                "%s: kref_get fail sess %8phC \n",
                __func__, sess->port_name);
            spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
            return;
        }

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
            "qla_target(%u): %ssession for port %8phC "
            "(loop ID %d) reappeared\n", vha->vp_idx,
            sess->local ? "local " : "", sess->port_name, sess->loop_id);

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
            "Reappeared sess %p\n", sess);

        ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
            fcport->loop_id,
            (fcport->flags & FCF_CONF_COMP_SUPPORTED));
    }

    /* A re-registered local session is promoted to global. */
    if (sess && sess->local) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
            "qla_target(%u): local session for "
            "port %8phC (loop ID %d) became global\n", vha->vp_idx,
            fcport->port_name, sess->loop_id);
        sess->local = 0;
    }
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

    ha->tgt.tgt_ops->put_sess(sess);
}
0786 
0787 /*
0788  * This is a zero-base ref-counting solution, since hardware_lock
0789  * guarantees that ref_count is not modified concurrently.
0790  * Upon successful return content of iocb is undefined
0791  */
0792 static struct qlt_plogi_ack_t *
0793 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
0794                struct imm_ntfy_from_isp *iocb)
0795 {
0796     struct qlt_plogi_ack_t *pla;
0797 
0798     lockdep_assert_held(&vha->hw->hardware_lock);
0799 
0800     list_for_each_entry(pla, &vha->plogi_ack_list, list) {
0801         if (pla->id.b24 == id->b24) {
0802             ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
0803                 "%s %d %8phC Term INOT due to new INOT",
0804                 __func__, __LINE__,
0805                 pla->iocb.u.isp24.port_name);
0806             qlt_send_term_imm_notif(vha, &pla->iocb, 1);
0807             memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
0808             return pla;
0809         }
0810     }
0811 
0812     pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
0813     if (!pla) {
0814         ql_dbg(ql_dbg_async, vha, 0x5088,
0815                "qla_target(%d): Allocation of plogi_ack failed\n",
0816                vha->vp_idx);
0817         return NULL;
0818     }
0819 
0820     memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
0821     pla->id = *id;
0822     list_add_tail(&pla->list, &vha->plogi_ack_list);
0823 
0824     return pla;
0825 }
0826 
/*
 * Drop one reference on a PLOGI-ack entry.  When the count reaches
 * zero: send the deferred NACK (PLOGI or PRLI, depending on the saved
 * IOCB's subcode) via the work queue, clear every fcport plogi_link
 * that still points at this entry, unlink it and free it.
 * NOTE: 'fcport' is first the entry's owner (pla->fcport) and is then
 * reused as the iterator over vha->vp_fcports in the cleanup loop.
 */
void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
    struct imm_ntfy_from_isp *iocb = &pla->iocb;
    port_id_t port_id;
    uint16_t loop_id;
    fc_port_t *fcport = pla->fcport;

    BUG_ON(!pla->ref_count);
    pla->ref_count--;

    /* Still referenced elsewhere - nothing to do yet. */
    if (pla->ref_count)
        return;

    ql_dbg(ql_dbg_disc, vha, 0x5089,
        "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
        " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
        iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
        iocb->u.isp24.port_id[0],
        le16_to_cpu(iocb->u.isp24.nport_handle),
        iocb->u.isp24.exchange_address, iocb->ox_id);

    /* Rebuild the port id from the saved IOCB (bytes are LSB-first). */
    port_id.b.domain = iocb->u.isp24.port_id[2];
    port_id.b.area   = iocb->u.isp24.port_id[1];
    port_id.b.al_pa  = iocb->u.isp24.port_id[0];
    port_id.b.rsvd_1 = 0;

    loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

    fcport->loop_id = loop_id;
    fcport->d_id = port_id;
    if (iocb->u.isp24.status_subcode == ELS_PLOGI)
        qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
    else
        qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

    /* Detach every fcport link that still references this entry. */
    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
            fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
        if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
            fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
    }

    list_del(&pla->list);
    kmem_cache_free(qla_tgt_plogi_cachep, pla);
}
0873 
/*
 * Link @pla into @sess->plogi_link[@link], taking a reference on it.
 * Any previous entry at the same slot is dropped via
 * qlt_plogi_ack_unref().  For the CONFLICT slot, sessions already in
 * (or headed for) deletion are skipped and the reference is reverted.
 */
void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
    struct imm_ntfy_from_isp *iocb = &pla->iocb;
    /* Inc ref_count first because link might already be pointing at pla */
    pla->ref_count++;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
        "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
        " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
        sess, link, sess->port_name,
        iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
        iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
        pla->ref_count, pla, link);

    if (link == QLT_PLOGI_LINK_CONFLICT) {
        switch (sess->disc_state) {
        case DSC_DELETED:
        case DSC_DELETE_PEND:
            /* Session is going away; undo the ref taken above. */
            pla->ref_count--;
            return;
        default:
            break;
        }
    }

    /* Replace (and release) whatever was linked at this slot before. */
    if (sess->plogi_link[link])
        qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

    /* The SAME_WWN owner is the fcport the final NACK is sent for. */
    if (link == QLT_PLOGI_LINK_SAME_WWN)
        pla->fcport = sess;

    sess->plogi_link[link] = pla;
}
0909 
/* Descriptor for an outstanding explicit LOGO to a remote port. */
typedef struct {
    /* These fields must be initialized by the caller */
    port_id_t id;    /* 24-bit destination port id of the LOGO */
    /*
     * number of cmds dropped while we were waiting for
     * initiator to ack LOGO; initialize to 1 if the LOGO is
     * triggered by a command, otherwise to 0
     */
    int cmd_count;

    /* These fields are used by callee */
    struct list_head list;    /* entry on vha->logo_list */
} qlt_port_logo_t;
0923 
/*
 * Issue an explicit LOGO ELS to logo->id unless one is already in
 * flight for the same port id, in which case the dropped-command count
 * is merged into the existing entry and this call returns silently.
 * Skipped entirely while the driver is being removed.
 */
static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
    qlt_port_logo_t *tmp;
    int res;

    /* Driver teardown in progress: don't start new ELS traffic. */
    if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
        res = 0;
        goto out;
    }

    mutex_lock(&vha->vha_tgt.tgt_mutex);

    /* A LOGO to this port is already pending: fold our count into it. */
    list_for_each_entry(tmp, &vha->logo_list, list) {
        if (tmp->id.b24 == logo->id.b24) {
            tmp->cmd_count += logo->cmd_count;
            mutex_unlock(&vha->vha_tgt.tgt_mutex);
            return;
        }
    }

    list_add_tail(&logo->list, &vha->logo_list);

    mutex_unlock(&vha->vha_tgt.tgt_mutex);

    /* Synchronous: returns after the LOGO exchange completes. */
    res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

    mutex_lock(&vha->vha_tgt.tgt_mutex);
    list_del(&logo->list);
    mutex_unlock(&vha->vha_tgt.tgt_mutex);

out:
    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
        "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
        logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
        logo->cmd_count, res);
}
0961 
/*
 * Workqueue handler (sess->free_work, queued by qlt_unreg_sess()) that
 * performs the final teardown of an fc_port session: optional explicit
 * LOGO/PRLO to the initiator, NVMe remote-port unregistration, eDIF
 * cleanup, release of the fabric-module se_session, pending LOGO ack,
 * unlinking of deferred PLOGI ACKs, and the bookkeeping that lets
 * waiters on tgt->waitQ / vha->fcport_waitQ make progress.
 */
void qlt_free_session_done(struct work_struct *work)
{
    struct fc_port *sess = container_of(work, struct fc_port,
        free_work);
    struct qla_tgt *tgt = sess->tgt;
    struct scsi_qla_host *vha = sess->vha;
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags;
    bool logout_started = false;
    scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
    struct qlt_plogi_ack_t *own =
        sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

    ql_dbg(ql_dbg_disc, vha, 0xf084,
        "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
        " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
        __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
        sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
        sess->logout_on_delete, sess->keep_nport_handle,
        sess->send_els_logo);

    /* Fabric-level teardown only applies to real (non-reserved) ports. */
    if (!IS_SW_RESV_ADDR(sess->d_id)) {
        qla2x00_mark_device_lost(vha, sess, 0);

        if (sess->send_els_logo) {
            qlt_port_logo_t logo;

            logo.id = sess->d_id;
            logo.cmd_count = 0;
            INIT_LIST_HEAD(&logo.list);
            /* Don't LOGO if a new PLOGI from this port is pending. */
            if (!own)
                qlt_send_first_logo(vha, &logo);
            sess->send_els_logo = 0;
        }

        if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
            int rc;

            /*
             * No pending PLOGI, or the pending ELS is itself a
             * PLOGI: do a full async logout.  Otherwise (pending
             * PRLI with RIDA format 2) a PRLO suffices.
             */
            if (!own ||
                 (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
                sess->logout_completed = 0;
                rc = qla2x00_post_async_logout_work(vha, sess,
                    NULL);
                if (rc != QLA_SUCCESS)
                    ql_log(ql_log_warn, vha, 0xf085,
                        "Schedule logo failed sess %p rc %d\n",
                        sess, rc);
                else
                    logout_started = true;
            } else if (own && (own->iocb.u.isp24.status_subcode ==
                ELS_PRLI) && ha->flags.rida_fmt2) {
                rc = qla2x00_post_async_prlo_work(vha, sess,
                    NULL);
                if (rc != QLA_SUCCESS)
                    ql_log(ql_log_warn, vha, 0xf085,
                        "Schedule PRLO failed sess %p rc %d\n",
                        sess, rc);
                else
                    logout_started = true;
            }
        } /* if sess->logout_on_delete */

        /* Unregister NVMe remote port exactly once. */
        if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
            !(sess->nvme_flag & NVME_FLAG_DELETING)) {
            sess->nvme_flag |= NVME_FLAG_DELETING;
            qla_nvme_unregister_remote_port(sess);
        }

        /* eDIF security association / app-data cleanup. */
        if (ha->flags.edif_enabled &&
            (!own || (own &&
                  own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
            sess->edif.authok = 0;
            if (!ha->flags.host_shutting_down) {
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                       "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
                       __func__, sess->port_name);
                qla2x00_release_all_sadb(vha, sess);
            } else {
                ql_dbg(ql_dbg_edif, vha, 0x911e,
                       "%s bypassing release_all_sadb\n",
                       __func__);
            }

            qla_edif_clear_appdata(vha, sess);
            qla_edif_sess_down(vha, sess);
        }
    }

    /*
     * Release the target session for FC Nexus from fabric module code.
     */
    if (sess->se_sess != NULL)
        ha->tgt.tgt_ops->free_session(sess);

    if (logout_started) {
        bool traced = false;
        u16 cnt = 0;

        /* Poll for async logout completion (bounded, ~23s max). */
        while (!READ_ONCE(sess->logout_completed)) {
            if (!traced) {
                ql_dbg(ql_dbg_disc, vha, 0xf086,
                    "%s: waiting for sess %p logout\n",
                    __func__, sess);
                traced = true;
            }
            msleep(100);
            cnt++;
            /*
             * Driver timeout is set to 22 Sec, update count value to loop
             * long enough for log-out to complete before advancing. Otherwise,
             * straddling logout can interfere with re-login attempt.
             */
            if (cnt > 230)
                break;
        }

        ql_dbg(ql_dbg_disc, vha, 0xf087,
            "%s: sess %p logout completed\n", __func__, sess);
    }

    /* Acknowledge a LOGO the initiator sent us, if one is outstanding. */
    if (sess->logo_ack_needed) {
        sess->logo_ack_needed = 0;
        qla24xx_async_notify_ack(vha, sess,
            (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
    }

    /* Re-allow session-management commands (set in qlt_unreg_sess()). */
    spin_lock_irqsave(&vha->work_lock, flags);
    sess->flags &= ~FCF_ASYNC_SENT;
    spin_unlock_irqrestore(&vha->work_lock, flags);

    spin_lock_irqsave(&ha->tgt.sess_lock, flags);
    if (sess->se_sess) {
        sess->se_sess = NULL;
        if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
            tgt->sess_count--;
    }

    qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
    sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
    sess->deleted = QLA_SESS_DELETED;

    if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
        vha->fcport_count--;
        sess->login_succ = 0;
    }

    qla2x00_clear_loop_id(sess);

    /* Unblock a port whose login was paused by this session. */
    if (sess->conflict) {
        sess->conflict->login_pause = 0;
        sess->conflict = NULL;
        if (!test_bit(UNLOADING, &vha->dpc_flags))
            set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
    }

    /* Release any deferred PLOGI ACKs still linked to this session. */
    {
        struct qlt_plogi_ack_t *con =
            sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
        struct imm_ntfy_from_isp *iocb;

        /* Re-read: may have changed since the snapshot at entry. */
        own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

        if (con) {
            iocb = &con->iocb;
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
                 "se_sess %p / sess %p port %8phC is gone,"
                 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
                 sess->se_sess, sess, sess->port_name,
                 own ? "releasing own PLOGI" : "no own PLOGI pending",
                 own ? own->ref_count : -1,
                 iocb->u.isp24.port_name, con->ref_count);
            qlt_plogi_ack_unref(vha, con);
            sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
        } else {
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
                "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
                sess->se_sess, sess, sess->port_name,
                own ? "releasing own PLOGI" :
                "no own PLOGI pending",
                own ? own->ref_count : -1);
        }

        if (own) {
            sess->fw_login_state = DSC_LS_PLOGI_PEND;
            qlt_plogi_ack_unref(vha, own);
            sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
        }
    }

    sess->explicit_logout = 0;
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
    /* Allow qlt_unreg_sess() to schedule this session again. */
    sess->free_pending = 0;

    qla2x00_dfs_remove_rport(vha, sess);

    ql_dbg(ql_dbg_disc, vha, 0xf001,
        "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
        sess, sess->port_name, vha->fcport_count);

    if (tgt && (tgt->sess_count == 0))
        wake_up_all(&tgt->waitQ);

    /*
     * Kick off relogin via DPC unless the driver/vport is going away
     * or the target is stopping.
     */
    if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
        !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
        (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
        switch (vha->host->active_mode) {
        case MODE_INITIATOR:
        case MODE_DUAL:
            set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
            qla2xxx_wake_dpc(vha);
            break;
        case MODE_TARGET:
        default:
            /* no-op */
            break;
        }
    }

    if (vha->fcport_count == 0)
        wake_up_all(&vha->fcport_waitQ);
}
1183 
/* ha->tgt.sess_lock supposed to be held on entry */
/*
 * Schedule asynchronous teardown of @sess via qlt_free_session_done().
 * Idempotent: the free_pending flag, taken under vha->work_lock, makes
 * concurrent callers return without queueing a second free_work.
 */
void qlt_unreg_sess(struct fc_port *sess)
{
    struct scsi_qla_host *vha = sess->vha;
    unsigned long flags;

    ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
        "%s sess %p for deletion %8phC\n",
        __func__, sess, sess->port_name);

    spin_lock_irqsave(&sess->vha->work_lock, flags);
    /* Teardown already queued by someone else: nothing to do. */
    if (sess->free_pending) {
        spin_unlock_irqrestore(&sess->vha->work_lock, flags);
        return;
    }
    sess->free_pending = 1;
    /*
     * Use FCF_ASYNC_SENT flag to block other cmds used in sess
     * management from being sent.
     */
    sess->flags |= FCF_ASYNC_SENT;
    spin_unlock_irqrestore(&sess->vha->work_lock, flags);

    if (sess->se_sess)
        vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

    sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
    qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
    /* Remember generations so later RSCN/login events aren't lost. */
    sess->last_rscn_gen = sess->rscn_gen;
    sess->last_login_gen = sess->login_gen;

    queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);
1218 
/*
 * Handle a reset-type immediate notify.  A wildcard nport handle
 * (0xFFFF) is a global event that schedules every session for
 * deletion; otherwise the task-management command @mcmd is issued for
 * the single session addressed by the handle.
 *
 * Returns 0 on success, -ESRCH if no matching session exists, or the
 * result of qlt_issue_task_mgmt().
 */
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
    struct qla_hw_data *ha = vha->hw;
    struct fc_port *sess = NULL;
    uint16_t loop_id;
    int res = 0;
    struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
    unsigned long flags;

    loop_id = le16_to_cpu(n->u.isp24.nport_handle);
    if (loop_id == 0xFFFF) {
        /* Global event */
        atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
    } else {
        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
    }

    ql_dbg(ql_dbg_tgt, vha, 0xe000,
        "Using sess for qla_tgt_reset: %p\n", sess);
    /* Global events leave sess NULL too — nothing more to issue. */
    if (!sess) {
        res = -ESRCH;
        return res;
    }

    ql_dbg(ql_dbg_tgt, vha, 0xe047,
        "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
        "loop_id %d)\n", vha->host_no, sess, sess->port_name,
        mcmd, loop_id);

    return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
1255 
1256 static void qla24xx_chk_fcp_state(struct fc_port *sess)
1257 {
1258     if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
1259         sess->logout_on_delete = 0;
1260         sess->logo_ack_needed = 0;
1261         sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
1262     }
1263 }
1264 
/*
 * Transition @sess toward deletion and queue its del_work.  The
 * disc_state switch filters out states where scheduling is wrong:
 * already pending, already deleted with no linked PLOGI ACKs (only
 * wake waiters), or still being reported to the upper layer
 * (DSC_UPD_FCPORT — defer by recording next_disc_state).
 */
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
    struct qla_tgt *tgt = sess->tgt;
    unsigned long flags;
    u16 sec;

    switch (sess->disc_state) {
    case DSC_DELETE_PEND:
        /* Already on its way out. */
        return;
    case DSC_DELETED:
        if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
            !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
            /* Fully torn down already: just unblock waiters. */
            if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
                wake_up_all(&tgt->waitQ);

            if (sess->vha->fcport_count == 0)
                wake_up_all(&sess->vha->fcport_waitQ);
            return;
        }
        break;
    case DSC_UPD_FCPORT:
        /*
         * This port is not done reporting to upper layer.
         * let it finish
         */
        sess->next_disc_state = DSC_DELETE_PEND;
        sec = jiffies_to_msecs(jiffies -
            sess->jiffies_at_registration)/1000;
        /* Warn at most once per 5-second bucket about slow rports. */
        if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
            sess->sec_since_registration = sec;
            ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
                "%s %8phC : Slow Rport registration(%d Sec)\n",
                __func__, sess->port_name, sec);
        }
        return;
    default:
        break;
    }

    /* Single-winner gate: only one caller queues the del_work. */
    spin_lock_irqsave(&sess->vha->work_lock, flags);
    if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
        spin_unlock_irqrestore(&sess->vha->work_lock, flags);
        return;
    }
    sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
    spin_unlock_irqrestore(&sess->vha->work_lock, flags);

    sess->prli_pend_timer = 0;
    qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

    /* Skip logout handling if the chip was reset since login. */
    qla24xx_chk_fcp_state(sess);

    ql_dbg(ql_log_warn, sess->vha, 0xe001,
        "Scheduling sess %p for deletion %8phC fc4_type %x\n",
        sess, sess->port_name, sess->fc4_type);

    WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}
1323 
1324 static void qlt_clear_tgt_db(struct qla_tgt *tgt)
1325 {
1326     struct fc_port *sess;
1327     scsi_qla_host_t *vha = tgt->vha;
1328 
1329     list_for_each_entry(sess, &vha->vp_fcports, list) {
1330         if (sess->se_sess)
1331             qlt_schedule_sess_for_deletion(sess);
1332     }
1333 
1334     /* At this point tgt could be already dead */
1335 }
1336 
/*
 * Look up the firmware loop id for the port with 24-bit address @s_id
 * by fetching the firmware's logged-in device list.
 *
 * Returns 0 and stores the id in *loop_id on success, -ENOMEM if the
 * DMA buffer cannot be allocated, -EBUSY if the gid-list mailbox
 * command fails, or -ENOENT if the address is not logged in.
 */
static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
    uint16_t *loop_id)
{
    struct qla_hw_data *ha = vha->hw;
    dma_addr_t gid_list_dma;
    struct gid_list_info *gid_list, *gid;
    int res, rc, i;
    uint16_t entries;

    gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
        &gid_list_dma, GFP_KERNEL);
    if (!gid_list) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
            "qla_target(%d): DMA Alloc failed of %u\n",
            vha->vp_idx, qla2x00_gid_list_size(ha));
        return -ENOMEM;
    }

    /* Get list of logged in devices */
    rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
    if (rc != QLA_SUCCESS) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
            "qla_target(%d): get_id_list() failed: %x\n",
            vha->vp_idx, rc);
        res = -EBUSY;
        goto out_free_id_list;
    }

    /* Linear scan for a full d_id (domain/area/al_pa) match. */
    gid = gid_list;
    res = -ENOENT;
    for (i = 0; i < entries; i++) {
        if (gid->al_pa == s_id.al_pa &&
            gid->area == s_id.area &&
            gid->domain == s_id.domain) {
            *loop_id = le16_to_cpu(gid->loop_id);
            res = 0;
            break;
        }
        /* Entry stride is hardware-dependent, not sizeof(*gid). */
        gid = (void *)gid + ha->gid_list_info_size;
    }

out_free_id_list:
    dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
        gid_list, gid_list_dma);
    return res;
}
1383 
/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
/*
 * Promote @fcport to a target-mode session: run the fabric module's
 * initiator ACL check, initialize session flags, take the extra
 * sess_kref reference described above, and bump the target's session
 * count.  If the fcport already has an se_sess, only a reference is
 * taken.  Returns the session, or NULL on ACL failure, zero kref, or
 * a stopping target.
 */
static struct fc_port *qlt_create_sess(
    struct scsi_qla_host *vha,
    fc_port_t *fcport,
    bool local)
{
    struct qla_hw_data *ha = vha->hw;
    struct fc_port *sess = fcport;
    unsigned long flags;

    /* Target is shutting down: refuse new sessions. */
    if (vha->vha_tgt.qla_tgt->tgt_stop)
        return NULL;

    /* Session already exists: just take a reference for the caller. */
    if (fcport->se_sess) {
        if (!kref_get_unless_zero(&sess->sess_kref)) {
            ql_dbg(ql_dbg_disc, vha, 0x20f6,
                "%s: kref_get_unless_zero failed for %8phC\n",
                __func__, sess->port_name);
            return NULL;
        }
        return fcport;
    }
    sess->tgt = vha->vha_tgt.qla_tgt;
    sess->local = local;

    /*
     * Under normal circumstances we want to logout from firmware when
     * session eventually ends and release corresponding nport handle.
     * In the exception cases (e.g. when new PLOGI is waiting) corresponding
     * code will adjust these flags as necessary.
     */
    sess->logout_on_delete = 1;
    sess->keep_nport_handle = 0;
    sess->logout_completed = 0;

    if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
        &fcport->port_name[0], sess) < 0) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
            "(%d) %8phC check_initiator_node_acl failed\n",
            vha->vp_idx, fcport->port_name);
        return NULL;
    } else {
        kref_init(&fcport->sess_kref);
        /*
         * Take an extra reference to ->sess_kref here to handle
         * fc_port access across ->tgt.sess_lock reaquire.
         */
        if (!kref_get_unless_zero(&sess->sess_kref)) {
            ql_dbg(ql_dbg_disc, vha, 0x20f7,
                "%s: kref_get_unless_zero failed for %8phC\n",
                __func__, sess->port_name);
            return NULL;
        }

        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        /* Reserved addresses are not counted as real sessions. */
        if (!IS_SW_RESV_ADDR(sess->d_id))
            vha->vha_tgt.qla_tgt->sess_count++;

        qlt_do_generation_tick(vha, &sess->generation);
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
    }

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
        "Adding sess %p se_sess %p  to tgt %p sess_count %d\n",
        sess, sess->se_sess, vha->vha_tgt.qla_tgt,
        vha->vha_tgt.qla_tgt->sess_count);

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
        "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
        "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
        vha->vp_idx, local ?  "local " : "", fcport->port_name,
        fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
        sess->d_id.b.al_pa, sess->conf_compl_supported ?  "" : "not ");

    return sess;
}
1463 
/*
 * max_gen - specifies maximum session generation
 * at which this deletion requestion is still valid
 */
/*
 * Called when the initiator side reports a port gone.  Schedules the
 * matching target session for deletion unless the request is stale
 * (the session was re-created after @max_gen was sampled), the target
 * is stopping, or no session exists.
 */
void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    struct fc_port *sess = fcport;
    unsigned long flags;

    /* Target mode never initialized on this host. */
    if (!vha->hw->tgt.tgt_ops)
        return;

    if (!tgt)
        return;

    spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
    if (tgt->tgt_stop) {
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
        return;
    }
    if (!sess->se_sess) {
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
        return;
    }

    /* Session recreated since the caller sampled max_gen: ignore. */
    if (max_gen - sess->generation < 0) {
        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
            "Ignoring stale deletion request for se_sess %p / sess %p"
            " for port %8phC, req_gen %d, sess_gen %d\n",
            sess->se_sess, sess, sess->port_name, max_gen,
            sess->generation);
        return;
    }

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);

    sess->local = 1;
    spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
    qlt_schedule_sess_for_deletion(sess);
}
1507 
1508 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
1509 {
1510     struct qla_hw_data *ha = tgt->ha;
1511     unsigned long flags;
1512     int res;
1513     /*
1514      * We need to protect against race, when tgt is freed before or
1515      * inside wake_up()
1516      */
1517     spin_lock_irqsave(&ha->tgt.sess_lock, flags);
1518     ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
1519         "tgt %p, sess_count=%d\n",
1520         tgt, tgt->sess_count);
1521     res = (tgt->sess_count == 0);
1522     spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
1523 
1524     return res;
1525 }
1526 
/* Called by tcm_qla2xxx configfs code */
/*
 * Phase 1 of target shutdown: mark the target stopping, schedule all
 * sessions for deletion, drain the session work list, wait (bounded)
 * for the session count to reach zero, and finally disable the vha if
 * it is still running in target or dual mode.
 *
 * Lock order here is optrom_mutex -> qla_tgt_mutex -> tgt_mutex.
 * Returns 0, or -EPERM if a stop is already in progress/complete.
 */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
    struct scsi_qla_host *vha = tgt->vha;
    struct qla_hw_data *ha = tgt->ha;
    unsigned long flags;

    mutex_lock(&ha->optrom_mutex);
    mutex_lock(&qla_tgt_mutex);

    if (tgt->tgt_stop || tgt->tgt_stopped) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
            "Already in tgt->tgt_stop or tgt_stopped state\n");
        mutex_unlock(&qla_tgt_mutex);
        mutex_unlock(&ha->optrom_mutex);
        return -EPERM;
    }

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
        vha->host_no, vha);
    /*
     * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
     * Lock is needed, because we still can get an incoming packet.
     */
    mutex_lock(&vha->vha_tgt.tgt_mutex);
    tgt->tgt_stop = 1;
    qlt_clear_tgt_db(tgt);
    mutex_unlock(&vha->vha_tgt.tgt_mutex);
    mutex_unlock(&qla_tgt_mutex);

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
        "Waiting for sess works (tgt %p)", tgt);
    /* Drop the lock around each flush so workers can dequeue. */
    spin_lock_irqsave(&tgt->sess_work_lock, flags);
    while (!list_empty(&tgt->sess_works_list)) {
        spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
        flush_scheduled_work();
        spin_lock_irqsave(&tgt->sess_work_lock, flags);
    }
    spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
        "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

    wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

    /* Big hammer */
    if (!ha->flags.host_shutting_down &&
        (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
        qlt_disable_vha(vha);

    /* Wait for sessions to clear out (just in case) */
    wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
    mutex_unlock(&ha->optrom_mutex);

    return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
1584 
1585 /* Called by tcm_qla2xxx configfs code */
1586 void qlt_stop_phase2(struct qla_tgt *tgt)
1587 {
1588     scsi_qla_host_t *vha = tgt->vha;
1589 
1590     if (tgt->tgt_stopped) {
1591         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
1592             "Already in tgt->tgt_stopped state\n");
1593         dump_stack();
1594         return;
1595     }
1596     if (!tgt->tgt_stop) {
1597         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
1598             "%s: phase1 stop is not completed\n", __func__);
1599         dump_stack();
1600         return;
1601     }
1602 
1603     mutex_lock(&tgt->ha->optrom_mutex);
1604     mutex_lock(&vha->vha_tgt.tgt_mutex);
1605     tgt->tgt_stop = 0;
1606     tgt->tgt_stopped = 1;
1607     mutex_unlock(&vha->vha_tgt.tgt_mutex);
1608     mutex_unlock(&tgt->ha->optrom_mutex);
1609 
1610     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
1611         tgt);
1612 
1613     switch (vha->qlini_mode) {
1614     case QLA2XXX_INI_MODE_EXCLUSIVE:
1615         vha->flags.online = 1;
1616         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1617         break;
1618     default:
1619         break;
1620     }
1621 }
1622 EXPORT_SYMBOL(qlt_stop_phase2);
1623 
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
/*
 * Final teardown of a qla_tgt: runs any stop phases not yet done,
 * detaches the per-qpair hints, removes the tgt from the global list,
 * empties and destroys the LUN->qpair btree, notifies the fabric
 * module for vports, and frees the structure.
 */
static void qlt_release(struct qla_tgt *tgt)
{
    scsi_qla_host_t *vha = tgt->vha;
    void *node;
    u64 key = 0;
    u16 i;
    struct qla_qpair_hint *h;
    struct qla_hw_data *ha = vha->hw;

    /* Ensure both stop phases have executed before freeing anything. */
    if (!tgt->tgt_stop && !tgt->tgt_stopped)
        qlt_stop_phase1(tgt);

    if (!tgt->tgt_stopped)
        qlt_stop_phase2(tgt);

    /* Unhook every qpair hint (index 0 is the base qpair). */
    for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
        unsigned long flags;

        h = &tgt->qphints[i];
        if (h->qpair) {
            spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
            list_del(&h->hint_elem);
            spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
            h->qpair = NULL;
        }
    }
    kfree(tgt->qphints);
    mutex_lock(&qla_tgt_mutex);
    list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
    mutex_unlock(&qla_tgt_mutex);

    /* Empty the LUN-to-qpair map before destroying it. */
    btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
        btree_remove64(&tgt->lun_qpair_map, key);

    btree_destroy64(&tgt->lun_qpair_map);

    /* Vports notify the fabric module of the disappearing target. */
    if (vha->vp_idx)
        if (ha->tgt.tgt_ops &&
            ha->tgt.tgt_ops->remove_target &&
            vha->vha_tgt.target_lport_ptr)
            ha->tgt.tgt_ops->remove_target(vha);

    vha->vha_tgt.qla_tgt = NULL;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
        "Release of tgt %p finished\n", tgt);

    kfree(tgt);
}
1674 
/* ha->hardware_lock supposed to be held on entry */
/*
 * Queue deferred session work of @type, copying @param_size bytes of
 * the IOCB payload into the work item, and kick tgt->sess_work.
 * Returns 0 on success or -ENOMEM if the work item cannot be
 * allocated (GFP_ATOMIC — the hardware lock is held).
 *
 * NOTE(review): @param_size is trusted to fit prm->tm_iocb — callers
 * must not pass a larger payload.
 */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
    const void *param, unsigned int param_size)
{
    struct qla_tgt_sess_work_param *prm;
    unsigned long flags;

    prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
    if (!prm) {
        ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
            "qla_target(%d): Unable to create session "
            "work, command will be refused", 0);
        return -ENOMEM;
    }

    ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
        "Scheduling work (type %d, prm %p)"
        " to find session for param %p (size %d, tgt %p)\n",
        type, prm, param, param_size, tgt);

    prm->type = type;
    memcpy(&prm->tm_iocb, param, param_size);

    spin_lock_irqsave(&tgt->sess_work_lock, flags);
    list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
    spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

    schedule_work(&tgt->sess_work);

    return 0;
}
1706 
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
 */
/*
 * Build and post a NOTIFY_ACK IOCB answering the immediate notify
 * @ntfy.  Most fields are echoed back from the incoming IOCB; the SRR
 * response fields come from the caller.  Silently returns if firmware
 * is not started or no request-queue slot is available.
 *
 * NOTE(review): @add_flags, @resp_code and @resp_code_valid are not
 * referenced in this body — presumably consumed by 2xxx-era variants
 * or retained for interface symmetry; confirm before removing.
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
    struct imm_ntfy_from_isp *ntfy,
    uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
    uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
    struct scsi_qla_host *vha = qpair->vha;
    struct qla_hw_data *ha = vha->hw;
    request_t *pkt;
    struct nack_to_isp *nack;

    if (!ha->flags.fw_started)
        return;

    ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

    pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
    if (!pkt) {
        ql_dbg(ql_dbg_tgt, vha, 0xe049,
            "qla_target(%d): %s failed: unable to allocate "
            "request packet\n", vha->vp_idx, __func__);
        return;
    }

    if (vha->vha_tgt.qla_tgt != NULL)
        vha->vha_tgt.qla_tgt->notify_ack_expected++;

    pkt->entry_type = NOTIFY_ACK_TYPE;
    pkt->entry_count = 1;

    /* Echo the identifying fields of the incoming notify back. */
    nack = (struct nack_to_isp *)pkt;
    nack->ox_id = ntfy->ox_id;

    nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
    nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
    if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
        /* For ELS notifies only the PUREX flag is carried over. */
        nack->u.isp24.flags = ntfy->u.isp24.flags &
            cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
    }
    nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
    nack->u.isp24.status = ntfy->u.isp24.status;
    nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
    nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
    nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
    nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
    nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
    nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
    nack->u.isp24.srr_reject_code = srr_reject_code;
    nack->u.isp24.srr_reject_code_expl = srr_explan;
    nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

    /* TODO qualify this with EDIF enable */
    if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
        (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
        nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
    }

    ql_dbg(ql_dbg_tgt, vha, 0xe005,
        "qla_target(%d): Sending 24xx Notify Ack %d\n",
        vha->vp_idx, nack->u.isp24.status);

    /* Memory Barrier */
    wmb();
    qla2x00_start_iocbs(vha, qpair->req);
}
1774 
/*
 * qlt_build_abts_resp_iocb() - build and queue an ABTS_RESP_24XX IOCB
 * answering the ABTS saved in @mcmd->orig_iocb.abts.
 *
 * mcmd->fc_tm_rsp selects the payload: FCP_TMF_CMPL sends a BA_ACC,
 * anything else a BA_RJT with reason "unable to perform".  The s_id/d_id
 * of the received ABTS are swapped so the response goes back to the
 * initiator.
 *
 * Returns 0 on success, -EAGAIN when ring space or a command handle is
 * unavailable.  The qpair lock is expected to be held by the caller.
 */
static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint32_t h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	/* Reserve one request-ring entry for the response IOCB. */
	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	/* Allocate an outstanding-command handle so the completion can
	 * find @mcmd again. */
	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 *
		 * NOTE(review): we bail out here after the ring slot was
		 * already reserved above — presumably tolerated because the
		 * zeroed entry is never started; confirm against
		 * qlt_check_reserve_free_req() accounting.
		 */
		return -EAGAIN;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = make_handle(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	/* F_CTL: exchange-context responder, last/end sequence, transfer
	 * sequence initiative; stored byte-wise (24-bit field). */
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	/* Swap source/destination IDs to address the initiator. */
	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		/* Abort succeeded: BA_ACC payload. */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		/* Abort failed/rejected: BA_RJT payload. */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	/* Count the response so teardown can wait for its completion. */
	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier: IOCB must be visible before ringing the doorbell. */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}
1859 
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 */
/*
 * qlt_24xx_send_abts_resp() - queue an ABTS_RESP_24XX IOCB answering @abts
 * with @status (FCP_TMF_CMPL -> BA_ACC, otherwise BA_RJT).
 *
 * @ids_reversed: true when @abts already carries reversed s_id/d_id (i.e.
 * it is the firmware's echo of a response we generated), so the IDs are
 * copied as-is instead of being swapped.
 *
 * Unlike qlt_build_abts_resp_iocb(), no completion handle is allocated —
 * the IOCB is fire-and-forget (QLA_TGT_SKIP_HANDLE).
 */
static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
	struct abts_recv_from_24xx *abts, uint32_t status,
	bool ids_reversed)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint8_t *p;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
	    ha, abts, status);

	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
	    NULL);
	if (!resp) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet", vha->vp_idx, __func__);
		return;
	}

	resp->entry_type = ABTS_RESP_24XX;
	/* No driver completion expected for this IOCB. */
	resp->handle = QLA_TGT_SKIP_HANDLE;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	/* F_CTL: exchange-context responder, last/end sequence, transfer
	 * sequence initiative; stored byte-wise (24-bit field). */
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;
	if (ids_reversed) {
		/* IDs already point back at the initiator; copy verbatim. */
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
	} else {
		/* Swap source/destination IDs to address the initiator. */
		resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
		resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
	}
	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (status == FCP_TMF_CMPL) {
		/* Abort succeeded: BA_ACC payload. */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		/* Abort failed/rejected: BA_RJT payload. */
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	/* Count the response so teardown can wait for its completion. */
	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier: IOCB must be visible before ringing the doorbell. */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);
}
1932 
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 */
/*
 * qlt_24xx_retry_term_exchange() - re-issue a TERMINATE EXCHANGE CTIO7 for
 * an exchange whose earlier termination/ABTS response failed, then re-send
 * the matching ABTS response.
 *
 * @mcmd: non-NULL when the ABTS came from the remote port (its original
 *        IOCB is used); NULL when the ABTS originated in this driver, in
 *        which case @pkt is the firmware's echo of our response and its
 *        ID fields are therefore already reversed.
 */
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
	struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
	struct ctio7_to_24xx *ctio;
	u16 tmp;
	struct abts_recv_from_24xx *entry;

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (mcmd)
		/* abts from remote port */
		entry = &mcmd->orig_iocb.abts;
	else
		/* abts from this driver.  */
		entry = (struct abts_recv_from_24xx *)pkt;

	/*
	 * In the !mcmd case, @entry is the firmware's completion of an ABTS
	 * response that we generated earlier, so its ID fields are already
	 * reversed (d_id holds the initiator).
	 */

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	ctio->nport_handle = entry->nport_handle;
	/* No driver completion expected for this IOCB. */
	ctio->handle = QLA_TGT_SKIP_HANDLE |	CTIO_COMPLETION_HANDLE_MARK;
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->exchange_addr = entry->exchange_addr_to_abort;
	tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);

	if (mcmd) {
		ctio->initiator_id = entry->fcp_hdr_le.s_id;

		/* Bits 9-11 carry the task attribute; reuse the aborted
		 * command's attribute when known, otherwise alternate an
		 * arbitrary attribute on every other retry. */
		if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
			tmp |= (mcmd->abort_io_attr << 9);
		else if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	} else {
		/* IDs reversed in @entry: d_id is the initiator here. */
		ctio->initiator_id = entry->fcp_hdr_le.d_id;

		if (qpair->retry_term_cnt & 1)
			tmp |= (0x4 << 9);
	}
	ctio->u.status1.flags = cpu_to_le16(tmp);
	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;

	ql_dbg(ql_dbg_tgt, vha, 0xe007,
	    "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
	    le16_to_cpu(ctio->u.status1.flags),
	    le16_to_cpu(ctio->u.status1.ox_id),
	    (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);

	/* Memory Barrier: IOCB must be visible before ringing the doorbell. */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	/* Follow the termination with a fresh ABTS response. */
	if (mcmd)
		qlt_build_abts_resp_iocb(mcmd);
	else
		qlt_24xx_send_abts_resp(qpair,
		    (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);

}
2008 
/* drop cmds for the given lun
 * XXX only looks for cmds on the port through which lun reset was received
 * XXX does not go through the list of other port (which may have cmds
 *     for the same lun)
 */
2014 static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
2015 {
2016     struct qla_tgt_sess_op *op;
2017     struct qla_tgt_cmd *cmd;
2018     uint32_t key;
2019     unsigned long flags;
2020 
2021     key = sid_to_key(s_id);
2022     spin_lock_irqsave(&vha->cmd_list_lock, flags);
2023     list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
2024         uint32_t op_key;
2025         u64 op_lun;
2026 
2027         op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
2028         op_lun = scsilun_to_int(
2029             (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
2030         if (op_key == key && op_lun == lun)
2031             op->aborted = true;
2032     }
2033 
2034     list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
2035         uint32_t cmd_key;
2036         u64 cmd_lun;
2037 
2038         cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
2039         cmd_lun = scsilun_to_int(
2040             (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
2041         if (cmd_key == key && cmd_lun == lun)
2042             cmd->aborted = 1;
2043     }
2044     spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
2045 }
2046 
2047 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
2048     uint64_t unpacked_lun)
2049 {
2050     struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2051     struct qla_qpair_hint *h = NULL;
2052 
2053     if (vha->flags.qpairs_available) {
2054         h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
2055         if (!h)
2056             h = &tgt->qphints[0];
2057     } else {
2058         h = &tgt->qphints[0];
2059     }
2060 
2061     return h;
2062 }
2063 
/*
 * qlt_do_tmr_work() - workqueue handler that hands a task-management
 * command to the target core via tgt_ops->handle_tmr().
 *
 * On handle_tmr() failure the initiator is answered directly (ABTS
 * reject, SAM BUSY, or notify-ack depending on the TMR type) and the
 * mcmd is returned to its mempool.
 */
static void qlt_do_tmr_work(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd =
		container_of(work, struct qla_tgt_mgmt_cmd, work);
	struct qla_hw_data *ha = mcmd->vha->hw;
	int rc;
	uint32_t tag;
	unsigned long flags;

	/* Only an ABTS carries a referencing tag (the exchange to abort). */
	switch (mcmd->tmr_func) {
	case QLA_TGT_ABTS:
		tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
		break;
	default:
		tag = 0;
		break;
	}

	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
	    mcmd->tmr_func, tag);

	if (rc != 0) {
		/* Target core refused the TMR: fail it back to the
		 * initiator under the qpair lock. */
		spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
		switch (mcmd->tmr_func) {
		case QLA_TGT_ABTS:
			mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
			qlt_build_abts_resp_iocb(mcmd);
			break;
		case QLA_TGT_LUN_RESET:
		case QLA_TGT_CLEAR_TS:
		case QLA_TGT_ABORT_TS:
		case QLA_TGT_CLEAR_ACA:
		case QLA_TGT_TARGET_RESET:
			qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio,
			    qla_sam_status);
			break;

		case QLA_TGT_ABORT_ALL:
		case QLA_TGT_NEXUS_LOSS_SESS:
		case QLA_TGT_NEXUS_LOSS:
			qlt_send_notify_ack(mcmd->qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
		spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags);

		ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
		    "qla_target(%d):  tgt_ops->handle_tmr() failed: %d\n",
		    mcmd->vha->vp_idx, rc);
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
	}
}
2116 
2117 /* ha->hardware_lock supposed to be held on entry */
/* ha->hardware_lock supposed to be held on entry */
/*
 * __qlt_24xx_handle_abts() - allocate a management command for a received
 * ABTS and queue it to the TMR workqueue on the CPU hinted for its qpair.
 *
 * Returns 0 on success, -ENOMEM when the mempool allocation fails, -EIO
 * when the referenced exchange cannot be found in @sess.
 */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt_mgmt_cmd *mcmd;
	struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
	struct qla_tgt_cmd *abort_cmd;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
	    "qla_target(%d): task abort (tag=%d)\n",
	    vha->vp_idx, abts->exchange_addr_to_abort);

	/* GFP_ATOMIC: we may be called with the hardware lock held. */
	mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
	if (mcmd == NULL) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
		    "qla_target(%d): %s: Allocation of ABORT cmd failed",
		    vha->vp_idx, __func__);
		return -ENOMEM;
	}
	memset(mcmd, 0, sizeof(*mcmd));
	mcmd->cmd_type = TYPE_TGT_TMCMD;
	mcmd->sess = sess;
	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
	/* Remember chip generation to detect stale requests after reset. */
	mcmd->reset_count = ha->base_qpair->chip_reset;
	mcmd->tmr_func = QLA_TGT_ABTS;
	mcmd->qpair = h->qpair;
	mcmd->vha = vha;

	/*
	 * LUN is looked up by target-core internally based on the passed
	 * abts->exchange_addr_to_abort tag.
	 */
	mcmd->se_cmd.cpuid = h->cpuid;

	abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
			le32_to_cpu(abts->exchange_addr_to_abort));
	if (!abort_cmd) {
		mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
		return -EIO;
	}
	mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;

	/* Prefer the qpair/CPU of the command being aborted, and carry its
	 * task attribute for the retry-termination path. */
	if (abort_cmd->qpair) {
		mcmd->qpair = abort_cmd->qpair;
		mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
		mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
		mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
	}

	INIT_WORK(&mcmd->work, qlt_do_tmr_work);
	queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);

	return 0;
}
2172 
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 */
/*
 * qlt_24xx_handle_abts() - validate a received ABTS and dispatch it.
 *
 * Rejects (BA_RJT) abort-sequence requests, unknown exchange addresses,
 * unknown or deleted sessions, and any failure from
 * __qlt_24xx_handle_abts(); otherwise the abort is queued for the target
 * core to process.
 */
static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
	struct abts_recv_from_24xx *abts)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess;
	uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
	be_id_t s_id;
	int rc;
	unsigned long flags;

	/* Only full-exchange aborts are supported, not abort-sequence. */
	if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
		    "qla_target(%d): ABTS: Abort Sequence not "
		    "supported\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
		    "qla_target(%d): ABTS: Unknown Exchange "
		    "Address received\n", vha->vp_idx);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
	    "qla_target(%d): task abort (s_id=%x:%x:%x, "
	    "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
	    abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
	    le32_to_cpu(abts->fcp_hdr_le.parameter));

	/* Look up the initiator's session by its (big-endian) port id. */
	s_id = le_id_to_be(abts->fcp_hdr_le.s_id);

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
	if (!sess) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
		    "qla_target(%d): task abort for non-existent session\n",
		    vha->vp_idx);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
			false);
		return;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);


	/* Session being torn down cannot accept new task management. */
	if (sess->deleted) {
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}

	rc = __qlt_24xx_handle_abts(vha, abts, sess);
	if (rc != 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
		    "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
		    vha->vp_idx, rc);
		qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
		    false);
		return;
	}
}
2243 
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 */
/*
 * qlt_24xx_send_task_mgmt_ctio() - queue a status-only CTIO7 delivering a
 * task-management response (@resp_code) for the ATIO saved in @mcmd.
 *
 * The CTIO carries an 8-byte FCP response with the TM response code in
 * the first sense byte (SS_RESPONSE_INFO_LEN_VALID).
 */
static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
	struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
	/* NOTE(review): local name "ha" is historical — this is actually
	 * the scsi_qla_host (vha), not qla_hw_data. */
	struct scsi_qla_host *ha = mcmd->vha;
	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;

	ql_dbg(ql_dbg_tgt, ha, 0xe008,
	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
	    ha, atio, resp_code);


	ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (ctio == NULL) {
		ql_dbg(ql_dbg_tgt, ha, 0xe04c,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", ha->vp_idx, __func__);
		return;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	/* No driver completion expected for this IOCB. */
	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
	ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = ha->vp_idx;
	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	/* Bits 9-11: task attribute from the original ATIO. */
	temp = (atio->u.isp24.attr << 9)|
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	/* 8-byte FCP_RSP info: response code in the first sense byte. */
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
	ctio->u.status1.response_len = cpu_to_le16(8);
	ctio->u.status1.sense_data[0] = resp_code;

	/* Memory Barrier: IOCB must be visible before ringing the doorbell. */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(ha, qpair->req);
}
2293 
/*
 * qlt_free_mcmd() - return a task-management command to its mempool.
 * Exported callback for the target fabric module.
 */
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
}
EXPORT_SYMBOL(qlt_free_mcmd);
2299 
2300 /*
2301  * ha->hardware_lock supposed to be held on entry. Might drop it, then
2302  * reacquire
2303  */
/*
 * qlt_send_resp_ctio() - queue a status-only CTIO7 for @cmd carrying a
 * SCSI status plus fixed-format sense data (sense_key/asc/ascq).
 *
 * Used to fail a command back to the initiator (e.g. on DIF errors)
 * without going through the normal data/status path.
 */
void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
	uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
	struct atio_from_isp *atio = &cmd->atio;
	struct ctio7_to_24xx *ctio;
	uint16_t temp;
	struct scsi_qla_host *vha = cmd->vha;

	ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
	    "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
	    "sense_key=%02x, asc=%02x, ascq=%02x",
	    vha, atio, scsi_status, sense_key, asc, ascq);

	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
	if (!ctio) {
		ql_dbg(ql_dbg_async, vha, 0x3067,
		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
		    vha->host_no, __func__);
		goto out;
	}

	ctio->entry_type = CTIO_TYPE7;
	ctio->entry_count = 1;
	/* No driver completion expected for this IOCB. */
	ctio->handle = QLA_TGT_SKIP_HANDLE;
	ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	ctio->vp_index = vha->vp_idx;
	ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	ctio->exchange_addr = atio->u.isp24.exchange_addr;
	/* Bits 9-11: task attribute from the original ATIO. */
	temp = (atio->u.isp24.attr << 9) |
	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
	ctio->u.status1.flags = cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	ctio->u.status1.ox_id = cpu_to_le16(temp);
	/* 18 bytes of FCP_RSP: fixed-format sense follows. */
	ctio->u.status1.scsi_status =
	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
	ctio->u.status1.response_len = cpu_to_le16(18);
	/* No data is transferred, so the whole expected length is residual. */
	ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

	if (ctio->u.status1.residual != 0)
		ctio->u.status1.scsi_status |=
		    cpu_to_le16(SS_RESIDUAL_UNDER);

	/* Fixed format sense data. */
	ctio->u.status1.sense_data[0] = 0x70;
	ctio->u.status1.sense_data[2] = sense_key;
	/* Additional sense length */
	ctio->u.status1.sense_data[7] = 0xa;
	/* ASC and ASCQ */
	ctio->u.status1.sense_data[12] = asc;
	ctio->u.status1.sense_data[13] = ascq;

	/* Memory Barrier: IOCB must be visible before ringing the doorbell. */
	wmb();

	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

out:
	return;
}
2367 
2368 /* callback from target fabric module code */
/* callback from target fabric module code */
/*
 * qlt_xmit_tm_rsp() - transmit the response for a completed task-management
 * command on behalf of the target fabric module.
 *
 * Response routing depends on what originated the TMR: a notify-ack (or
 * session deletion for LOGO/PRLO/TPRLO) for immediate-notify based TMRs,
 * an ABTS response IOCB for aborts, or a TM CTIO7 otherwise.
 *
 * Ownership: @mcmd is normally released here via tgt_ops->free_mcmd();
 * the ABTS path keeps it alive (free_mcmd = false) because the ABTS
 * response completion still references it.
 */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct qla_qpair *qpair = mcmd->qpair;
	bool free_mcmd = true;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
	    "TM response mcmd (%p) status %#x state %#x",
	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);

	if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
		/*
		 * Either the port is not online or this request was from
		 * previous life, just abort the processing.
		 */
		ql_dbg(ql_dbg_async, vha, 0xe100,
		    "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
		    vha->flags.online, qla2x00_reset_active(vha),
		    mcmd->reset_count, qpair->chip_reset);
		ha->tgt.tgt_ops->free_mcmd(mcmd);
		spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
		return;
	}

	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
		/* TMR came in via an immediate notify. */
		switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
		case ELS_LOGO:
		case ELS_PRLO:
		case ELS_TPRLO:
			/* Logout-type ELS: tear the session down instead of
			 * acking here. */
			ql_dbg(ql_dbg_disc, vha, 0x2106,
			    "TM response logo %8phC status %#x state %#x",
			    mcmd->sess->port_name, mcmd->fc_tm_rsp,
			    mcmd->flags);
			qlt_schedule_sess_for_deletion(mcmd->sess);
			break;
		default:
			qlt_send_notify_ack(vha->hw->base_qpair,
			    &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
			break;
		}
	} else {
		if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
			/* ABTS response completion will free mcmd later. */
			qlt_build_abts_resp_iocb(mcmd);
			free_mcmd = false;
		} else
			qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
			    mcmd->fc_tm_rsp);
	}
	/*
	 * Make the callback for ->free_mcmd() to queue_work() and invoke
	 * target_put_sess_cmd() to drop cmd_kref to 1.  The final
	 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
	 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
	 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
	 * qlt_xmit_tm_rsp() returns here..
	 */
	if (free_mcmd)
		ha->tgt.tgt_ops->free_mcmd(mcmd);

	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
2434 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
2435 
2436 /* No locks */
/* No locks */
/*
 * qlt_pci_map_calc_cnt() - DMA-map the command's scatterlists and compute
 * the data-segment / request-entry counts needed to build its CTIO.
 *
 * Fills prm->sg, prm->seg_cnt, prm->req_cnt, and for DIF commands
 * prm->tot_dsds, prm->prot_sg and prm->prot_seg_cnt.
 *
 * Returns 0 on success, -1 when DMA mapping fails (sg stays unmapped or
 * partially mapped state is reflected by cmd->sg_mapped).
 */
static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
{
	struct qla_tgt_cmd *cmd = prm->cmd;

	BUG_ON(cmd->sg_cnt == 0);

	prm->sg = (struct scatterlist *)cmd->sg;
	prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
	    cmd->sg_cnt, cmd->dma_data_direction);
	if (unlikely(prm->seg_cnt == 0))
		goto out_err;

	prm->cmd->sg_mapped = 1;

	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
		/*
		 * If greater than four sg entries then we need to allocate
		 * the continuation entries
		 */
		if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
			QLA_TGT_DATASEGS_PER_CMD_24XX,
			QLA_TGT_DATASEGS_PER_CONT_24XX);
	} else {
		/* DIF */
		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
			/* HBA inserts/strips PI: segment count is derived
			 * from the logical block count, not the sg list. */
			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
			prm->tot_dsds = prm->seg_cnt;
		} else
			prm->tot_dsds = prm->seg_cnt;

		if (cmd->prot_sg_cnt) {
			/* Map the protection-information scatterlist too. */
			prm->prot_sg      = cmd->prot_sg;
			prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
			    cmd->prot_sg, cmd->prot_sg_cnt,
			    cmd->dma_data_direction);
			if (unlikely(prm->prot_seg_cnt == 0))
				goto out_err;

			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
				/* Dif Bundling not support here */
				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
								cmd->blk_sz);
				prm->tot_dsds += prm->prot_seg_cnt;
			} else
				prm->tot_dsds += prm->prot_seg_cnt;
		}
	}

	return 0;

out_err:
	ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
	    "qla_target(%d): PCI mapping failed: sg_cnt=%d",
	    0, prm->cmd->sg_cnt);
	return -1;
}
2496 
/*
 * qlt_unmap_sg() - undo qlt_pci_map_calc_cnt(): DMA-unmap the data (and
 * protection) scatterlists and release the command's CRC/DIF context.
 *
 * Safe to call when nothing was mapped (cmd->sg_mapped == 0).
 */
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	if (!cmd->sg_mapped)
		return;

	qpair = cmd->qpair;

	dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
	    cmd->dma_data_direction);
	cmd->sg_mapped = 0;

	if (cmd->prot_sg_cnt)
		dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
		    cmd->dma_data_direction);

	/* No DIF context allocated for this command. */
	if (!cmd->ctx)
		return;
	ha = vha->hw;
	if (cmd->ctx_dsd_alloced)
		qla2x00_clean_dsd_pool(ha, cmd->ctx);

	dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
2523 
2524 static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
2525     uint32_t req_cnt)
2526 {
2527     uint32_t cnt;
2528     struct req_que *req = qpair->req;
2529 
2530     if (req->cnt < (req_cnt + 2)) {
2531         cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
2532             rd_reg_dword_relaxed(req->req_q_out));
2533 
2534         if  (req->ring_index < cnt)
2535             req->cnt = cnt - req->ring_index;
2536         else
2537             req->cnt = req->length - (req->ring_index - cnt);
2538 
2539         if (unlikely(req->cnt < (req_cnt + 2)))
2540             return -EAGAIN;
2541     }
2542 
2543     req->cnt -= req_cnt;
2544 
2545     return 0;
2546 }
2547 
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
 */
2551 static inline void *qlt_get_req_pkt(struct req_que *req)
2552 {
2553     /* Adjust ring index. */
2554     req->ring_index++;
2555     if (req->ring_index == req->length) {
2556         req->ring_index = 0;
2557         req->ring_ptr = req->ring;
2558     } else {
2559         req->ring_ptr++;
2560     }
2561     return (cont_entry_t *)req->ring_ptr;
2562 }
2563 
2564 /* ha->hardware_lock supposed to be held on entry */
2565 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
2566 {
2567     uint32_t h;
2568     int index;
2569     uint8_t found = 0;
2570     struct req_que *req = qpair->req;
2571 
2572     h = req->current_outstanding_cmd;
2573 
2574     for (index = 1; index < req->num_outstanding_cmds; index++) {
2575         h++;
2576         if (h == req->num_outstanding_cmds)
2577             h = 1;
2578 
2579         if (h == QLA_TGT_SKIP_HANDLE)
2580             continue;
2581 
2582         if (!req->outstanding_cmds[h]) {
2583             found = 1;
2584             break;
2585         }
2586     }
2587 
2588     if (found) {
2589         req->current_outstanding_cmd = h;
2590     } else {
2591         ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2592             "qla_target(%d): Ran out of empty cmd slots\n",
2593             qpair->vha->vp_idx);
2594         h = QLA_TGT_NULL_HANDLE;
2595     }
2596 
2597     return h;
2598 }
2599 
2600 /* ha->hardware_lock supposed to be held on entry */
/* ha->hardware_lock supposed to be held on entry */
/*
 * qlt_24xx_build_ctio_pkt() - build the base CTIO7 packet for @prm->cmd on
 * the current request-ring slot and register it in outstanding_cmds.
 *
 * Returns 0 on success, -EAGAIN when no command handle is available.
 */
static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
	struct qla_tgt_prm *prm)
{
	uint32_t h;
	struct ctio7_to_24xx *pkt;
	struct atio_from_isp *atio = &prm->cmd->atio;
	uint16_t temp;
	struct qla_tgt_cmd      *cmd = prm->cmd;

	pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
	prm->pkt = pkt;
	memset(pkt, 0, sizeof(*pkt));

	pkt->entry_type = CTIO_TYPE7;
	pkt->entry_count = (uint8_t)prm->req_cnt;
	pkt->vp_index = prm->cmd->vp_idx;

	/* Allocate a handle so the CTIO completion can locate the command. */
	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else
		qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

	pkt->handle = make_handle(qpair->req->id, h);
	pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
	pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
	pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
	pkt->exchange_addr = atio->u.isp24.exchange_addr;
	/* Bits 9-11: task attribute from the original ATIO. */
	temp = atio->u.isp24.attr << 9;
	pkt->u.status0.flags |= cpu_to_le16(temp);
	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
	pkt->u.status0.ox_id = cpu_to_le16(temp);
	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

	if (cmd->edif) {
		/* EDIF (encrypted FC) session: account traffic and flag
		 * the IOCB for in-line encryption. */
		if (cmd->dma_data_direction == DMA_TO_DEVICE)
			prm->cmd->sess->edif.rx_bytes += cmd->bufflen;
		if (cmd->dma_data_direction == DMA_FROM_DEVICE)
			prm->cmd->sess->edif.tx_bytes += cmd->bufflen;

		pkt->u.status0.edif_flags |= EF_EN_EDIF;
	}

	return 0;
}
2652 
2653 /*
2654  * ha->hardware_lock supposed to be held on entry. We have already made sure
2655  * that there is sufficient amount of request entries to not drop it.
2656  */
2657 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2658 {
2659     int cnt;
2660     struct dsd64 *cur_dsd;
2661 
2662     /* Build continuation packets */
2663     while (prm->seg_cnt > 0) {
2664         cont_a64_entry_t *cont_pkt64 =
2665             (cont_a64_entry_t *)qlt_get_req_pkt(
2666                prm->cmd->qpair->req);
2667 
2668         /*
2669          * Make sure that from cont_pkt64 none of
2670          * 64-bit specific fields used for 32-bit
2671          * addressing. Cast to (cont_entry_t *) for
2672          * that.
2673          */
2674 
2675         memset(cont_pkt64, 0, sizeof(*cont_pkt64));
2676 
2677         cont_pkt64->entry_count = 1;
2678         cont_pkt64->sys_define = 0;
2679 
2680         cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2681         cur_dsd = cont_pkt64->dsd;
2682 
2683         /* Load continuation entry data segments */
2684         for (cnt = 0;
2685             cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
2686             cnt++, prm->seg_cnt--) {
2687             append_dsd64(&cur_dsd, prm->sg);
2688             prm->sg = sg_next(prm->sg);
2689         }
2690     }
2691 }
2692 
2693 /*
2694  * ha->hardware_lock supposed to be held on entry. We have already made sure
2695  * that there is sufficient amount of request entries to not drop it.
2696  */
2697 static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2698 {
2699     int cnt;
2700     struct dsd64 *cur_dsd;
2701     struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
2702 
2703     pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2704 
2705     /* Setup packet address segment pointer */
2706     cur_dsd = &pkt24->u.status0.dsd;
2707 
2708     /* Set total data segment count */
2709     if (prm->seg_cnt)
2710         pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
2711 
2712     if (prm->seg_cnt == 0) {
2713         /* No data transfer */
2714         cur_dsd->address = 0;
2715         cur_dsd->length = 0;
2716         return;
2717     }
2718 
2719     /* If scatter gather */
2720 
2721     /* Load command entry data segments */
2722     for (cnt = 0;
2723         (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
2724         cnt++, prm->seg_cnt--) {
2725         append_dsd64(&cur_dsd, prm->sg);
2726         prm->sg = sg_next(prm->sg);
2727     }
2728 
2729     qlt_load_cont_data_segments(prm);
2730 }
2731 
2732 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2733 {
2734     return cmd->bufflen > 0;
2735 }
2736 
2737 static void qlt_print_dif_err(struct qla_tgt_prm *prm)
2738 {
2739     struct qla_tgt_cmd *cmd;
2740     struct scsi_qla_host *vha;
2741 
2742     /* asc 0x10=dif error */
2743     if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
2744         cmd = prm->cmd;
2745         vha = cmd->vha;
2746         /* ASCQ */
2747         switch (prm->sense_buffer[13]) {
2748         case 1:
2749             ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2750                 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2751                 "se_cmd=%p tag[%x]",
2752                 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2753                 cmd->atio.u.isp24.exchange_addr);
2754             break;
2755         case 2:
2756             ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2757                 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2758                 "se_cmd=%p tag[%x]",
2759                 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2760                 cmd->atio.u.isp24.exchange_addr);
2761             break;
2762         case 3:
2763             ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2764                 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2765                 "se_cmd=%p tag[%x]",
2766                 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2767                 cmd->atio.u.isp24.exchange_addr);
2768             break;
2769         default:
2770             ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2771                 "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
2772                 "se_cmd=%p tag[%x]",
2773                 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2774                 cmd->atio.u.isp24.exchange_addr);
2775             break;
2776         }
2777         ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2778     }
2779 }
2780 
2781 /*
2782  * Called without ha->hardware_lock held
2783  */
2784 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2785     struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
2786     uint32_t *full_req_cnt)
2787 {
2788     struct se_cmd *se_cmd = &cmd->se_cmd;
2789     struct qla_qpair *qpair = cmd->qpair;
2790 
2791     prm->cmd = cmd;
2792     prm->tgt = cmd->tgt;
2793     prm->pkt = NULL;
2794     prm->rq_result = scsi_status;
2795     prm->sense_buffer = &cmd->sense_buffer[0];
2796     prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
2797     prm->sg = NULL;
2798     prm->seg_cnt = -1;
2799     prm->req_cnt = 1;
2800     prm->residual = 0;
2801     prm->add_status_pkt = 0;
2802     prm->prot_sg = NULL;
2803     prm->prot_seg_cnt = 0;
2804     prm->tot_dsds = 0;
2805 
2806     if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2807         if  (qlt_pci_map_calc_cnt(prm) != 0)
2808             return -EAGAIN;
2809     }
2810 
2811     *full_req_cnt = prm->req_cnt;
2812 
2813     if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2814         prm->residual = se_cmd->residual_count;
2815         ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
2816             "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2817                prm->residual, se_cmd->tag,
2818                se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2819                cmd->bufflen, prm->rq_result);
2820         prm->rq_result |= SS_RESIDUAL_UNDER;
2821     } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2822         prm->residual = se_cmd->residual_count;
2823         ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
2824             "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2825                prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
2826                se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2827         prm->rq_result |= SS_RESIDUAL_OVER;
2828     }
2829 
2830     if (xmit_type & QLA_TGT_XMIT_STATUS) {
2831         /*
2832          * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
2833          * ignored in *xmit_response() below
2834          */
2835         if (qlt_has_data(cmd)) {
2836             if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
2837                 (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2838                 (prm->rq_result != 0))) {
2839                 prm->add_status_pkt = 1;
2840                 (*full_req_cnt)++;
2841             }
2842         }
2843     }
2844 
2845     return 0;
2846 }
2847 
2848 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
2849     int sending_sense)
2850 {
2851     if (cmd->qpair->enable_class_2)
2852         return 0;
2853 
2854     if (sending_sense)
2855         return cmd->conf_compl_supported;
2856     else
2857         return cmd->qpair->enable_explicit_conf &&
2858                     cmd->conf_compl_supported;
2859 }
2860 
/*
 * Finalize the status portion of a CTIO type 7 IOCB: residual, SCSI
 * status, explicit-confirm flags and (when present) the sense data.
 * With valid sense the IOCB is switched from status mode 0 to status
 * mode 1 and the sense bytes are copied in with a BE -> LE word swap.
 */
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
    struct qla_tgt_prm *prm)
{
    /* Sense can only be as long as the IOCB's sense_data field. */
    prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
        (uint32_t)sizeof(ctio->u.status1.sense_data));
    ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
    if (qlt_need_explicit_conf(prm->cmd, 0)) {
        ctio->u.status0.flags |= cpu_to_le16(
            CTIO7_FLAGS_EXPLICIT_CONFORM |
            CTIO7_FLAGS_CONFORM_REQ);
    }
    ctio->u.status0.residual = cpu_to_le32(prm->residual);
    ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
    if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
        int i;

        if (qlt_need_explicit_conf(prm->cmd, 1)) {
            /* No explicit confirm on a non-GOOD status. */
            if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
                ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
                    "Skipping EXPLICIT_CONFORM and "
                    "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
                    "non GOOD status\n");
                goto skip_explict_conf;
            }
            ctio->u.status1.flags |= cpu_to_le16(
                CTIO7_FLAGS_EXPLICIT_CONFORM |
                CTIO7_FLAGS_CONFORM_REQ);
        }
skip_explict_conf:
        /* Switch to status mode 1 so sense data can be carried. */
        ctio->u.status1.flags &=
            ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
        ctio->u.status1.flags |=
            cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
        ctio->u.status1.scsi_status |=
            cpu_to_le16(SS_SENSE_LEN_VALID);
        ctio->u.status1.sense_length =
            cpu_to_le16(prm->sense_buffer_len);
        /* Copy sense 32 bits at a time, converting BE -> LE. */
        for (i = 0; i < prm->sense_buffer_len/4; i++) {
            uint32_t v;

            v = get_unaligned_be32(
                    &((uint32_t *)prm->sense_buffer)[i]);
            put_unaligned_le32(v,
                &((uint32_t *)ctio->u.status1.sense_data)[i]);
        }
        qlt_print_dif_err(prm);

    } else {
        /* No sense: status mode 1 with an empty sense area. */
        ctio->u.status1.flags &=
            ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
        ctio->u.status1.flags |=
            cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
        ctio->u.status1.sense_length = 0;
        memset(ctio->u.status1.sense_data, 0,
            sizeof(ctio->u.status1.sense_data));
    }

    /* Sense with len > 24, is it possible ??? */
}
2920 
2921 static inline int
2922 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2923 {
2924     switch (se_cmd->prot_op) {
2925     case TARGET_PROT_DOUT_INSERT:
2926     case TARGET_PROT_DIN_STRIP:
2927         if (ql2xenablehba_err_chk >= 1)
2928             return 1;
2929         break;
2930     case TARGET_PROT_DOUT_PASS:
2931     case TARGET_PROT_DIN_PASS:
2932         if (ql2xenablehba_err_chk >= 2)
2933             return 1;
2934         break;
2935     case TARGET_PROT_DIN_INSERT:
2936     case TARGET_PROT_DOUT_STRIP:
2937         return 1;
2938     default:
2939         break;
2940     }
2941     return 0;
2942 }
2943 
2944 static inline int
2945 qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
2946 {
2947     switch (se_cmd->prot_op) {
2948     case TARGET_PROT_DIN_INSERT:
2949     case TARGET_PROT_DOUT_INSERT:
2950     case TARGET_PROT_DIN_STRIP:
2951     case TARGET_PROT_DOUT_STRIP:
2952     case TARGET_PROT_DIN_PASS:
2953     case TARGET_PROT_DOUT_PASS:
2954         return 1;
2955     default:
2956         return 0;
2957     }
2958     return 0;
2959 }
2960 
2961 /*
2962  * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
2963  */
/*
 * Populate the CRC context's APP and REF tag fields and masks for the
 * command's T10-PI protection type, and fold any extra firmware
 * protection options into *pfw_prot_opts.
 */
static void
qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
    uint16_t *pfw_prot_opts)
{
    struct se_cmd *se_cmd = &cmd->se_cmd;
    /* REF tag is the low 32 bits of the task LBA. */
    uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
    scsi_qla_host_t *vha = cmd->tgt->vha;
    struct qla_hw_data *ha = vha->hw;
    uint32_t t32 = 0;

    /*
     * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
     * have been implemented by TCM, before AppTag is avail.
     * Look for modesense_handlers[]
     */
    ctx->app_tag = 0;
    ctx->app_tag_mask[0] = 0x0;
    ctx->app_tag_mask[1] = 0x0;

    /* Uninitialized-block escapes, when the hardware supports them. */
    if (IS_PI_UNINIT_CAPABLE(ha)) {
        if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
            (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
            *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
        else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
            *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
    }

    t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);

    switch (se_cmd->prot_type) {
    case TARGET_DIF_TYPE0_PROT:
        /*
         * No check for ql2xenablehba_err_chk, as it
         * would be an I/O error if hba tag generation
         * is not done.
         */
        ctx->ref_tag = cpu_to_le32(lba);
        /* enable ALL bytes of the ref tag */
        ctx->ref_tag_mask[0] = 0xff;
        ctx->ref_tag_mask[1] = 0xff;
        ctx->ref_tag_mask[2] = 0xff;
        ctx->ref_tag_mask[3] = 0xff;
        break;
    case TARGET_DIF_TYPE1_PROT:
        /*
         * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
         * REF tag, and 16 bit app tag.
         */
        ctx->ref_tag = cpu_to_le32(lba);
        if (!qla_tgt_ref_mask_check(se_cmd) ||
        !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
            /* REF tag checking disabled for this command. */
            *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
            break;
        }
        /* enable ALL bytes of the ref tag */
        ctx->ref_tag_mask[0] = 0xff;
        ctx->ref_tag_mask[1] = 0xff;
        ctx->ref_tag_mask[2] = 0xff;
        ctx->ref_tag_mask[3] = 0xff;
        break;
    case TARGET_DIF_TYPE2_PROT:
        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
         * tag has to match LBA in CDB + N
         */
        ctx->ref_tag = cpu_to_le32(lba);
        if (!qla_tgt_ref_mask_check(se_cmd) ||
        !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
            /* REF tag checking disabled for this command. */
            *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
            break;
        }
        /* enable ALL bytes of the ref tag */
        ctx->ref_tag_mask[0] = 0xff;
        ctx->ref_tag_mask[1] = 0xff;
        ctx->ref_tag_mask[2] = 0xff;
        ctx->ref_tag_mask[3] = 0xff;
        break;
    case TARGET_DIF_TYPE3_PROT:
        /* For TYPE 3 protection: 16 bit GUARD only */
        *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
        ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
        ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
        break;
    }
}
3049 
/*
 * Build a CTIO CRC_2 IOCB (T10-PI capable) for the command described by
 * @prm: reserve a handle, fill in the exchange/addressing fields,
 * allocate and populate a CRC context from the DMA pool, and walk the
 * data (and, when bundling, protection) scatterlists into DSDs.
 *
 * Returns QLA_SUCCESS, -EAGAIN when no handle is free, or
 * QLA_FUNCTION_FAILED on CRC-context allocation / SG-walk failure
 * (cleanup of the command itself is the caller's responsibility).
 */
static inline int
qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
    struct dsd64        *cur_dsd;
    uint32_t        transfer_length = 0;
    uint32_t        data_bytes;
    uint32_t        dif_bytes;
    uint8_t         bundling = 1;
    struct crc_context  *crc_ctx_pkt = NULL;
    struct qla_hw_data  *ha;
    struct ctio_crc2_to_fw  *pkt;
    dma_addr_t      crc_ctx_dma;
    uint16_t        fw_prot_opts = 0;
    struct qla_tgt_cmd  *cmd = prm->cmd;
    struct se_cmd       *se_cmd = &cmd->se_cmd;
    uint32_t h;
    struct atio_from_isp *atio = &prm->cmd->atio;
    struct qla_tc_param tc;
    uint16_t t16;
    scsi_qla_host_t *vha = cmd->vha;

    ha = vha->hw;

    /* The IOCB is constructed directly on the request ring. */
    pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
    prm->pkt = pkt;
    memset(pkt, 0, sizeof(*pkt));

    ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
        "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
        cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
        prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);

    /* INSERT/STRIP by the HBA keep data and PI in separate streams. */
    if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
        (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
        bundling = 0;

    /* Compute dif len and adjust data len to include protection */
    data_bytes = cmd->bufflen;
    dif_bytes  = (data_bytes / cmd->blk_sz) * 8;

    switch (se_cmd->prot_op) {
    case TARGET_PROT_DIN_INSERT:
    case TARGET_PROT_DOUT_STRIP:
        transfer_length = data_bytes;
        if (cmd->prot_sg_cnt)
            data_bytes += dif_bytes;
        break;
    case TARGET_PROT_DIN_STRIP:
    case TARGET_PROT_DOUT_INSERT:
    case TARGET_PROT_DIN_PASS:
    case TARGET_PROT_DOUT_PASS:
        /* PI travels on the wire with the data. */
        transfer_length = data_bytes + dif_bytes;
        break;
    default:
        BUG();
        break;
    }

    if (!qlt_hba_err_chk_enabled(se_cmd))
        fw_prot_opts |= 0x10; /* Disable Guard tag checking */
    /* HBA error checking enabled */
    else if (IS_PI_UNINIT_CAPABLE(ha)) {
        if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
            (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
            fw_prot_opts |= PO_DIS_VALD_APP_ESC;
        else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
            fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
    }

    /* Map the TCM protection op onto the firmware DIF mode. */
    switch (se_cmd->prot_op) {
    case TARGET_PROT_DIN_INSERT:
    case TARGET_PROT_DOUT_INSERT:
        fw_prot_opts |= PO_MODE_DIF_INSERT;
        break;
    case TARGET_PROT_DIN_STRIP:
    case TARGET_PROT_DOUT_STRIP:
        fw_prot_opts |= PO_MODE_DIF_REMOVE;
        break;
    case TARGET_PROT_DIN_PASS:
    case TARGET_PROT_DOUT_PASS:
        fw_prot_opts |= PO_MODE_DIF_PASS;
        /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
        break;
    default:/* Normal Request */
        fw_prot_opts |= PO_MODE_DIF_PASS;
        break;
    }

    /* ---- PKT ---- */
    /* Update entry type to indicate Command Type CRC_2 IOCB */
    pkt->entry_type  = CTIO_CRC2;
    pkt->entry_count = 1;
    pkt->vp_index = cmd->vp_idx;

    h = qlt_make_handle(qpair);
    if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
        /*
         * CTIO type 7 from the firmware doesn't provide a way to
         * know the initiator's LOOP ID, hence we can't find
         * the session and, so, the command.
         */
        return -EAGAIN;
    } else
        qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;

    pkt->handle  = make_handle(qpair->req->id, h);
    pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
    pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
    pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
    pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
    pkt->exchange_addr   = atio->u.isp24.exchange_addr;

    /* silence compile warning */
    t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
    pkt->ox_id  = cpu_to_le16(t16);

    /* FCP task attributes are shifted into the flags word (bit 9 up). */
    t16 = (atio->u.isp24.attr << 9);
    pkt->flags |= cpu_to_le16(t16);
    pkt->relative_offset = cpu_to_le32(prm->cmd->offset);

    /* Set transfer direction */
    if (cmd->dma_data_direction == DMA_TO_DEVICE)
        pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
    else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
        pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);

    pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
    /* Fibre channel byte count */
    pkt->transfer_length = cpu_to_le32(transfer_length);

    /* ----- CRC context -------- */

    /* Allocate CRC context from global pool */
    crc_ctx_pkt = cmd->ctx =
        dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

    if (!crc_ctx_pkt)
        goto crc_queuing_error;

    crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
    INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

    /* Set handle */
    crc_ctx_pkt->handle = pkt->handle;

    qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);

    put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
    pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);

    if (!bundling) {
        cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
    } else {
        /*
         * Configure Bundling if we need to fetch interleaving
         * protection PCI accesses
         */
        fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
        crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
        crc_ctx_pkt->u.bundling.dseg_count =
            cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
        cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
    }

    /* Finish the common fields of CRC pkt */
    crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
    crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
    crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
    crc_ctx_pkt->guard_seed = cpu_to_le16(0);

    memset((uint8_t *)&tc, 0 , sizeof(tc));
    tc.vha = vha;
    tc.blk_sz = cmd->blk_sz;
    tc.bufflen = cmd->bufflen;
    tc.sg = cmd->sg;
    tc.prot_sg = cmd->prot_sg;
    tc.ctx = crc_ctx_pkt;
    tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;

    /* Walks data segments */
    pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);

    if (!bundling && prm->prot_seg_cnt) {
        if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
            prm->tot_dsds, &tc))
            goto crc_queuing_error;
    } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
        (prm->tot_dsds - prm->prot_seg_cnt), &tc))
        goto crc_queuing_error;

    if (bundling && prm->prot_seg_cnt) {
        /* Walks dif segments */
        pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;

        cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
        if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
            prm->prot_seg_cnt, cmd))
            goto crc_queuing_error;
    }
    return QLA_SUCCESS;

crc_queuing_error:
    /* Cleanup will be performed by the caller */
    qpair->req->outstanding_cmds[h] = NULL;

    return QLA_FUNCTION_FAILED;
}
3257 
3258 /*
3259  * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
3260  * QLA_TGT_XMIT_STATUS for >= 24xx silicon
3261  */
/*
 * Transmit data and/or SCSI status for @cmd to the initiator.
 *
 * Builds one CTIO (plus continuation IOCBs for data, and possibly an
 * extra status-only CTIO) under the qpair lock and rings the request
 * queue. Returns 0 on success (including the cases where the command
 * is silently completed because the firmware is down or the chip was
 * reset since the command arrived) or a negative errno on failure.
 */
int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
    uint8_t scsi_status)
{
    struct scsi_qla_host *vha = cmd->vha;
    struct qla_qpair *qpair = cmd->qpair;
    struct ctio7_to_24xx *pkt;
    struct qla_tgt_prm prm;
    uint32_t full_req_cnt = 0;
    unsigned long flags = 0;
    int res;

    /* Firmware down, stale command, or session being torn down. */
    if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
        (cmd->sess && cmd->sess->deleted)) {
        cmd->state = QLA_TGT_STATE_PROCESSED;
        return 0;
    }

    ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
        "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
        (xmit_type & QLA_TGT_XMIT_STATUS) ?
        1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
        &cmd->se_cmd, qpair->id);

    /* Map SG lists and compute how many IOCBs this response needs. */
    res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
        &full_req_cnt);
    if (unlikely(res != 0)) {
        return res;
    }

    spin_lock_irqsave(qpair->qp_lock_ptr, flags);

    if (xmit_type == QLA_TGT_XMIT_STATUS)
        qpair->tgt_counters.core_qla_snd_status++;
    else
        qpair->tgt_counters.core_qla_que_buf++;

    /* Re-check under the lock: a reset may have raced with us. */
    if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
        /*
         * Either the port is not online or this request was from
         * previous life, just abort the processing.
         */
        cmd->state = QLA_TGT_STATE_PROCESSED;
        ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
            "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
            vha->flags.online, qla2x00_reset_active(vha),
            cmd->reset_count, qpair->chip_reset);
        res = 0;
        goto out_unmap_unlock;
    }

    /* Does F/W have an IOCBs for this request */
    res = qlt_check_reserve_free_req(qpair, full_req_cnt);
    if (unlikely(res))
        goto out_unmap_unlock;

    /* T10-PI commands with data need the CRC2 IOCB variant. */
    if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
        res = qlt_build_ctio_crc2_pkt(qpair, &prm);
    else
        res = qlt_24xx_build_ctio_pkt(qpair, &prm);
    if (unlikely(res != 0)) {
        /* Give back the IOCB slots we reserved above. */
        qpair->req->cnt += full_req_cnt;
        goto out_unmap_unlock;
    }

    pkt = (struct ctio7_to_24xx *)prm.pkt;

    if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
        pkt->u.status0.flags |=
            cpu_to_le16(CTIO7_FLAGS_DATA_IN |
            CTIO7_FLAGS_STATUS_MODE_0);

        if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
            qlt_load_data_segments(&prm);

        if (prm.add_status_pkt == 0) {
            /* Status rides in the same CTIO as the data. */
            if (xmit_type & QLA_TGT_XMIT_STATUS) {
                pkt->u.status0.scsi_status =
                    cpu_to_le16(prm.rq_result);
                if (!cmd->edif)
                    pkt->u.status0.residual =
                        cpu_to_le32(prm.residual);

                pkt->u.status0.flags |= cpu_to_le16(
                    CTIO7_FLAGS_SEND_STATUS);
                if (qlt_need_explicit_conf(cmd, 0)) {
                    pkt->u.status0.flags |=
                        cpu_to_le16(
                        CTIO7_FLAGS_EXPLICIT_CONFORM |
                        CTIO7_FLAGS_CONFORM_REQ);
                }
            }

        } else {
            /*
             * We have already made sure that there is sufficient
             * amount of request entries to not drop HW lock in
             * req_pkt().
             */
            struct ctio7_to_24xx *ctio =
                (struct ctio7_to_24xx *)qlt_get_req_pkt(
                    qpair->req);

            ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
                "Building additional status packet 0x%p.\n",
                ctio);

            /*
             * T10Dif: ctio_crc2_to_fw overlay ontop of
             * ctio7_to_24xx
             */
            memcpy(ctio, pkt, sizeof(*ctio));
            /* reset back to CTIO7 */
            ctio->entry_count = 1;
            ctio->entry_type = CTIO_TYPE7;
            ctio->dseg_count = 0;
            ctio->u.status1.flags &= ~cpu_to_le16(
                CTIO7_FLAGS_DATA_IN);

            /* Real finish is ctio_m1's finish */
            pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
            pkt->u.status0.flags |= cpu_to_le16(
                CTIO7_FLAGS_DONT_RET_CTIO);

            /* qlt_24xx_init_ctio_to_isp will correct
             * all necessary fields that's part of CTIO7.
             * There should be no residual of CTIO-CRC2 data.
             */
            qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
                &prm);
        }
    } else
        qlt_24xx_init_ctio_to_isp(pkt, &prm);


    cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
    cmd->cmd_sent_to_fw = 1;
    cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

    /* Memory Barrier */
    wmb();
    if (qpair->reqq_start_iocbs)
        qpair->reqq_start_iocbs(qpair);
    else
        qla2x00_start_iocbs(vha, qpair->req);
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    return 0;

out_unmap_unlock:
    qlt_unmap_sg(vha, cmd);
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    return res;
}
EXPORT_SYMBOL(qlt_xmit_response);
3417 
/*
 * Tell the initiator we are ready to receive write data for @cmd:
 * map the data buffer, build a DATA_OUT CTIO under the qpair lock and
 * ring the request queue. On a firmware-down / chip-reset race the
 * command is completed as aborted instead. Returns 0 on success or a
 * negative errno / QLA error code on failure.
 */
int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
    struct ctio7_to_24xx *pkt;
    struct scsi_qla_host *vha = cmd->vha;
    struct qla_tgt *tgt = cmd->tgt;
    struct qla_tgt_prm prm;
    unsigned long flags = 0;
    int res = 0;
    struct qla_qpair *qpair = cmd->qpair;

    memset(&prm, 0, sizeof(prm));
    prm.cmd = cmd;
    prm.tgt = tgt;
    prm.sg = NULL;
    prm.req_cnt = 1;

    if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
        (cmd->sess && cmd->sess->deleted)) {
        /*
         * Either the port is not online or this request was from
         * previous life, just abort the processing.
         */
        cmd->aborted = 1;
        cmd->write_data_transferred = 0;
        cmd->state = QLA_TGT_STATE_DATA_IN;
        vha->hw->tgt.tgt_ops->handle_data(cmd);
        ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
            "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
            vha->flags.online, qla2x00_reset_active(vha),
            cmd->reset_count, qpair->chip_reset);
        return 0;
    }

    /* Calculate number of entries and segments required */
    if (qlt_pci_map_calc_cnt(&prm) != 0)
        return -EAGAIN;

    spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    /* Does F/W have an IOCBs for this request */
    res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
    if (res != 0)
        goto out_unlock_free_unmap;
    /* T10-PI commands need the CRC2 IOCB variant. */
    if (cmd->se_cmd.prot_op)
        res = qlt_build_ctio_crc2_pkt(qpair, &prm);
    else
        res = qlt_24xx_build_ctio_pkt(qpair, &prm);

    if (unlikely(res != 0)) {
        /* Give back the IOCB slots we reserved above. */
        qpair->req->cnt += prm.req_cnt;
        goto out_unlock_free_unmap;
    }

    pkt = (struct ctio7_to_24xx *)prm.pkt;
    pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
        CTIO7_FLAGS_STATUS_MODE_0);

    if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
        qlt_load_data_segments(&prm);

    cmd->state = QLA_TGT_STATE_NEED_DATA;
    cmd->cmd_sent_to_fw = 1;
    cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);

    /* Memory Barrier */
    wmb();
    if (qpair->reqq_start_iocbs)
        qpair->reqq_start_iocbs(qpair);
    else
        qla2x00_start_iocbs(vha, qpair->req);
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    return res;

out_unlock_free_unmap:
    qlt_unmap_sg(vha, cmd);
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    return res;
}
EXPORT_SYMBOL(qlt_rdy_to_xfer);
3498 
3499 
/*
 * it is assumed either hardware_lock or qpair lock is held.
 *
 * Decode a CTIO_DIF_ERROR completion: extract the actual vs. expected
 * T10-DIF tuples reported by firmware, classify the mismatch (app tag,
 * ref tag, or guard), and either hand the command back to TCM (write
 * path) or send CHECK CONDITION with the matching sense data and free it.
 */
static void
qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
    struct ctio_crc_from_fw *sts)
{
    uint8_t     *ap = &sts->actual_dif[0];
    uint8_t     *ep = &sts->expected_dif[0];
    uint64_t    lba = cmd->se_cmd.t_task_lba;
    uint8_t scsi_status, sense_key, asc, ascq;
    unsigned long flags;
    struct scsi_qla_host *vha = cmd->vha;

    cmd->trc_flags |= TRC_DIF_ERR;

    /* Firmware reports the tuples big-endian and possibly unaligned. */
    cmd->a_guard   = get_unaligned_be16(ap + 0);
    cmd->a_app_tag = get_unaligned_be16(ap + 2);
    cmd->a_ref_tag = get_unaligned_be32(ap + 4);

    cmd->e_guard   = get_unaligned_be16(ep + 0);
    cmd->e_app_tag = get_unaligned_be16(ep + 2);
    cmd->e_ref_tag = get_unaligned_be32(ep + 4);

    ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
        "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);

    scsi_status = sense_key = asc = ascq = 0;

    /* check appl tag */
    if (cmd->e_app_tag != cmd->a_app_tag) {
        ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
            "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
            cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
            cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
            cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
            cmd->atio.u.isp24.fcp_hdr.ox_id);

        /* No goto here: a simultaneous ref-tag error takes precedence. */
        cmd->dif_err_code = DIF_ERR_APP;
        scsi_status = SAM_STAT_CHECK_CONDITION;
        sense_key = ABORTED_COMMAND;
        asc = 0x10;     /* ID CRC OR ECC ERROR family */
        ascq = 0x2;     /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
    }

    /* check ref tag */
    if (cmd->e_ref_tag != cmd->a_ref_tag) {
        ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
            "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
            cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
            cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
            cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
            cmd->atio.u.isp24.fcp_hdr.ox_id);

        cmd->dif_err_code = DIF_ERR_REF;
        scsi_status = SAM_STAT_CHECK_CONDITION;
        sense_key = ABORTED_COMMAND;
        asc = 0x10;
        ascq = 0x3;     /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
        /* Ref-tag error overrides any app-tag error; skip guard check. */
        goto out;
    }

    /* check guard */
    if (cmd->e_guard != cmd->a_guard) {
        ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
            "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
            cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
            cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
            cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
            cmd->atio.u.isp24.fcp_hdr.ox_id);

        cmd->dif_err_code = DIF_ERR_GRD;
        scsi_status = SAM_STAT_CHECK_CONDITION;
        sense_key = ABORTED_COMMAND;
        asc = 0x10;
        ascq = 0x1;     /* LOGICAL BLOCK GUARD CHECK FAILED */
    }
out:
    switch (cmd->state) {
    case QLA_TGT_STATE_NEED_DATA:
        /* handle_data will load DIF error code  */
        cmd->state = QLA_TGT_STATE_DATA_IN;
        vha->hw->tgt.tgt_ops->handle_data(cmd);
        break;
    default:
        /* Check the abort flag under cmd_lock to avoid racing aborters. */
        spin_lock_irqsave(&cmd->cmd_lock, flags);
        if (cmd->aborted) {
            spin_unlock_irqrestore(&cmd->cmd_lock, flags);
            vha->hw->tgt.tgt_ops->free_cmd(cmd);
            break;
        }
        spin_unlock_irqrestore(&cmd->cmd_lock, flags);

        qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
            ascq);
        /* assume scsi status gets out on the wire.
         * Will not wait for completion.
         */
        vha->hw->tgt.tgt_ops->free_cmd(cmd);
        break;
    }
}
3602 
3603 /* If hardware_lock held on entry, might drop it, then reaquire */
3604 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
3605 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3606     struct imm_ntfy_from_isp *ntfy)
3607 {
3608     struct nack_to_isp *nack;
3609     struct qla_hw_data *ha = vha->hw;
3610     request_t *pkt;
3611     int ret = 0;
3612 
3613     ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3614         "Sending TERM ELS CTIO (ha=%p)\n", ha);
3615 
3616     pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3617     if (pkt == NULL) {
3618         ql_dbg(ql_dbg_tgt, vha, 0xe080,
3619             "qla_target(%d): %s failed: unable to allocate "
3620             "request packet\n", vha->vp_idx, __func__);
3621         return -ENOMEM;
3622     }
3623 
3624     pkt->entry_type = NOTIFY_ACK_TYPE;
3625     pkt->entry_count = 1;
3626     pkt->handle = QLA_TGT_SKIP_HANDLE;
3627 
3628     nack = (struct nack_to_isp *)pkt;
3629     nack->ox_id = ntfy->ox_id;
3630 
3631     nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3632     if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3633         nack->u.isp24.flags = ntfy->u.isp24.flags &
3634             cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3635     }
3636 
3637     /* terminate */
3638     nack->u.isp24.flags |=
3639         __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
3640 
3641     nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3642     nack->u.isp24.status = ntfy->u.isp24.status;
3643     nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3644     nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3645     nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3646     nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3647     nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3648     nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3649 
3650     qla2x00_start_iocbs(vha, vha->req);
3651     return ret;
3652 }
3653 
/*
 * Terminate the exchange for an immediate notify.  The caller must
 * already hold the hardware lock (@ha_locked is asserted).
 */
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *imm, int ha_locked)
{
    int ret;

    WARN_ON_ONCE(!ha_locked);

    /* Keep the call outside pr_debug(): its args may be compiled away. */
    ret = __qlt_send_term_imm_notif(vha, imm);
    pr_debug("rc = %d\n", ret);
}
3663 
/*
 * If hardware_lock held on entry, might drop it, then reacquire
 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
 *
 * Queue a CTIO7 with the TERMINATE flag for the exchange described by
 * @atio (and optionally owned by @cmd).  Returns -ENOMEM if no IOCB is
 * available, 1 when @cmd is in a terminal state and the caller may clean
 * it up, 0 otherwise.
 */
static int __qlt_send_term_exchange(struct qla_qpair *qpair,
    struct qla_tgt_cmd *cmd,
    struct atio_from_isp *atio)
{
    struct scsi_qla_host *vha = qpair->vha;
    struct ctio7_to_24xx *ctio24;
    struct qla_hw_data *ha = vha->hw;
    request_t *pkt;
    int ret = 0;
    uint16_t temp;

    ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

    /* NPIV: the command may belong to a vport vha, not the qpair's. */
    if (cmd)
        vha = cmd->vha;

    pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
    if (pkt == NULL) {
        ql_dbg(ql_dbg_tgt, vha, 0xe050,
            "qla_target(%d): %s failed: unable to allocate "
            "request packet\n", vha->vp_idx, __func__);
        return -ENOMEM;
    }

    if (cmd != NULL) {
        if (cmd->state < QLA_TGT_STATE_PROCESSED) {
            ql_dbg(ql_dbg_tgt, vha, 0xe051,
                "qla_target(%d): Terminating cmd %p with "
                "incorrect state %d\n", vha->vp_idx, cmd,
                cmd->state);
        } else
            ret = 1;    /* caller may free/clean up the cmd */
    }

    qpair->tgt_counters.num_term_xchg_sent++;
    pkt->entry_count = 1;
    /* Completion for this CTIO is ignored (skip handle). */
    pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

    ctio24 = (struct ctio7_to_24xx *)pkt;
    ctio24->entry_type = CTIO_TYPE7;
    ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED);
    ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
    ctio24->vp_index = vha->vp_idx;
    ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
    ctio24->exchange_addr = atio->u.isp24.exchange_addr;
    /* attr bits live at 9..12 of the CTIO flags word. */
    temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
        CTIO7_FLAGS_TERMINATE;
    ctio24->u.status1.flags = cpu_to_le16(temp);
    temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
    ctio24->u.status1.ox_id = cpu_to_le16(temp);

    /* Memory Barrier */
    wmb();
    if (qpair->reqq_start_iocbs)
        qpair->reqq_start_iocbs(qpair);
    else
        qla2x00_start_iocbs(vha, qpair->req);
    return ret;
}
3727 
/*
 * Terminate the exchange for @atio (optionally tied to @cmd).  @ha_locked
 * says whether the caller already holds the qpair lock; if not, it is
 * taken here.  On IOCB exhaustion the ATIO is queued as a Q-full command
 * so a response can go out later.  Unless @ul_abort is set or the command
 * is already aborted, the command's S/G mapping and memory are released.
 */
static void qlt_send_term_exchange(struct qla_qpair *qpair,
    struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
    int ul_abort)
{
    struct scsi_qla_host *vha;
    unsigned long flags = 0;
    int rc;

    /* why use different vha? NPIV */
    if (cmd)
        vha = cmd->vha;
    else
        vha = qpair->vha;

    if (ha_locked) {
        rc = __qlt_send_term_exchange(qpair, cmd, atio);
        if (rc == -ENOMEM)
            qlt_alloc_qfull_cmd(vha, atio, 0, 0);
        goto done;
    }
    spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    rc = __qlt_send_term_exchange(qpair, cmd, atio);
    if (rc == -ENOMEM)
        qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
    if (cmd && !ul_abort && !cmd->aborted) {
        if (cmd->sg_mapped)
            qlt_unmap_sg(vha, cmd);
        vha->hw->tgt.tgt_ops->free_cmd(cmd);
    }

    /* 'flags' is only meaningful if we took the lock above. */
    if (!ha_locked)
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    return;
}
3765 
3766 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3767 {
3768     struct list_head free_list;
3769     struct qla_tgt_cmd *cmd, *tcmd;
3770 
3771     vha->hw->tgt.leak_exchg_thresh_hold =
3772         (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3773 
3774     cmd = tcmd = NULL;
3775     if (!list_empty(&vha->hw->tgt.q_full_list)) {
3776         INIT_LIST_HEAD(&free_list);
3777         list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3778 
3779         list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3780             list_del(&cmd->cmd_list);
3781             /* This cmd was never sent to TCM.  There is no need
3782              * to schedule free or call free_cmd
3783              */
3784             qlt_free_cmd(cmd);
3785             vha->hw->tgt.num_qfull_cmds_alloc--;
3786         }
3787     }
3788     vha->hw->tgt.num_qfull_cmds_dropped = 0;
3789 }
3790 
3791 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3792 {
3793     uint32_t total_leaked;
3794 
3795     total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3796 
3797     if (vha->hw->tgt.leak_exchg_thresh_hold &&
3798         (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3799 
3800         ql_dbg(ql_dbg_tgt, vha, 0xe079,
3801             "Chip reset due to exchange starvation: %d/%d.\n",
3802             total_leaked, vha->hw->cur_fw_xcb_count);
3803 
3804         if (IS_P3P_TYPE(vha->hw))
3805             set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3806         else
3807             set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3808         qla2xxx_wake_dpc(vha);
3809     }
3810 
3811 }
3812 
/*
 * qlt_abort_cmd() - terminate the FC exchange for a command the upper
 * layer (TCM) is aborting.
 *
 * Returns 0 when a terminate-exchange was issued, -EIO when the command
 * was already aborted (an expected double-abort race, logged only).
 */
int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
    struct qla_tgt *tgt = cmd->tgt;
    struct scsi_qla_host *vha = tgt->vha;
    struct se_cmd *se_cmd = &cmd->se_cmd;
    unsigned long flags;

    ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
        "qla_target(%d): terminating exchange for aborted cmd=%p "
        "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
        se_cmd->tag);

    /* cmd_lock serializes against a concurrent aborter. */
    spin_lock_irqsave(&cmd->cmd_lock, flags);
    if (cmd->aborted) {
        if (cmd->sg_mapped)
            qlt_unmap_sg(vha, cmd);

        spin_unlock_irqrestore(&cmd->cmd_lock, flags);
        /*
         * It's normal to see 2 calls in this path:
         *  1) XFER Rdy completion + CMD_T_ABORT
         *  2) TCM TMR - drain_state_list
         */
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
            "multiple abort. %p transport_state %x, t_state %x, "
            "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
            cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
        return -EIO;
    }
    cmd->aborted = 1;
    cmd->trc_flags |= TRC_ABORT;
    spin_unlock_irqrestore(&cmd->cmd_lock, flags);

    /* ul_abort=1: the upper layer owns the final free of this cmd. */
    qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
    return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
3850 
3851 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3852 {
3853     struct fc_port *sess = cmd->sess;
3854 
3855     ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3856         "%s: se_cmd[%p] ox_id %04x\n",
3857         __func__, &cmd->se_cmd,
3858         be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3859 
3860     BUG_ON(cmd->cmd_in_wq);
3861 
3862     if (!cmd->q_full)
3863         qlt_decr_num_pend_cmds(cmd->vha);
3864 
3865     BUG_ON(cmd->sg_mapped);
3866     cmd->jiffies_at_free = get_jiffies_64();
3867 
3868     if (!sess || !sess->se_sess) {
3869         WARN_ON(1);
3870         return;
3871     }
3872     cmd->jiffies_at_free = get_jiffies_64();
3873     cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
3874 }
3875 EXPORT_SYMBOL(qlt_free_cmd);
3876 
3877 /*
3878  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3879  */
3880 static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
3881     struct qla_tgt_cmd *cmd, uint32_t status)
3882 {
3883     int term = 0;
3884     struct scsi_qla_host *vha = qpair->vha;
3885 
3886     if (cmd->se_cmd.prot_op)
3887         ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3888             "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
3889             "se_cmd=%p tag[%x] op %#x/%s",
3890              cmd->lba, cmd->lba,
3891              cmd->num_blks, &cmd->se_cmd,
3892              cmd->atio.u.isp24.exchange_addr,
3893              cmd->se_cmd.prot_op,
3894              prot_op_str(cmd->se_cmd.prot_op));
3895 
3896     if (ctio != NULL) {
3897         struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
3898 
3899         term = !(c->flags &
3900             cpu_to_le16(OF_TERM_EXCH));
3901     } else
3902         term = 1;
3903 
3904     if (term)
3905         qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
3906 
3907     return term;
3908 }
3909 
3910 
/* ha->hardware_lock supposed to be held on entry */
/*
 * Translate a CTIO completion handle back to the driver command that owns
 * the exchange.  Returns NULL (logging where useful) for skip-handles,
 * unknown request queues, out-of-range or unmatched handles.  On success
 * the slot in req->outstanding_cmds[] is cleared, transferring ownership
 * of the command to the caller.
 */
static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
    struct rsp_que *rsp, uint32_t handle, void *ctio)
{
    void *cmd = NULL;
    struct req_que *req;
    int qid = GET_QID(handle);
    uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;

    if (unlikely(h == QLA_TGT_SKIP_HANDLE))
        return NULL;

    if (qid == rsp->req->id) {
        req = rsp->req;
    } else if (vha->hw->req_q_map[qid]) {
        /* Completion arrived on a different queue than it was issued on. */
        ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
            "qla_target(%d): CTIO completion with different QID %d handle %x\n",
            vha->vp_idx, rsp->id, handle);
        req = vha->hw->req_q_map[qid];
    } else {
        return NULL;
    }

    h &= QLA_CMD_HANDLE_MASK;

    if (h != QLA_TGT_NULL_HANDLE) {
        if (unlikely(h >= req->num_outstanding_cmds)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe052,
                "qla_target(%d): Wrong handle %x received\n",
                vha->vp_idx, handle);
            return NULL;
        }

        cmd = req->outstanding_cmds[h];
        if (unlikely(cmd == NULL)) {
            ql_dbg(ql_dbg_async, vha, 0xe053,
                "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
                vha->vp_idx, handle, req->id, rsp->id);
            return NULL;
        }
        req->outstanding_cmds[h] = NULL;
    } else if (ctio != NULL) {
        /* We can't get loop ID from CTIO7 */
        ql_dbg(ql_dbg_tgt, vha, 0xe054,
            "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
            "support NULL handles\n", vha->vp_idx);
        return NULL;
    }

    return cmd;
}
3962 
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 *
 * Handle a CTIO completion from the firmware: map the handle back to the
 * owning command, process any error status (possibly terminating the
 * exchange), then, based on command state, hand write data to TCM, note
 * a finished aborted command, or free the command.
 */
static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
    struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
    struct qla_hw_data *ha = vha->hw;
    struct se_cmd *se_cmd;
    struct qla_tgt_cmd *cmd;
    struct qla_qpair *qpair = rsp->qpair;

    if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
        /* That could happen only in case of an error/reset/abort */
        if (status != CTIO_SUCCESS) {
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
                "Intermediate CTIO received"
                " (status %x)\n", status);
        }
        return;
    }

    cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
    if (cmd == NULL)
        return;

    /* EDIF: a DATA_OUT completion may race an SA delete for the session. */
    if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) &&
        cmd->sess) {
        qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
            (struct ctio7_from_24xx *)ctio);
    }

    se_cmd = &cmd->se_cmd;
    cmd->cmd_sent_to_fw = 0;

    qlt_unmap_sg(vha, cmd);

    if (unlikely(status != CTIO_SUCCESS)) {
        switch (status & 0xFFFF) {
        case CTIO_INVALID_RX_ID:
            if (printk_ratelimit())
                dev_info(&vha->hw->pdev->dev,
                    "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
                    vha->vp_idx, cmd->atio.u.isp24.attr,
                    ((cmd->ctio_flags >> 9) & 0xf),
                    cmd->ctio_flags);

            break;
        case CTIO_LIP_RESET:
        case CTIO_TARGET_RESET:
        case CTIO_ABORTED:
            /* driver request abort via Terminate exchange */
        case CTIO_TIMEOUT:
            /* They are OK */
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
                "qla_target(%d): CTIO with "
                "status %#x received, state %x, se_cmd %p, "
                "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
                "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
                status, cmd->state, se_cmd);
            break;

        case CTIO_PORT_LOGGED_OUT:
        case CTIO_PORT_UNAVAILABLE:
        {
            int logged_out =
                (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;

            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
                "qla_target(%d): CTIO with %s status %x "
                "received (state %x, se_cmd %p)\n", vha->vp_idx,
                logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
                status, cmd->state, se_cmd);

            if (logged_out && cmd->sess) {
                /*
                 * Session is already logged out, but we need
                 * to notify initiator, who's not aware of this
                 */
                cmd->sess->send_els_logo = 1;
                ql_dbg(ql_dbg_disc, vha, 0x20f8,
                    "%s %d %8phC post del sess\n",
                    __func__, __LINE__, cmd->sess->port_name);

                qlt_schedule_sess_for_deletion(cmd->sess);
            }
            break;
        }
        case CTIO_DIF_ERROR: {
            struct ctio_crc_from_fw *crc =
                (struct ctio_crc_from_fw *)ctio;
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
                "qla_target(%d): CTIO with DIF_ERROR status %x "
                "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
                "expect_dif[0x%llx]\n",
                vha->vp_idx, status, cmd->state, se_cmd,
                *((u64 *)&crc->actual_dif[0]),
                *((u64 *)&crc->expected_dif[0]));

            /* DIF handling frees or completes the cmd itself. */
            qlt_handle_dif_error(qpair, cmd, ctio);
            return;
        }

        case CTIO_FAST_AUTH_ERR:
        case CTIO_FAST_INCOMP_PAD_LEN:
        case CTIO_FAST_INVALID_REQ:
        case CTIO_FAST_SPI_ERR:
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
                "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n",
                vha->vp_idx, status, cmd->state, se_cmd);
            break;

        default:
            ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
                "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
                vha->vp_idx, status, cmd->state, se_cmd);
            break;
        }


        /* "cmd->aborted" means
         * cmd is already aborted/terminated, we don't
         * need to terminate again.  The exchange is already
         * cleaned up/freed at FW level.  Just cleanup at driver
         * level.
         */
        if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
            (!cmd->aborted)) {
            cmd->trc_flags |= TRC_CTIO_ERR;
            /* Non-zero means a terminate was sent; its completion
             * will come back through this path and finish the cmd. */
            if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
                return;
        }
    }

    if (cmd->state == QLA_TGT_STATE_PROCESSED) {
        cmd->trc_flags |= TRC_CTIO_DONE;
    } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
        /* Write data phase done: hand the data up to TCM. */
        cmd->state = QLA_TGT_STATE_DATA_IN;

        if (status == CTIO_SUCCESS)
            cmd->write_data_transferred = 1;

        ha->tgt.tgt_ops->handle_data(cmd);
        return;
    } else if (cmd->aborted) {
        cmd->trc_flags |= TRC_CTIO_ABORTED;
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
          "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
    } else {
        cmd->trc_flags |= TRC_CTIO_STRANGE;
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
            "qla_target(%d): A command in state (%d) should "
            "not return a CTIO complete\n", vha->vp_idx, cmd->state);
    }

    if (unlikely(status != CTIO_SUCCESS) &&
        !cmd->aborted) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
        dump_stack();
    }

    ha->tgt.tgt_ops->free_cmd(cmd);
}
4125 
4126 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
4127     uint8_t task_codes)
4128 {
4129     int fcp_task_attr;
4130 
4131     switch (task_codes) {
4132     case ATIO_SIMPLE_QUEUE:
4133         fcp_task_attr = TCM_SIMPLE_TAG;
4134         break;
4135     case ATIO_HEAD_OF_QUEUE:
4136         fcp_task_attr = TCM_HEAD_TAG;
4137         break;
4138     case ATIO_ORDERED_QUEUE:
4139         fcp_task_attr = TCM_ORDERED_TAG;
4140         break;
4141     case ATIO_ACA_QUEUE:
4142         fcp_task_attr = TCM_ACA_TAG;
4143         break;
4144     case ATIO_UNTAGGED:
4145         fcp_task_attr = TCM_SIMPLE_TAG;
4146         break;
4147     default:
4148         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
4149             "qla_target: unknown task code %x, use ORDERED instead\n",
4150             task_codes);
4151         fcp_task_attr = TCM_ORDERED_TAG;
4152         break;
4153     }
4154 
4155     return fcp_task_attr;
4156 }
4157 
/*
 * Process context for I/O path into tcm_qla2xxx code
 *
 * Hand a newly arrived command to TCM (handle_cmd); on failure or if it
 * was aborted before we got here, terminate the exchange and release the
 * command.  Both paths drop the extra session reference taken by
 * qlt_handle_cmd_for_atio().
 */
static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
    scsi_qla_host_t *vha = cmd->vha;
    struct qla_hw_data *ha = vha->hw;
    struct fc_port *sess = cmd->sess;
    struct atio_from_isp *atio = &cmd->atio;
    unsigned char *cdb;
    unsigned long flags;
    uint32_t data_length;
    int ret, fcp_task_attr, data_dir, bidi = 0;
    struct qla_qpair *qpair = cmd->qpair;

    cmd->cmd_in_wq = 0;
    cmd->trc_flags |= TRC_DO_WORK;

    if (cmd->aborted) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
            "cmd with tag %u is aborted\n",
            cmd->atio.u.isp24.exchange_addr);
        goto out_term;
    }

    spin_lock_init(&cmd->cmd_lock);
    cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
    cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);

    /* rddata+wrdata together indicates a bidirectional command. */
    if (atio->u.isp24.fcp_cmnd.rddata &&
        atio->u.isp24.fcp_cmnd.wrdata) {
        bidi = 1;
        data_dir = DMA_TO_DEVICE;
    } else if (atio->u.isp24.fcp_cmnd.rddata)
        data_dir = DMA_FROM_DEVICE;
    else if (atio->u.isp24.fcp_cmnd.wrdata)
        data_dir = DMA_TO_DEVICE;
    else
        data_dir = DMA_NONE;

    fcp_task_attr = qlt_get_fcp_task_attr(vha,
        atio->u.isp24.fcp_cmnd.task_attr);
    data_length = get_datalen_for_atio(atio);

    ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
                          fcp_task_attr, data_dir, bidi);
    if (ret != 0)
        goto out_term;
    /*
     * Drop extra session reference from qlt_handle_cmd_for_atio().
     */
    ha->tgt.tgt_ops->put_sess(sess);
    return;

out_term:
    ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
    /*
     * cmd has not sent to target yet, so pass NULL as the second
     * argument to qlt_send_term_exchange() and free the memory here.
     */
    cmd->trc_flags |= TRC_DO_WORK_ERR;
    spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);

    qlt_decr_num_pend_cmds(vha);
    /* The cmd never reached TCM, so return it to the LLD pool directly. */
    cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    /* Error path also owes the session reference back. */
    ha->tgt.tgt_ops->put_sess(sess);
}
4228 
4229 static void qlt_do_work(struct work_struct *work)
4230 {
4231     struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
4232     scsi_qla_host_t *vha = cmd->vha;
4233     unsigned long flags;
4234 
4235     spin_lock_irqsave(&vha->cmd_list_lock, flags);
4236     list_del(&cmd->cmd_list);
4237     spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4238 
4239     __qlt_do_work(cmd);
4240 }
4241 
4242 void qlt_clr_qp_table(struct scsi_qla_host *vha)
4243 {
4244     unsigned long flags;
4245     struct qla_hw_data *ha = vha->hw;
4246     struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4247     void *node;
4248     u64 key = 0;
4249 
4250     ql_log(ql_log_info, vha, 0x706c,
4251         "User update Number of Active Qpairs %d\n",
4252         ha->tgt.num_act_qpairs);
4253 
4254     spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4255 
4256     btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
4257         btree_remove64(&tgt->lun_qpair_map, key);
4258 
4259     ha->base_qpair->lun_cnt = 0;
4260     for (key = 0; key < ha->max_qpairs; key++)
4261         if (ha->queue_pair_map[key])
4262             ha->queue_pair_map[key]->lun_cnt = 0;
4263 
4264     spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4265 }
4266 
/*
 * Choose the queue pair that will process @cmd.  The decision is cached
 * per LUN in tgt->lun_qpair_map, so commands for the same LUN stick to
 * one qpair; new LUNs go to an idle qpair if available, otherwise the
 * least-loaded one.
 */
static void qlt_assign_qpair(struct scsi_qla_host *vha,
    struct qla_tgt_cmd *cmd)
{
    struct qla_qpair *qpair, *qp;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    struct qla_qpair_hint *h;

    if (vha->flags.qpairs_available) {
        h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
        if (unlikely(!h)) {
            /* spread lun to qpair ratio evently */
            int lcnt = 0, rc;
            struct scsi_qla_host *base_vha =
                pci_get_drvdata(vha->hw->pdev);

            /* Prefer the base qpair while it has no LUNs. */
            qpair = vha->hw->base_qpair;
            if (qpair->lun_cnt == 0) {
                qpair->lun_cnt++;
                h = qla_qpair_to_hint(tgt, qpair);
                BUG_ON(!h);
                rc = btree_insert64(&tgt->lun_qpair_map,
                    cmd->unpacked_lun, h, GFP_ATOMIC);
                if (rc) {
                    /* Insert failed: undo count, still use qpair. */
                    qpair->lun_cnt--;
                    ql_log(ql_log_info, vha, 0xd037,
                        "Unable to insert lun %llx into lun_qpair_map\n",
                        cmd->unpacked_lun);
                }
                goto out;
            } else {
                lcnt = qpair->lun_cnt;
            }

            h = NULL;
            /* Take the first idle qpair, else track the least loaded. */
            list_for_each_entry(qp, &base_vha->qp_list,
                qp_list_elem) {
                if (qp->lun_cnt == 0) {
                    qp->lun_cnt++;
                    h = qla_qpair_to_hint(tgt, qp);
                    BUG_ON(!h);
                    rc = btree_insert64(&tgt->lun_qpair_map,
                        cmd->unpacked_lun, h, GFP_ATOMIC);
                    if (rc) {
                        qp->lun_cnt--;
                        ql_log(ql_log_info, vha, 0xd038,
                            "Unable to insert lun %llx into lun_qpair_map\n",
                            cmd->unpacked_lun);
                    }
                    qpair = qp;
                    goto out;
                } else {
                    if (qp->lun_cnt < lcnt) {
                        lcnt = qp->lun_cnt;
                        qpair = qp;
                        continue;
                    }
                }
            }
            /* No idle qpair: use the least-loaded one found above. */
            BUG_ON(!qpair);
            qpair->lun_cnt++;
            h = qla_qpair_to_hint(tgt, qpair);
            BUG_ON(!h);
            rc = btree_insert64(&tgt->lun_qpair_map,
                cmd->unpacked_lun, h, GFP_ATOMIC);
            if (rc) {
                qpair->lun_cnt--;
                ql_log(ql_log_info, vha, 0xd039,
                   "Unable to insert lun %llx into lun_qpair_map\n",
                   cmd->unpacked_lun);
            }
        }
    } else {
        /* No extra qpairs configured: everything uses the first hint. */
        h = &tgt->qphints[0];
    }
out:
    cmd->qpair = h->qpair;
    cmd->se_cmd.cpuid = h->cpuid;
}
4345 
4346 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4347                        struct fc_port *sess,
4348                        struct atio_from_isp *atio)
4349 {
4350     struct qla_tgt_cmd *cmd;
4351 
4352     cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
4353     if (!cmd)
4354         return NULL;
4355 
4356     cmd->cmd_type = TYPE_TGT_CMD;
4357     memcpy(&cmd->atio, atio, sizeof(*atio));
4358     INIT_LIST_HEAD(&cmd->sess_cmd_list);
4359     cmd->state = QLA_TGT_STATE_NEW;
4360     cmd->tgt = vha->vha_tgt.qla_tgt;
4361     qlt_incr_num_pend_cmds(vha);
4362     cmd->vha = vha;
4363     cmd->sess = sess;
4364     cmd->loop_id = sess->loop_id;
4365     cmd->conf_compl_supported = sess->conf_compl_supported;
4366 
4367     cmd->trc_flags = 0;
4368     cmd->jiffies_at_alloc = get_jiffies_64();
4369 
4370     cmd->unpacked_lun = scsilun_to_int(
4371         (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
4372     qlt_assign_qpair(vha, cmd);
4373     cmd->reset_count = vha->hw->base_qpair->chip_reset;
4374     cmd->vp_idx = vha->vp_idx;
4375     cmd->edif = sess->edif.enable;
4376 
4377     return cmd;
4378 }
4379 
/*
 * Build a qla_tgt_cmd for a new ATIO and hand it to the qla_tgt_wq
 * workqueue for processing.
 *
 * ha->hardware_lock supposed to be held on entry.
 *
 * Returns 0 on success; -ENODEV when the target is stopping, -EBUSY for
 * reserved source addresses or command-allocation failure, -EFAULT when
 * no usable session exists.
 */
static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
    struct atio_from_isp *atio)
{
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    struct fc_port *sess;
    struct qla_tgt_cmd *cmd;
    unsigned long flags;
    port_id_t id;

    /* Target mode is being torn down: refuse new work. */
    if (unlikely(tgt->tgt_stop)) {
        ql_dbg(ql_dbg_io, vha, 0x3061,
            "New command while device %p is shutting down\n", tgt);
        return -ENODEV;
    }

    /* Drop frames coming from reserved (well-known) fabric addresses. */
    id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
    if (IS_SW_RESV_ADDR(id))
        return -EBUSY;

    sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
    if (unlikely(!sess))
        return -EFAULT;

    /* Another WWN used to have our s_id. Our PLOGI scheduled its
     * session deletion, but it's still in sess_del_work wq */
    if (sess->deleted) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
            "New command while old session %p is being deleted\n",
            sess);
        return -EFAULT;
    }

    /*
     * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
     */
    if (!kref_get_unless_zero(&sess->sess_kref)) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
            "%s: kref_get fail, %8phC oxid %x \n",
            __func__, sess->port_name,
             be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
        return -EFAULT;
    }

    cmd = qlt_get_tag(vha, sess, atio);
    if (!cmd) {
        ql_dbg(ql_dbg_io, vha, 0x3062,
            "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
        /* Drop the session reference taken just above. */
        ha->tgt.tgt_ops->put_sess(sess);
        return -EBUSY;
    }

    cmd->cmd_in_wq = 1;
    cmd->trc_flags |= TRC_NEW_CMD;

    /* Publish the command so per-s_id abort scans can find it. */
    spin_lock_irqsave(&vha->cmd_list_lock, flags);
    list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
    spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

    INIT_WORK(&cmd->work, qlt_do_work);
    /* Prefer the CPU selected by qlt_assign_qpair() when qpairs exist. */
    if (vha->flags.qpairs_available) {
        queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
    } else if (ha->msix_count) {
        /* NOTE(review): READ commands appear to be kept on the
         * receiving CPU, presumably for cache locality -- confirm. */
        if (cmd->atio.u.isp24.fcp_cmnd.rddata)
            queue_work_on(smp_processor_id(), qla_tgt_wq,
                &cmd->work);
        else
            queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
                &cmd->work);
    } else {
        queue_work(qla_tgt_wq, &cmd->work);
    }

    return 0;
}
4456 
4457 /* ha->hardware_lock supposed to be held on entry */
4458 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
4459     int fn, void *iocb, int flags)
4460 {
4461     struct scsi_qla_host *vha = sess->vha;
4462     struct qla_hw_data *ha = vha->hw;
4463     struct qla_tgt_mgmt_cmd *mcmd;
4464     struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4465     struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
4466 
4467     mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4468     if (!mcmd) {
4469         ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4470             "qla_target(%d): Allocation of management "
4471             "command failed, some commands and their data could "
4472             "leak\n", vha->vp_idx);
4473         return -ENOMEM;
4474     }
4475     memset(mcmd, 0, sizeof(*mcmd));
4476     mcmd->sess = sess;
4477 
4478     if (iocb) {
4479         memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4480             sizeof(mcmd->orig_iocb.imm_ntfy));
4481     }
4482     mcmd->tmr_func = fn;
4483     mcmd->flags = flags;
4484     mcmd->reset_count = ha->base_qpair->chip_reset;
4485     mcmd->qpair = h->qpair;
4486     mcmd->vha = vha;
4487     mcmd->se_cmd.cpuid = h->cpuid;
4488     mcmd->unpacked_lun = lun;
4489 
4490     switch (fn) {
4491     case QLA_TGT_LUN_RESET:
4492     case QLA_TGT_CLEAR_TS:
4493     case QLA_TGT_ABORT_TS:
4494         abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4495         fallthrough;
4496     case QLA_TGT_CLEAR_ACA:
4497         h = qlt_find_qphint(vha, mcmd->unpacked_lun);
4498         mcmd->qpair = h->qpair;
4499         mcmd->se_cmd.cpuid = h->cpuid;
4500         break;
4501 
4502     case QLA_TGT_TARGET_RESET:
4503     case QLA_TGT_NEXUS_LOSS_SESS:
4504     case QLA_TGT_NEXUS_LOSS:
4505     case QLA_TGT_ABORT_ALL:
4506     default:
4507         /* no-op */
4508         break;
4509     }
4510 
4511     INIT_WORK(&mcmd->work, qlt_do_tmr_work);
4512     queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
4513         &mcmd->work);
4514 
4515     return 0;
4516 }
4517 
4518 /* ha->hardware_lock supposed to be held on entry */
4519 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4520 {
4521     struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4522     struct qla_hw_data *ha = vha->hw;
4523     struct fc_port *sess;
4524     u64 unpacked_lun;
4525     int fn;
4526     unsigned long flags;
4527 
4528     fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4529 
4530     spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4531     sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4532         a->u.isp24.fcp_hdr.s_id);
4533     spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4534 
4535     unpacked_lun =
4536         scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4537 
4538     if (sess == NULL || sess->deleted)
4539         return -EFAULT;
4540 
4541     return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4542 }
4543 
4544 /* ha->hardware_lock supposed to be held on entry */
4545 static int __qlt_abort_task(struct scsi_qla_host *vha,
4546     struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
4547 {
4548     struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4549     struct qla_hw_data *ha = vha->hw;
4550     struct qla_tgt_mgmt_cmd *mcmd;
4551     u64 unpacked_lun;
4552     int rc;
4553 
4554     mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4555     if (mcmd == NULL) {
4556         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4557             "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4558             vha->vp_idx, __func__);
4559         return -ENOMEM;
4560     }
4561     memset(mcmd, 0, sizeof(*mcmd));
4562 
4563     mcmd->sess = sess;
4564     memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4565         sizeof(mcmd->orig_iocb.imm_ntfy));
4566 
4567     unpacked_lun =
4568         scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4569     mcmd->reset_count = ha->base_qpair->chip_reset;
4570     mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
4571     mcmd->qpair = ha->base_qpair;
4572 
4573     rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
4574         le16_to_cpu(iocb->u.isp2x.seq_id));
4575     if (rc != 0) {
4576         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4577             "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
4578             vha->vp_idx, rc);
4579         mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4580         return -EFAULT;
4581     }
4582 
4583     return 0;
4584 }
4585 
4586 /* ha->hardware_lock supposed to be held on entry */
4587 static int qlt_abort_task(struct scsi_qla_host *vha,
4588     struct imm_ntfy_from_isp *iocb)
4589 {
4590     struct qla_hw_data *ha = vha->hw;
4591     struct fc_port *sess;
4592     int loop_id;
4593     unsigned long flags;
4594 
4595     loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4596 
4597     spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4598     sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4599     spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4600 
4601     if (sess == NULL) {
4602         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4603             "qla_target(%d): task abort for unexisting "
4604             "session\n", vha->vp_idx);
4605         return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4606             QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4607     }
4608 
4609     return __qlt_abort_task(vha, iocb, sess);
4610 }
4611 
4612 void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4613 {
4614     if (rc != MBS_COMMAND_COMPLETE) {
4615         ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4616             "%s: se_sess %p / sess %p from"
4617             " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4618             " LOGO failed: %#x\n",
4619             __func__,
4620             fcport->se_sess,
4621             fcport,
4622             fcport->port_name, fcport->loop_id,
4623             fcport->d_id.b.domain, fcport->d_id.b.area,
4624             fcport->d_id.b.al_pa, rc);
4625     }
4626 
4627     fcport->logout_completed = 1;
4628 }
4629 
4630 /*
4631 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4632 *
4633 * Schedules sessions with matching port_id/loop_id but different wwn for
4634 * deletion. Returns existing session with matching wwn if present.
4635 * Null otherwise.
4636 */
4637 struct fc_port *
4638 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4639     port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
4640 {
4641     struct fc_port *sess = NULL, *other_sess;
4642     uint64_t other_wwn;
4643 
4644     *conflict_sess = NULL;
4645 
4646     list_for_each_entry(other_sess, &vha->vp_fcports, list) {
4647 
4648         other_wwn = wwn_to_u64(other_sess->port_name);
4649 
4650         if (wwn == other_wwn) {
4651             WARN_ON(sess);
4652             sess = other_sess;
4653             continue;
4654         }
4655 
4656         /* find other sess with nport_id collision */
4657         if (port_id.b24 == other_sess->d_id.b24) {
4658             if (loop_id != other_sess->loop_id) {
4659                 ql_dbg(ql_dbg_disc, vha, 0x1000c,
4660                     "Invalidating sess %p loop_id %d wwn %llx.\n",
4661                     other_sess, other_sess->loop_id, other_wwn);
4662 
4663                 /*
4664                  * logout_on_delete is set by default, but another
4665                  * session that has the same s_id/loop_id combo
4666                  * might have cleared it when requested this session
4667                  * deletion, so don't touch it
4668                  */
4669                 qlt_schedule_sess_for_deletion(other_sess);
4670             } else {
4671                 /*
4672                  * Another wwn used to have our s_id/loop_id
4673                  * kill the session, but don't free the loop_id
4674                  */
4675                 ql_dbg(ql_dbg_disc, vha, 0xf01b,
4676                     "Invalidating sess %p loop_id %d wwn %llx.\n",
4677                     other_sess, other_sess->loop_id, other_wwn);
4678 
4679                 other_sess->keep_nport_handle = 1;
4680                 if (other_sess->disc_state != DSC_DELETED)
4681                     *conflict_sess = other_sess;
4682                 qlt_schedule_sess_for_deletion(other_sess);
4683             }
4684             continue;
4685         }
4686 
4687         /* find other sess with nport handle collision */
4688         if ((loop_id == other_sess->loop_id) &&
4689             (loop_id != FC_NO_LOOP_ID)) {
4690             ql_dbg(ql_dbg_disc, vha, 0x1000d,
4691                    "Invalidating sess %p loop_id %d wwn %llx.\n",
4692                    other_sess, other_sess->loop_id, other_wwn);
4693 
4694             /* Same loop_id but different s_id
4695              * Ok to kill and logout */
4696             qlt_schedule_sess_for_deletion(other_sess);
4697         }
4698     }
4699 
4700     return sess;
4701 }
4702 
4703 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4704 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4705 {
4706     struct qla_tgt_sess_op *op;
4707     struct qla_tgt_cmd *cmd;
4708     uint32_t key;
4709     int count = 0;
4710     unsigned long flags;
4711 
4712     key = (((u32)s_id->b.domain << 16) |
4713            ((u32)s_id->b.area   <<  8) |
4714            ((u32)s_id->b.al_pa));
4715 
4716     spin_lock_irqsave(&vha->cmd_list_lock, flags);
4717     list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
4718         uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4719 
4720         if (op_key == key) {
4721             op->aborted = true;
4722             count++;
4723         }
4724     }
4725 
4726     list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4727         uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4728 
4729         if (cmd_key == key) {
4730             cmd->aborted = 1;
4731             count++;
4732         }
4733     }
4734     spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4735 
4736     return count;
4737 }
4738 
4739 static int qlt_handle_login(struct scsi_qla_host *vha,
4740     struct imm_ntfy_from_isp *iocb)
4741 {
4742     struct fc_port *sess = NULL, *conflict_sess = NULL;
4743     uint64_t wwn;
4744     port_id_t port_id;
4745     uint16_t loop_id, wd3_lo;
4746     int res = 0;
4747     struct qlt_plogi_ack_t *pla;
4748     unsigned long flags;
4749 
4750     lockdep_assert_held(&vha->hw->hardware_lock);
4751 
4752     wwn = wwn_to_u64(iocb->u.isp24.port_name);
4753 
4754     port_id.b.domain = iocb->u.isp24.port_id[2];
4755     port_id.b.area   = iocb->u.isp24.port_id[1];
4756     port_id.b.al_pa  = iocb->u.isp24.port_id[0];
4757     port_id.b.rsvd_1 = 0;
4758 
4759     loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4760 
4761     /* Mark all stale commands sitting in qla_tgt_wq for deletion */
4762     abort_cmds_for_s_id(vha, &port_id);
4763 
4764     if (wwn) {
4765         spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4766         sess = qlt_find_sess_invalidate_other(vha, wwn,
4767             port_id, loop_id, &conflict_sess);
4768         spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4769     } else {
4770         ql_dbg(ql_dbg_disc, vha, 0xffff,
4771             "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
4772             __func__, __LINE__, loop_id, port_id.b24);
4773         qlt_send_term_imm_notif(vha, iocb, 1);
4774         goto out;
4775     }
4776 
4777     if (IS_SW_RESV_ADDR(port_id)) {
4778         res = 1;
4779         goto out;
4780     }
4781 
4782     if (vha->hw->flags.edif_enabled &&
4783         !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
4784         iocb->u.isp24.status_subcode == ELS_PLOGI &&
4785         !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
4786         ql_dbg(ql_dbg_disc, vha, 0xffff,
4787             "%s %d Term INOT due to app not available lid=%d, NportID %06X ",
4788             __func__, __LINE__, loop_id, port_id.b24);
4789         qlt_send_term_imm_notif(vha, iocb, 1);
4790         goto out;
4791     }
4792 
4793     if (vha->hw->flags.edif_enabled) {
4794         if (DBELL_INACTIVE(vha)) {
4795             ql_dbg(ql_dbg_disc, vha, 0xffff,
4796                    "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
4797                    __func__, __LINE__, loop_id, port_id.b24);
4798             qlt_send_term_imm_notif(vha, iocb, 1);
4799             goto out;
4800         } else if (iocb->u.isp24.status_subcode == ELS_PLOGI &&
4801                !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
4802             ql_dbg(ql_dbg_disc, vha, 0xffff,
4803                    "%s %d Term INOT due to unsecure lid=%d, NportID %06X ",
4804                    __func__, __LINE__, loop_id, port_id.b24);
4805             qlt_send_term_imm_notif(vha, iocb, 1);
4806             goto out;
4807         }
4808     }
4809 
4810     pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4811     if (!pla) {
4812         ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4813             "%s %d %8phC Term INOT due to mem alloc fail",
4814             __func__, __LINE__,
4815             iocb->u.isp24.port_name);
4816         qlt_send_term_imm_notif(vha, iocb, 1);
4817         goto out;
4818     }
4819 
4820     if (conflict_sess) {
4821         conflict_sess->login_gen++;
4822         qlt_plogi_ack_link(vha, pla, conflict_sess,
4823             QLT_PLOGI_LINK_CONFLICT);
4824     }
4825 
4826     if (!sess) {
4827         pla->ref_count++;
4828         ql_dbg(ql_dbg_disc, vha, 0xffff,
4829             "%s %d %8phC post new sess\n",
4830             __func__, __LINE__, iocb->u.isp24.port_name);
4831         if (iocb->u.isp24.status_subcode == ELS_PLOGI)
4832             qla24xx_post_newsess_work(vha, &port_id,
4833                 iocb->u.isp24.port_name,
4834                 iocb->u.isp24.u.plogi.node_name,
4835                 pla, 0);
4836         else
4837             qla24xx_post_newsess_work(vha, &port_id,
4838                 iocb->u.isp24.port_name, NULL,
4839                 pla, 0);
4840 
4841         goto out;
4842     }
4843 
4844     if (sess->disc_state == DSC_UPD_FCPORT) {
4845         u16 sec;
4846 
4847         /*
4848          * Remote port registration is still going on from
4849          * previous login. Allow it to finish before we
4850          * accept the new login.
4851          */
4852         sess->next_disc_state = DSC_DELETE_PEND;
4853         sec = jiffies_to_msecs(jiffies -
4854             sess->jiffies_at_registration) / 1000;
4855         if (sess->sec_since_registration < sec && sec &&
4856             !(sec % 5)) {
4857             sess->sec_since_registration = sec;
4858             ql_dbg(ql_dbg_disc, vha, 0xffff,
4859                 "%s %8phC - Slow Rport registration (%d Sec)\n",
4860                 __func__, sess->port_name, sec);
4861         }
4862 
4863         if (!conflict_sess) {
4864             list_del(&pla->list);
4865             kmem_cache_free(qla_tgt_plogi_cachep, pla);
4866         }
4867 
4868         qlt_send_term_imm_notif(vha, iocb, 1);
4869         goto out;
4870     }
4871 
4872     qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4873     sess->d_id = port_id;
4874     sess->login_gen++;
4875     sess->loop_id = loop_id;
4876 
4877     if (iocb->u.isp24.status_subcode == ELS_PLOGI) {
4878         /* remote port has assigned Port ID */
4879         if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
4880             vha->d_id = sess->d_id;
4881 
4882         ql_dbg(ql_dbg_disc, vha, 0xffff,
4883             "%s %8phC - send port online\n",
4884             __func__, sess->port_name);
4885 
4886         qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
4887             sess->d_id.b24);
4888     }
4889 
4890     if (iocb->u.isp24.status_subcode == ELS_PRLI) {
4891         sess->fw_login_state = DSC_LS_PRLI_PEND;
4892         sess->local = 0;
4893         sess->loop_id = loop_id;
4894         sess->d_id = port_id;
4895         sess->fw_login_state = DSC_LS_PRLI_PEND;
4896         wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4897 
4898         if (wd3_lo & BIT_7)
4899             sess->conf_compl_supported = 1;
4900 
4901         if ((wd3_lo & BIT_4) == 0)
4902             sess->port_type = FCT_INITIATOR;
4903         else
4904             sess->port_type = FCT_TARGET;
4905 
4906     } else
4907         sess->fw_login_state = DSC_LS_PLOGI_PEND;
4908 
4909 
4910     ql_dbg(ql_dbg_disc, vha, 0x20f9,
4911         "%s %d %8phC  DS %d\n",
4912         __func__, __LINE__, sess->port_name, sess->disc_state);
4913 
4914     switch (sess->disc_state) {
4915     case DSC_DELETED:
4916     case DSC_LOGIN_PEND:
4917         qlt_plogi_ack_unref(vha, pla);
4918         break;
4919 
4920     default:
4921         /*
4922          * Under normal circumstances we want to release nport handle
4923          * during LOGO process to avoid nport handle leaks inside FW.
4924          * The exception is when LOGO is done while another PLOGI with
4925          * the same nport handle is waiting as might be the case here.
4926          * Note: there is always a possibily of a race where session
4927          * deletion has already started for other reasons (e.g. ACL
4928          * removal) and now PLOGI arrives:
4929          * 1. if PLOGI arrived in FW after nport handle has been freed,
4930          *    FW must have assigned this PLOGI a new/same handle and we
4931          *    can proceed ACK'ing it as usual when session deletion
4932          *    completes.
4933          * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4934          *    bit reached it, the handle has now been released. We'll
4935          *    get an error when we ACK this PLOGI. Nothing will be sent
4936          *    back to initiator. Initiator should eventually retry
4937          *    PLOGI and situation will correct itself.
4938          */
4939         sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4940             (sess->d_id.b24 == port_id.b24));
4941 
4942         ql_dbg(ql_dbg_disc, vha, 0x20f9,
4943             "%s %d %8phC post del sess\n",
4944             __func__, __LINE__, sess->port_name);
4945 
4946 
4947         qlt_schedule_sess_for_deletion(sess);
4948         break;
4949     }
4950 out:
4951     return res;
4952 }
4953 
4954 /*
4955  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
4956  */
4957 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4958     struct imm_ntfy_from_isp *iocb)
4959 {
4960     struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4961     struct qla_hw_data *ha = vha->hw;
4962     struct fc_port *sess = NULL, *conflict_sess = NULL;
4963     uint64_t wwn;
4964     port_id_t port_id;
4965     uint16_t loop_id;
4966     uint16_t wd3_lo;
4967     int res = 0;
4968     unsigned long flags;
4969 
4970     lockdep_assert_held(&ha->hardware_lock);
4971 
4972     wwn = wwn_to_u64(iocb->u.isp24.port_name);
4973 
4974     port_id.b.domain = iocb->u.isp24.port_id[2];
4975     port_id.b.area   = iocb->u.isp24.port_id[1];
4976     port_id.b.al_pa  = iocb->u.isp24.port_id[0];
4977     port_id.b.rsvd_1 = 0;
4978 
4979     loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4980 
4981     ql_dbg(ql_dbg_disc, vha, 0xf026,
4982         "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
4983         vha->vp_idx, iocb->u.isp24.port_id[2],
4984         iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
4985            iocb->u.isp24.status_subcode, loop_id,
4986         iocb->u.isp24.port_name);
4987 
4988     /* res = 1 means ack at the end of thread
4989      * res = 0 means ack async/later.
4990      */
4991     switch (iocb->u.isp24.status_subcode) {
4992     case ELS_PLOGI:
4993         res = qlt_handle_login(vha, iocb);
4994         break;
4995 
4996     case ELS_PRLI:
4997         if (N2N_TOPO(ha)) {
4998             sess = qla2x00_find_fcport_by_wwpn(vha,
4999                 iocb->u.isp24.port_name, 1);
5000 
5001             if (vha->hw->flags.edif_enabled && sess &&
5002                 (!(sess->flags & FCF_FCSP_DEVICE) ||
5003                  !sess->edif.authok)) {
5004                 ql_dbg(ql_dbg_disc, vha, 0xffff,
5005                        "%s %d %8phC Term PRLI due to unauthorize PRLI\n",
5006                        __func__, __LINE__, iocb->u.isp24.port_name);
5007                 qlt_send_term_imm_notif(vha, iocb, 1);
5008                 break;
5009             }
5010 
5011             if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
5012                 ql_dbg(ql_dbg_disc, vha, 0xffff,
5013                     "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
5014                     __func__, __LINE__,
5015                     iocb->u.isp24.port_name);
5016                 qlt_send_term_imm_notif(vha, iocb, 1);
5017                 break;
5018             }
5019 
5020             res = qlt_handle_login(vha, iocb);
5021             break;
5022         }
5023 
5024         if (IS_SW_RESV_ADDR(port_id)) {
5025             res = 1;
5026             break;
5027         }
5028 
5029         wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
5030 
5031         if (wwn) {
5032             spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
5033             sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
5034                 loop_id, &conflict_sess);
5035             spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
5036         }
5037 
5038         if (conflict_sess) {
5039             switch (conflict_sess->disc_state) {
5040             case DSC_DELETED:
5041             case DSC_DELETE_PEND:
5042                 break;
5043             default:
5044                 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
5045                     "PRLI with conflicting sess %p port %8phC\n",
5046                     conflict_sess, conflict_sess->port_name);
5047                 conflict_sess->fw_login_state =
5048                     DSC_LS_PORT_UNAVAIL;
5049                 qlt_send_term_imm_notif(vha, iocb, 1);
5050                 res = 0;
5051                 break;
5052             }
5053         }
5054 
5055         if (sess != NULL) {
5056             bool delete = false;
5057             int sec;
5058 
5059             if (vha->hw->flags.edif_enabled && sess &&
5060                 (!(sess->flags & FCF_FCSP_DEVICE) ||
5061                  !sess->edif.authok)) {
5062                 ql_dbg(ql_dbg_disc, vha, 0xffff,
5063                        "%s %d %8phC Term PRLI due to unauthorize prli\n",
5064                        __func__, __LINE__, iocb->u.isp24.port_name);
5065                 qlt_send_term_imm_notif(vha, iocb, 1);
5066                 break;
5067             }
5068 
5069             spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
5070             switch (sess->fw_login_state) {
5071             case DSC_LS_PLOGI_PEND:
5072             case DSC_LS_PLOGI_COMP:
5073             case DSC_LS_PRLI_COMP:
5074                 break;
5075             default:
5076                 delete = true;
5077                 break;
5078             }
5079 
5080             switch (sess->disc_state) {
5081             case DSC_UPD_FCPORT:
5082                 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
5083                     flags);
5084 
5085                 sec = jiffies_to_msecs(jiffies -
5086                     sess->jiffies_at_registration)/1000;
5087                 if (sess->sec_since_registration < sec && sec &&
5088                     !(sec % 5)) {
5089                     sess->sec_since_registration = sec;
5090                     ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
5091                         "%s %8phC : Slow Rport registration(%d Sec)\n",
5092                         __func__, sess->port_name, sec);
5093                 }
5094                 qlt_send_term_imm_notif(vha, iocb, 1);
5095                 return 0;
5096 
5097             case DSC_LOGIN_PEND:
5098             case DSC_GPDB:
5099             case DSC_LOGIN_COMPLETE:
5100             case DSC_ADISC:
5101                 delete = false;
5102                 break;
5103             default:
5104                 break;
5105             }
5106 
5107             if (delete) {
5108                 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
5109                     flags);
5110                 /*
5111                  * Impatient initiator sent PRLI before last
5112                  * PLOGI could finish. Will force him to re-try,
5113                  * while last one finishes.
5114                  */
5115                 ql_log(ql_log_warn, sess->vha, 0xf095,
5116                     "sess %p PRLI received, before plogi ack.\n",
5117                     sess);
5118                 qlt_send_term_imm_notif(vha, iocb, 1);
5119                 res = 0;
5120                 break;
5121             }
5122 
5123             /*
5124              * This shouldn't happen under normal circumstances,
5125              * since we have deleted the old session during PLOGI
5126              */
5127             ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
5128                 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
5129                 sess->loop_id, sess, iocb->u.isp24.nport_handle);
5130 
5131             sess->local = 0;
5132             sess->loop_id = loop_id;
5133             sess->d_id = port_id;
5134             sess->fw_login_state = DSC_LS_PRLI_PEND;
5135 
5136             if (wd3_lo & BIT_7)
5137                 sess->conf_compl_supported = 1;
5138 
5139             if ((wd3_lo & BIT_4) == 0)
5140                 sess->port_type = FCT_INITIATOR;
5141             else
5142                 sess->port_type = FCT_TARGET;
5143 
5144             spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
5145         }
5146         res = 1; /* send notify ack */
5147 
5148         /* Make session global (not used in fabric mode) */
5149         if (ha->current_topology != ISP_CFG_F) {
5150             if (sess) {
5151                 ql_dbg(ql_dbg_disc, vha, 0x20fa,
5152                     "%s %d %8phC post nack\n",
5153                     __func__, __LINE__, sess->port_name);
5154                 qla24xx_post_nack_work(vha, sess, iocb,
5155                     SRB_NACK_PRLI);
5156                 res = 0;
5157             } else {
5158                 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5159                 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5160                 qla2xxx_wake_dpc(vha);
5161             }
5162         } else {
5163             if (sess) {
5164                 ql_dbg(ql_dbg_disc, vha, 0x20fb,
5165                     "%s %d %8phC post nack\n",
5166                     __func__, __LINE__, sess->port_name);
5167                 qla24xx_post_nack_work(vha, sess, iocb,
5168                     SRB_NACK_PRLI);
5169                 res = 0;
5170             }
5171         }
5172         break;
5173 
5174     case ELS_TPRLO:
5175         if (le16_to_cpu(iocb->u.isp24.flags) &
5176             NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
5177             loop_id = 0xFFFF;
5178             qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
5179             res = 1;
5180             break;
5181         }
5182         fallthrough;
5183     case ELS_LOGO:
5184     case ELS_PRLO:
5185         spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5186         sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
5187         spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5188 
5189         if (sess) {
5190             sess->login_gen++;
5191             sess->fw_login_state = DSC_LS_LOGO_PEND;
5192             sess->logo_ack_needed = 1;
5193             memcpy(sess->iocb, iocb, IOCB_SIZE);
5194         }
5195 
5196         res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5197 
5198         ql_dbg(ql_dbg_disc, vha, 0x20fc,
5199             "%s: logo %llx res %d sess %p ",
5200             __func__, wwn, res, sess);
5201         if (res == 0) {
5202             /*
5203              * cmd went upper layer, look for qlt_xmit_tm_rsp()
5204              * for LOGO_ACK & sess delete
5205              */
5206             BUG_ON(!sess);
5207             res = 0;
5208         } else {
5209             /* cmd did not go to upper layer. */
5210             if (sess) {
5211                 qlt_schedule_sess_for_deletion(sess);
5212                 res = 0;
5213             }
5214             /* else logo will be ack */
5215         }
5216         break;
5217     case ELS_PDISC:
5218     case ELS_ADISC:
5219     {
5220         struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5221 
5222         if (tgt->link_reinit_iocb_pending) {
5223             qlt_send_notify_ack(ha->base_qpair,
5224                 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
5225             tgt->link_reinit_iocb_pending = 0;
5226         }
5227 
5228         sess = qla2x00_find_fcport_by_wwpn(vha,
5229             iocb->u.isp24.port_name, 1);
5230         if (sess) {
5231             ql_dbg(ql_dbg_disc, vha, 0x20fd,
5232                 "sess %p lid %d|%d DS %d LS %d\n",
5233                 sess, sess->loop_id, loop_id,
5234                 sess->disc_state, sess->fw_login_state);
5235         }
5236 
5237         res = 1; /* send notify ack */
5238         break;
5239     }
5240 
5241     case ELS_FLOGI: /* should never happen */
5242     default:
5243         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
5244             "qla_target(%d): Unsupported ELS command %x "
5245             "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
5246         res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5247         break;
5248     }
5249 
5250     ql_dbg(ql_dbg_disc, vha, 0xf026,
5251         "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
5252         vha->vp_idx, iocb->u.isp24.status_subcode, res);
5253 
5254     return res;
5255 }
5256 
5257 /*
5258  * ha->hardware_lock supposed to be held on entry.
5259  * Might drop it, then reacquire.
5260  */
static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
    struct imm_ntfy_from_isp *iocb)
{
    struct qla_hw_data *ha = vha->hw;
    uint32_t add_flags = 0;	/* extra notify-ack flags; never set below */
    int send_notify_ack = 1;	/* default: ack the IOCB at the bottom */
    uint16_t status;

    lockdep_assert_held(&ha->hardware_lock);

    /*
     * Dispatch one IMMEDIATE NOTIFY IOCB from the firmware.  Handlers
     * that hand the IOCB to a deferred path (qlt_reset()/qlt_abort_task()
     * returning 0, or qlt_24xx_handle_els() returning 0) clear
     * send_notify_ack so the NOTIFY ACK is sent later by that path.
     */
    status = le16_to_cpu(iocb->u.isp2x.status);
    switch (status) {
    case IMM_NTFY_LIP_RESET:
    {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
            "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
            vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
            iocb->u.isp24.status_subcode);

        /* Abort all outstanding commands; ack deferred on success */
        if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
            send_notify_ack = 0;
        break;
    }

    case IMM_NTFY_LIP_LINK_REINIT:
    {
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
            "qla_target(%d): LINK REINIT (loop %#x, "
            "subcode %x)\n", vha->vp_idx,
            le16_to_cpu(iocb->u.isp24.nport_handle),
            iocb->u.isp24.status_subcode);
        /* Ack any previously stashed LINK REINIT before replacing it */
        if (tgt->link_reinit_iocb_pending) {
            qlt_send_notify_ack(ha->base_qpair,
                &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
        }
        memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
        tgt->link_reinit_iocb_pending = 1;
        /*
         * QLogic requires to wait after LINK REINIT for possible
         * PDISC or ADISC ELS commands
         */
        send_notify_ack = 0;
        break;
    }

    case IMM_NTFY_PORT_LOGOUT:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
            "qla_target(%d): Port logout (loop "
            "%#x, subcode %x)\n", vha->vp_idx,
            le16_to_cpu(iocb->u.isp24.nport_handle),
            iocb->u.isp24.status_subcode);

        if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
            send_notify_ack = 0;
        /* The sessions will be cleared in the callback, if needed */
        break;

    case IMM_NTFY_GLBL_TPRLO:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
            "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
        if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
            send_notify_ack = 0;
        /* The sessions will be cleared in the callback, if needed */
        break;

    case IMM_NTFY_PORT_CONFIG:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
            "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
            status);
        if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
            send_notify_ack = 0;
        /* The sessions will be cleared in the callback, if needed */
        break;

    case IMM_NTFY_GLBL_LOGO:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
            "qla_target(%d): Link failure detected\n",
            vha->vp_idx);
        /* I_T nexus loss */
        if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
            send_notify_ack = 0;
        break;

    case IMM_NTFY_IOCB_OVERFLOW:
        /* Log only; the unconditional NOTIFY ACK below still goes out */
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
            "qla_target(%d): Cannot provide requested "
            "capability (IOCB overflowed the immediate notify "
            "resource count)\n", vha->vp_idx);
        break;

    case IMM_NTFY_ABORT_TASK:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
            "qla_target(%d): Abort Task (S %08x I %#x -> "
            "L %#x)\n", vha->vp_idx,
            le16_to_cpu(iocb->u.isp2x.seq_id),
            GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
            le16_to_cpu(iocb->u.isp2x.lun));
        if (qlt_abort_task(vha, iocb) == 0)
            send_notify_ack = 0;
        break;

    case IMM_NTFY_RESOURCE:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
            "qla_target(%d): Out of resources, host %ld\n",
            vha->vp_idx, vha->host_no);
        break;

    case IMM_NTFY_MSG_RX:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
            "qla_target(%d): Immediate notify task %x\n",
            vha->vp_idx, iocb->u.isp2x.task_flags);
        break;

    case IMM_NTFY_ELS:
        /* ELS handler returns 0 when it takes over ack responsibility */
        if (qlt_24xx_handle_els(vha, iocb) == 0)
            send_notify_ack = 0;
        break;
    default:
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
            "qla_target(%d): Received unknown immediate "
            "notify status %x\n", vha->vp_idx, status);
        break;
    }

    if (send_notify_ack)
        qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
            0, 0);
}
5391 
5392 /*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then
 * reacquire. This function sends busy to ISP 2xxx or 24xx.
5395  */
static int __qlt_send_busy(struct qla_qpair *qpair,
    struct atio_from_isp *atio, uint16_t status)
{
    struct scsi_qla_host *vha = qpair->vha;
    struct ctio7_to_24xx *ctio24;
    struct qla_hw_data *ha = vha->hw;
    request_t *pkt;
    struct fc_port *sess = NULL;
    unsigned long flags;
    u16 temp;
    port_id_t id;

    /* Look up the initiator session by the ATIO's source N_Port ID */
    id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);

    spin_lock_irqsave(&ha->tgt.sess_lock, flags);
    sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
    if (!sess) {
        /* No session: terminate the exchange instead of sending busy */
        qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
        return 0;
    }
    /* Sending marker isn't necessary, since we called from ISR */

    pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
    if (!pkt) {
        ql_dbg(ql_dbg_io, vha, 0x3063,
            "qla_target(%d): %s failed: unable to allocate "
            "request packet", vha->vp_idx, __func__);
        /* Caller (qlt_send_busy) queues the ATIO for a later retry */
        return -ENOMEM;
    }

    qpair->tgt_counters.num_q_full_sent++;
    pkt->entry_count = 1;
    pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

    /* Build a status-only (mode 1) CTIO7 carrying the SCSI status */
    ctio24 = (struct ctio7_to_24xx *)pkt;
    ctio24->entry_type = CTIO_TYPE7;
    ctio24->nport_handle = cpu_to_le16(sess->loop_id);
    ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
    ctio24->vp_index = vha->vp_idx;
    ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
    ctio24->exchange_addr = atio->u.isp24.exchange_addr;
    temp = (atio->u.isp24.attr << 9) |
        CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
        CTIO7_FLAGS_DONT_RET_CTIO;
    ctio24->u.status1.flags = cpu_to_le16(temp);
    /*
     * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
     * if the explicit confirmation is used.
     */
    ctio24->u.status1.ox_id =
        cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
    ctio24->u.status1.scsi_status = cpu_to_le16(status);

    /* No data is transferred, so the full expected length is residual */
    ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));

    if (ctio24->u.status1.residual != 0)
        ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);

    /* Memory Barrier */
    wmb();
    if (qpair->reqq_start_iocbs)
        qpair->reqq_start_iocbs(qpair);
    else
        qla2x00_start_iocbs(vha, qpair->req);
    return 0;
}
5463 
5464 /*
5465  * This routine is used to allocate a command for either a QFull condition
5466  * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go
5467  * out previously.
5468  */
5469 static void
5470 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5471     struct atio_from_isp *atio, uint16_t status, int qfull)
5472 {
5473     struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5474     struct qla_hw_data *ha = vha->hw;
5475     struct fc_port *sess;
5476     struct qla_tgt_cmd *cmd;
5477     unsigned long flags;
5478 
5479     if (unlikely(tgt->tgt_stop)) {
5480         ql_dbg(ql_dbg_io, vha, 0x300a,
5481             "New command while device %p is shutting down\n", tgt);
5482         return;
5483     }
5484 
5485     if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5486         vha->hw->tgt.num_qfull_cmds_dropped++;
5487         if (vha->hw->tgt.num_qfull_cmds_dropped >
5488             vha->qla_stats.stat_max_qfull_cmds_dropped)
5489             vha->qla_stats.stat_max_qfull_cmds_dropped =
5490                 vha->hw->tgt.num_qfull_cmds_dropped;
5491 
5492         ql_dbg(ql_dbg_io, vha, 0x3068,
5493             "qla_target(%d): %s: QFull CMD dropped[%d]\n",
5494             vha->vp_idx, __func__,
5495             vha->hw->tgt.num_qfull_cmds_dropped);
5496 
5497         qlt_chk_exch_leak_thresh_hold(vha);
5498         return;
5499     }
5500 
5501     sess = ha->tgt.tgt_ops->find_sess_by_s_id
5502         (vha, atio->u.isp24.fcp_hdr.s_id);
5503     if (!sess)
5504         return;
5505 
5506     cmd = ha->tgt.tgt_ops->get_cmd(sess);
5507     if (!cmd) {
5508         ql_dbg(ql_dbg_io, vha, 0x3009,
5509             "qla_target(%d): %s: Allocation of cmd failed\n",
5510             vha->vp_idx, __func__);
5511 
5512         vha->hw->tgt.num_qfull_cmds_dropped++;
5513         if (vha->hw->tgt.num_qfull_cmds_dropped >
5514             vha->qla_stats.stat_max_qfull_cmds_dropped)
5515             vha->qla_stats.stat_max_qfull_cmds_dropped =
5516                 vha->hw->tgt.num_qfull_cmds_dropped;
5517 
5518         qlt_chk_exch_leak_thresh_hold(vha);
5519         return;
5520     }
5521 
5522     qlt_incr_num_pend_cmds(vha);
5523     INIT_LIST_HEAD(&cmd->cmd_list);
5524     memcpy(&cmd->atio, atio, sizeof(*atio));
5525 
5526     cmd->tgt = vha->vha_tgt.qla_tgt;
5527     cmd->vha = vha;
5528     cmd->reset_count = ha->base_qpair->chip_reset;
5529     cmd->q_full = 1;
5530     cmd->qpair = ha->base_qpair;
5531 
5532     if (qfull) {
5533         cmd->q_full = 1;
5534         /* NOTE: borrowing the state field to carry the status */
5535         cmd->state = status;
5536     } else
5537         cmd->term_exchg = 1;
5538 
5539     spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5540     list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5541 
5542     vha->hw->tgt.num_qfull_cmds_alloc++;
5543     if (vha->hw->tgt.num_qfull_cmds_alloc >
5544         vha->qla_stats.stat_max_qfull_cmds_alloc)
5545         vha->qla_stats.stat_max_qfull_cmds_alloc =
5546             vha->hw->tgt.num_qfull_cmds_alloc;
5547     spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5548 }
5549 
/*
 * Drain the QFull list populated by qlt_alloc_qfull_cmd(): for each parked
 * command send either a busy response (cmd->q_full, status in cmd->state)
 * or a TERM EXCHANGE (cmd->term_exchg), then free it.  Commands that could
 * not be sent (-ENOMEM from __qlt_send_busy) are put back on the list.
 * Returns the last send status (0 or -ENOMEM).
 */
int
qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
    struct scsi_qla_host *vha = qpair->vha;
    struct qla_hw_data *ha = vha->hw;
    unsigned long flags;
    struct qla_tgt_cmd *cmd, *tcmd;
    struct list_head free_list, q_full_list;
    int rc = 0;

    /* Unlocked fast-path check; rechecked under the lock below */
    if (list_empty(&ha->tgt.q_full_list))
        return 0;

    INIT_LIST_HEAD(&free_list);
    INIT_LIST_HEAD(&q_full_list);

    spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
    if (list_empty(&ha->tgt.q_full_list)) {
        spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
        return 0;
    }

    /* Take the whole list private so q_full_lock can be dropped */
    list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
    spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);

    spin_lock_irqsave(qpair->qp_lock_ptr, flags);
    list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
        if (cmd->q_full)
            /* cmd->state is a borrowed field to hold status */
            rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
        else if (cmd->term_exchg)
            rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);

        /* IOCB space exhausted: stop; leftovers are respliced below */
        if (rc == -ENOMEM)
            break;

        if (cmd->q_full)
            ql_dbg(ql_dbg_io, vha, 0x3006,
                "%s: busy sent for ox_id[%04x]\n", __func__,
                be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
        else if (cmd->term_exchg)
            ql_dbg(ql_dbg_io, vha, 0x3007,
                "%s: Term exchg sent for ox_id[%04x]\n", __func__,
                be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
        else
            ql_dbg(ql_dbg_io, vha, 0x3008,
                "%s: Unexpected cmd in QFull list %p\n", __func__,
                cmd);

        list_move_tail(&cmd->cmd_list, &free_list);

        /*
         * Counter decremented under the qpair lock here, but it is
         * incremented under q_full_lock in qlt_alloc_qfull_cmd().
         * NOTE(review): appears to rely on the base qpair's lock
         * being hardware_lock — confirm the locking scheme.
         */
        vha->hw->tgt.num_qfull_cmds_alloc--;
    }
    spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

    cmd = NULL;

    list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
        list_del(&cmd->cmd_list);
        /* This cmd was never sent to TCM.  There is no need
         * to schedule free or call free_cmd
         */
        qlt_free_cmd(cmd);
    }

    /* Put back anything we could not send this time */
    if (!list_empty(&q_full_list)) {
        spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
        list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
        spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
    }

    return rc;
}
5624 
5625 static void
5626 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
5627     uint16_t status)
5628 {
5629     int rc = 0;
5630     struct scsi_qla_host *vha = qpair->vha;
5631 
5632     rc = __qlt_send_busy(qpair, atio, status);
5633     if (rc == -ENOMEM)
5634         qlt_alloc_qfull_cmd(vha, atio, status, 1);
5635 }
5636 
5637 static int
5638 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5639     struct atio_from_isp *atio, uint8_t ha_locked)
5640 {
5641     struct qla_hw_data *ha = vha->hw;
5642     unsigned long flags;
5643 
5644     if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
5645         return 0;
5646 
5647     if (!ha_locked)
5648         spin_lock_irqsave(&ha->hardware_lock, flags);
5649     qlt_send_busy(qpair, atio, qla_sam_status);
5650     if (!ha_locked)
5651         spin_unlock_irqrestore(&ha->hardware_lock, flags);
5652 
5653     return 1;
5654 }
5655 
5656 /* ha->hardware_lock supposed to be held on entry */
5657 /* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
    struct atio_from_isp *atio, uint8_t ha_locked)
{
    struct qla_hw_data *ha = vha->hw;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
    int rc;
    unsigned long flags = 0;

    if (unlikely(tgt == NULL)) {
        ql_dbg(ql_dbg_tgt, vha, 0x3064,
            "ATIO pkt, but no tgt (ha %p)", ha);
        return;
    }
    /*
     * In tgt_stop mode we also should allow all requests to pass.
     * Otherwise, some commands can stuck.
     */

    /* Track in-flight ATIO processing (decremented on every exit path) */
    tgt->atio_irq_cmd_count++;

    switch (atio->u.raw.entry_type) {
    case ATIO_TYPE7:
        /* Firmware could not assign an exchange: reply QUEUE FULL */
        if (unlikely(atio->u.isp24.exchange_addr ==
                 cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
            ql_dbg(ql_dbg_io, vha, 0x3065,
                "qla_target(%d): ATIO_TYPE7 "
                "received with UNKNOWN exchange address, "
                "sending QUEUE_FULL\n", vha->vp_idx);
            if (!ha_locked)
                spin_lock_irqsave(&ha->hardware_lock, flags);
            qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
            if (!ha_locked)
                spin_unlock_irqrestore(&ha->hardware_lock,
                    flags);
            break;
        }

        /* Normal command vs. task-management request */
        if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
            rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
                atio, ha_locked);
            if (rc != 0) {
                tgt->atio_irq_cmd_count--;
                return;
            }
            rc = qlt_handle_cmd_for_atio(vha, atio);
        } else {
            rc = qlt_handle_task_mgmt(vha, atio);
        }
        if (unlikely(rc != 0)) {
            /* Map the handler error to a wire-level response */
            if (!ha_locked)
                spin_lock_irqsave(&ha->hardware_lock, flags);
            switch (rc) {
            case -ENODEV:
                ql_dbg(ql_dbg_tgt, vha, 0xe05f,
                    "qla_target: Unable to send command to target\n");
                break;
            case -EBADF:
                ql_dbg(ql_dbg_tgt, vha, 0xe05f,
                    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
                qlt_send_term_exchange(ha->base_qpair, NULL,
                    atio, 1, 0);
                break;
            case -EBUSY:
                ql_dbg(ql_dbg_tgt, vha, 0xe060,
                    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
                    vha->vp_idx);
                qlt_send_busy(ha->base_qpair, atio,
                    tc_sam_status);
                break;
            default:
                ql_dbg(ql_dbg_tgt, vha, 0xe060,
                    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
                    vha->vp_idx);
                qlt_send_busy(ha->base_qpair, atio,
                    qla_sam_status);
                break;
            }
            if (!ha_locked)
                spin_unlock_irqrestore(&ha->hardware_lock,
                    flags);
        }
        break;

    case IMMED_NOTIFY_TYPE:
    {
        if (unlikely(atio->u.isp2x.entry_status != 0)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe05b,
                "qla_target(%d): Received ATIO packet %x "
                "with error status %x\n", vha->vp_idx,
                atio->u.raw.entry_type,
                atio->u.isp2x.entry_status);
            break;
        }
        ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");

        /* qlt_handle_imm_notify() requires hardware_lock held */
        if (!ha_locked)
            spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
        if (!ha_locked)
            spin_unlock_irqrestore(&ha->hardware_lock, flags);
        break;
    }

    default:
        ql_dbg(ql_dbg_tgt, vha, 0xe05c,
            "qla_target(%d): Received unknown ATIO atio "
            "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
        break;
    }

    tgt->atio_irq_cmd_count--;
}
5770 
5771 /*
 * qpair lock is assumed to be held
5773  * rc = 0 : send terminate & abts respond
5774  * rc != 0: do not send term & abts respond
5775  */
5776 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
5777     struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
5778 {
5779     struct qla_hw_data *ha = vha->hw;
5780     int rc = 0;
5781 
5782     /*
5783      * Detect unresolved exchange. If the same ABTS is unable
5784      * to terminate an existing command and the same ABTS loops
5785      * between FW & Driver, then force FW dump. Under 1 jiff,
5786      * we should see multiple loops.
5787      */
5788     if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
5789         qpair->retry_term_jiff == jiffies) {
5790         /* found existing exchange */
5791         qpair->retry_term_cnt++;
5792         if (qpair->retry_term_cnt >= 5) {
5793             rc = -EIO;
5794             qpair->retry_term_cnt = 0;
5795             ql_log(ql_log_warn, vha, 0xffff,
5796                 "Unable to send ABTS Respond. Dumping firmware.\n");
5797             ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
5798                 vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
5799 
5800             if (qpair == ha->base_qpair)
5801                 ha->isp_ops->fw_dump(vha);
5802             else
5803                 qla2xxx_dump_fw(vha);
5804 
5805             set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5806             qla2xxx_wake_dpc(vha);
5807         }
5808     } else if (qpair->retry_term_jiff != jiffies) {
5809         qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
5810         qpair->retry_term_cnt = 0;
5811         qpair->retry_term_jiff = jiffies;
5812     }
5813 
5814     return rc;
5815 }
5816 
5817 
/*
 * Process an ABTS_RESP_24XX completion from the firmware: resolve the
 * management command for the handle, retry the TERM EXCHANGE on the
 * known 0x1E/0 subcode failure, and release the mcmd otherwise.
 */
static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
    struct rsp_que *rsp, response_t *pkt)
{
    struct abts_resp_from_24xx_fw *entry =
        (struct abts_resp_from_24xx_fw *)pkt;
    u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
    struct qla_tgt_mgmt_cmd *mcmd;
    struct qla_hw_data *ha = vha->hw;

    mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
    if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
        ql_dbg(ql_dbg_async, vha, 0xe064,
            "qla_target(%d): ABTS Comp without mcmd\n",
            vha->vp_idx);
        return;
    }

    /* A SKIP_HANDLE completion may proceed with mcmd == NULL */
    if (mcmd)
        vha  = mcmd->vha;
    vha->vha_tgt.qla_tgt->abts_resp_expected--;

    ql_dbg(ql_dbg_tgt, vha, 0xe038,
        "ABTS_RESP_24XX: compl_status %x\n",
        entry->compl_status);

    if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
        if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
            le32_to_cpu(entry->error_subcode2) == 0) {
            /* Give up and fw-dump if this exchange keeps looping */
            if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
                ha->tgt.tgt_ops->free_mcmd(mcmd);
                return;
            }
            qlt_24xx_retry_term_exchange(vha, rsp->qpair,
                pkt, mcmd);
        } else {
            ql_dbg(ql_dbg_tgt, vha, 0xe063,
                "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
                vha->vp_idx, entry->compl_status,
                entry->error_subcode1,
                entry->error_subcode2);
            /*
             * NOTE(review): mcmd may be NULL here (SKIP_HANDLE path);
             * presumably free_mcmd tolerates NULL — verify callback.
             */
            ha->tgt.tgt_ops->free_mcmd(mcmd);
        }
    } else if (mcmd) {
        ha->tgt.tgt_ops->free_mcmd(mcmd);
    }
}
5864 
5865 /* ha->hardware_lock supposed to be held on entry */
5866 /* called via callback from qla2xxx */
/* Dispatch one firmware response-queue entry to the matching
 * target-mode handler, keyed on the IOCB entry type.
 */
static void qlt_response_pkt(struct scsi_qla_host *vha,
    struct rsp_que *rsp, response_t *pkt)
{
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

    if (unlikely(tgt == NULL)) {
        ql_dbg(ql_dbg_tgt, vha, 0xe05d,
            "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
            vha->vp_idx, pkt->entry_type, vha->hw);
        return;
    }

    /*
     * In tgt_stop mode we also should allow all requests to pass.
     * Otherwise, some commands can stuck.
     */

    switch (pkt->entry_type) {
    case CTIO_CRC2:
    case CTIO_TYPE7:
    {
        /* 24xx-style CTIO completion */
        struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;

        /* Fold entry_status into the upper 16 bits of the status */
        qlt_do_ctio_completion(vha, rsp, entry->handle,
            le16_to_cpu(entry->status)|(pkt->entry_status << 16),
            entry);
        break;
    }

    case ACCEPT_TGT_IO_TYPE:
    {
        /* 2xxx-style incoming command (ATIO delivered on rsp queue) */
        struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
        int rc;

        if (atio->u.isp2x.status !=
            cpu_to_le16(ATIO_CDB_VALID)) {
            ql_dbg(ql_dbg_tgt, vha, 0xe05e,
                "qla_target(%d): ATIO with error "
                "status %x received\n", vha->vp_idx,
                le16_to_cpu(atio->u.isp2x.status));
            break;
        }

        /* hardware_lock already held here (ha_locked = 1) */
        rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
        if (rc != 0)
            return;

        rc = qlt_handle_cmd_for_atio(vha, atio);
        if (unlikely(rc != 0)) {
            /* Map the handler error to a wire-level response */
            switch (rc) {
            case -ENODEV:
                ql_dbg(ql_dbg_tgt, vha, 0xe05f,
                    "qla_target: Unable to send command to target\n");
                break;
            case -EBADF:
                ql_dbg(ql_dbg_tgt, vha, 0xe05f,
                    "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
                qlt_send_term_exchange(rsp->qpair, NULL,
                    atio, 1, 0);
                break;
            case -EBUSY:
                ql_dbg(ql_dbg_tgt, vha, 0xe060,
                    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
                    vha->vp_idx);
                qlt_send_busy(rsp->qpair, atio,
                    tc_sam_status);
                break;
            default:
                ql_dbg(ql_dbg_tgt, vha, 0xe060,
                    "qla_target(%d): Unable to send command to target, sending BUSY status\n",
                    vha->vp_idx);
                qlt_send_busy(rsp->qpair, atio,
                    qla_sam_status);
                break;
            }
        }
    }
    break;

    case CONTINUE_TGT_IO_TYPE:
    {
        /* 2xxx-style CTIO completion */
        struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

        qlt_do_ctio_completion(vha, rsp, entry->handle,
            le16_to_cpu(entry->status)|(pkt->entry_status << 16),
            entry);
        break;
    }

    case CTIO_A64_TYPE:
    {
        /* 64-bit-address CTIO completion; same handling as above */
        struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;

        qlt_do_ctio_completion(vha, rsp, entry->handle,
            le16_to_cpu(entry->status)|(pkt->entry_status << 16),
            entry);
        break;
    }

    case IMMED_NOTIFY_TYPE:
        ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
        qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
        break;

    case NOTIFY_ACK_TYPE:
        /* Completion of a NOTIFY ACK we queued earlier */
        if (tgt->notify_ack_expected > 0) {
            struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

            ql_dbg(ql_dbg_tgt, vha, 0xe036,
                "NOTIFY_ACK seq %08x status %x\n",
                le16_to_cpu(entry->u.isp2x.seq_id),
                le16_to_cpu(entry->u.isp2x.status));
            tgt->notify_ack_expected--;
            if (entry->u.isp2x.status !=
                cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
                ql_dbg(ql_dbg_tgt, vha, 0xe061,
                    "qla_target(%d): NOTIFY_ACK "
                    "failed %x\n", vha->vp_idx,
                    le16_to_cpu(entry->u.isp2x.status));
            }
        } else {
            ql_dbg(ql_dbg_tgt, vha, 0xe062,
                "qla_target(%d): Unexpected NOTIFY_ACK received\n",
                vha->vp_idx);
        }
        break;

    case ABTS_RECV_24XX:
        /* Initiator aborted an exchange */
        ql_dbg(ql_dbg_tgt, vha, 0xe037,
            "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
        qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
        break;

    case ABTS_RESP_24XX:
        /* Completion of an ABTS response we queued earlier */
        if (tgt->abts_resp_expected > 0) {
            qlt_handle_abts_completion(vha, rsp, pkt);
        } else {
            ql_dbg(ql_dbg_tgt, vha, 0xe064,
                "qla_target(%d): Unexpected ABTS_RESP_24XX "
                "received\n", vha->vp_idx);
        }
        break;

    default:
        ql_dbg(ql_dbg_tgt, vha, 0xe065,
            "qla_target(%d): Received unknown response pkt "
            "type %x\n", vha->vp_idx, pkt->entry_type);
        break;
    }

}
6018 
6019 /*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
6021  */
6022 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
6023     uint16_t *mailbox)
6024 {
6025     struct qla_hw_data *ha = vha->hw;
6026     struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6027     int login_code;
6028 
6029     if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
6030         return;
6031 
6032     if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
6033         IS_QLA2100(ha))
6034         return;
6035     /*
6036      * In tgt_stop mode we also should allow all requests to pass.
6037      * Otherwise, some commands can stuck.
6038      */
6039 
6040 
6041     switch (code) {
6042     case MBA_RESET:         /* Reset */
6043     case MBA_SYSTEM_ERR:        /* System Error */
6044     case MBA_REQ_TRANSFER_ERR:  /* Request Transfer Error */
6045     case MBA_RSP_TRANSFER_ERR:  /* Response Transfer Error */
6046         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
6047             "qla_target(%d): System error async event %#x "
6048             "occurred", vha->vp_idx, code);
6049         break;
6050     case MBA_WAKEUP_THRES:      /* Request Queue Wake-up. */
6051         set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6052         break;
6053 
6054     case MBA_LOOP_UP:
6055     {
6056         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
6057             "qla_target(%d): Async LOOP_UP occurred "
6058             "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
6059             mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6060         if (tgt->link_reinit_iocb_pending) {
6061             qlt_send_notify_ack(ha->base_qpair,
6062                 &tgt->link_reinit_iocb,
6063                 0, 0, 0, 0, 0, 0);
6064             tgt->link_reinit_iocb_pending = 0;
6065         }
6066         break;
6067     }
6068 
6069     case MBA_LIP_OCCURRED:
6070     case MBA_LOOP_DOWN:
6071     case MBA_LIP_RESET:
6072     case MBA_RSCN_UPDATE:
6073         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
6074             "qla_target(%d): Async event %#x occurred "
6075             "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
6076             mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6077         break;
6078 
6079     case MBA_REJECTED_FCP_CMD:
6080         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
6081             "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
6082             vha->vp_idx,
6083             mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6084 
6085         if (mailbox[3] == 1) {
6086             /* exchange starvation. */
6087             vha->hw->exch_starvation++;
6088             if (vha->hw->exch_starvation > 5) {
6089                 ql_log(ql_log_warn, vha, 0xd03a,
6090                     "Exchange starvation-. Resetting RISC\n");
6091 
6092                 vha->hw->exch_starvation = 0;
6093                 if (IS_P3P_TYPE(vha->hw))
6094                     set_bit(FCOE_CTX_RESET_NEEDED,
6095                         &vha->dpc_flags);
6096                 else
6097                     set_bit(ISP_ABORT_NEEDED,
6098                         &vha->dpc_flags);
6099                 qla2xxx_wake_dpc(vha);
6100             }
6101         }
6102         break;
6103 
6104     case MBA_PORT_UPDATE:
6105         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
6106             "qla_target(%d): Port update async event %#x "
6107             "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
6108             "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
6109             mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
6110 
6111         login_code = mailbox[2];
6112         if (login_code == 0x4) {
6113             ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
6114                 "Async MB 2: Got PLOGI Complete\n");
6115             vha->hw->exch_starvation = 0;
6116         } else if (login_code == 0x7)
6117             ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
6118                 "Async MB 2: Port Logged Out\n");
6119         break;
6120     default:
6121         break;
6122     }
6123 
6124 }
6125 
6126 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
6127     uint16_t loop_id)
6128 {
6129     fc_port_t *fcport, *tfcp, *del;
6130     int rc;
6131     unsigned long flags;
6132     u8 newfcport = 0;
6133 
6134     fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6135     if (!fcport) {
6136         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
6137             "qla_target(%d): Allocation of tmp FC port failed",
6138             vha->vp_idx);
6139         return NULL;
6140     }
6141 
6142     fcport->loop_id = loop_id;
6143 
6144     rc = qla24xx_gpdb_wait(vha, fcport, 0);
6145     if (rc != QLA_SUCCESS) {
6146         ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
6147             "qla_target(%d): Failed to retrieve fcport "
6148             "information -- get_port_database() returned %x "
6149             "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
6150         kfree(fcport);
6151         return NULL;
6152     }
6153 
6154     del = NULL;
6155     spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6156     tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
6157 
6158     if (tfcp) {
6159         tfcp->d_id = fcport->d_id;
6160         tfcp->port_type = fcport->port_type;
6161         tfcp->supported_classes = fcport->supported_classes;
6162         tfcp->flags |= fcport->flags;
6163         tfcp->scan_state = QLA_FCPORT_FOUND;
6164 
6165         del = fcport;
6166         fcport = tfcp;
6167     } else {
6168         if (vha->hw->current_topology == ISP_CFG_F)
6169             fcport->flags |= FCF_FABRIC_DEVICE;
6170 
6171         list_add_tail(&fcport->list, &vha->vp_fcports);
6172         if (!IS_SW_RESV_ADDR(fcport->d_id))
6173            vha->fcport_count++;
6174         fcport->login_gen++;
6175         qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
6176         fcport->login_succ = 1;
6177         newfcport = 1;
6178     }
6179 
6180     fcport->deleted = 0;
6181     spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6182 
6183     switch (vha->host->active_mode) {
6184     case MODE_INITIATOR:
6185     case MODE_DUAL:
6186         if (newfcport) {
6187             if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
6188                 qla24xx_sched_upd_fcport(fcport);
6189             } else {
6190                 ql_dbg(ql_dbg_disc, vha, 0x20ff,
6191                    "%s %d %8phC post gpsc fcp_cnt %d\n",
6192                    __func__, __LINE__, fcport->port_name, vha->fcport_count);
6193                 qla24xx_post_gpsc_work(vha, fcport);
6194             }
6195         }
6196         break;
6197 
6198     case MODE_TARGET:
6199     default:
6200         break;
6201     }
6202     if (del)
6203         qla2x00_free_fcport(del);
6204 
6205     return fcport;
6206 }
6207 
/*
 * Resolve an FC source id (s_id) to a session, creating the session (and
 * its fcport) if needed. Returns NULL on any failure.
 *
 * NOTE(review): acquires vha->vha_tgt.tgt_mutex internally (see below),
 * so the caller must NOT already hold it — the previous "Must be called
 * under tgt_mutex" note was stale.
 */
static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
                       be_id_t s_id)
{
    struct fc_port *sess = NULL;
    fc_port_t *fcport = NULL;
    int rc, global_resets;
    uint16_t loop_id = 0;

    if (s_id.domain == 0xFF && s_id.area == 0xFC) {
        /*
         * This is Domain Controller, so it should be
         * OK to drop SCSI commands from it.
         */
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
            "Unable to find initiator with S_ID %x:%x:%x",
            s_id.domain, s_id.area, s_id.al_pa);
        return NULL;
    }

    mutex_lock(&vha->vha_tgt.tgt_mutex);

retry:
    /* Snapshot the reset counter so a concurrent reset can be detected. */
    global_resets =
        atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);

    rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
    if (rc != 0) {
        mutex_unlock(&vha->vha_tgt.tgt_mutex);

        ql_log(ql_log_info, vha, 0xf071,
            "qla_target(%d): Unable to find "
            "initiator with S_ID %x:%x:%x",
            vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);

        if (rc == -ENOENT) {
            /* Firmware does not know this port: send it a LOGO. */
            qlt_port_logo_t logo;

            logo.id = be_to_port_id(s_id);
            logo.cmd_count = 1;
            qlt_send_first_logo(vha, &logo);
        }

        return NULL;
    }

    fcport = qlt_get_port_database(vha, loop_id);
    if (!fcport) {
        mutex_unlock(&vha->vha_tgt.tgt_mutex);
        return NULL;
    }

    /* A global reset raced with the lookup: data may be stale, redo it. */
    if (global_resets !=
        atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
            "qla_target(%d): global reset during session discovery "
            "(counter was %d, new %d), retrying", vha->vp_idx,
            global_resets,
            atomic_read(&vha->vha_tgt.
            qla_tgt->tgt_global_resets_count));
        goto retry;
    }

    sess = qlt_create_sess(vha, fcport, true);

    mutex_unlock(&vha->vha_tgt.tgt_mutex);

    return sess;
}
6277 
/*
 * Process a deferred ABTS from the sess_works list: find (or create) the
 * session for the frame's S_ID and hand the ABTS to
 * __qlt_24xx_handle_abts(). On any failure path the ABTS is answered
 * with FCP_TMF_REJECTED.
 */
static void qlt_abort_work(struct qla_tgt *tgt,
    struct qla_tgt_sess_work_param *prm)
{
    struct scsi_qla_host *vha = tgt->vha;
    struct qla_hw_data *ha = vha->hw;
    struct fc_port *sess = NULL;
    unsigned long flags = 0, flags2 = 0;
    be_id_t s_id;
    int rc;

    spin_lock_irqsave(&ha->tgt.sess_lock, flags2);

    if (tgt->tgt_stop)
        goto out_term2;

    s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);

    sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
    if (!sess) {
        /* Drop the spinlock: qlt_make_local_sess() takes a mutex. */
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

        sess = qlt_make_local_sess(vha, s_id);
        /* sess has got an extra creation ref */

        spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
        if (!sess)
            goto out_term2;
    } else {
        /* Session is being torn down: reject rather than use it. */
        if (sess->deleted) {
            sess = NULL;
            goto out_term2;
        }

        /* Pin the session for the duration of the ABTS handling. */
        if (!kref_get_unless_zero(&sess->sess_kref)) {
            ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
                "%s: kref_get fail %8phC \n",
                 __func__, sess->port_name);
            sess = NULL;
            goto out_term2;
        }
    }

    rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

    /* Balance the creation/lookup reference taken above. */
    ha->tgt.tgt_ops->put_sess(sess);

    if (rc != 0)
        goto out_term;
    return;

out_term2:
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

out_term:
    spin_lock_irqsave(&ha->hardware_lock, flags);
    qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
        FCP_TMF_REJECTED, false);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
6338 
/*
 * Process a deferred task-management request from the sess_works list:
 * find (or create) the session for the TM IOCB's S_ID and pass the
 * request to qlt_issue_task_mgmt(). On any failure path the exchange is
 * terminated instead.
 */
static void qlt_tmr_work(struct qla_tgt *tgt,
    struct qla_tgt_sess_work_param *prm)
{
    struct atio_from_isp *a = &prm->tm_iocb2;
    struct scsi_qla_host *vha = tgt->vha;
    struct qla_hw_data *ha = vha->hw;
    struct fc_port *sess;
    unsigned long flags;
    be_id_t s_id;
    int rc;
    u64 unpacked_lun;
    int fn;
    void *iocb;

    spin_lock_irqsave(&ha->tgt.sess_lock, flags);

    if (tgt->tgt_stop)
        goto out_term2;

    s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
    sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
    if (!sess) {
        /* Drop the spinlock: qlt_make_local_sess() takes a mutex. */
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

        sess = qlt_make_local_sess(vha, s_id);
        /* sess has got an extra creation ref */

        spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        if (!sess)
            goto out_term2;
    } else {
        /* Session is being torn down: terminate the exchange. */
        if (sess->deleted) {
            goto out_term2;
        }

        /* Pin the session for the duration of the TM handling. */
        if (!kref_get_unless_zero(&sess->sess_kref)) {
            ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
                "%s: kref_get fail %8phC\n",
                 __func__, sess->port_name);
            goto out_term2;
        }
    }

    iocb = a;
    fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
    unpacked_lun =
        scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);

    rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

    /* Balance the creation/lookup reference taken above. */
    ha->tgt.tgt_ops->put_sess(sess);

    if (rc != 0)
        goto out_term;
    return;

out_term2:
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
    qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
}
6401 
6402 static void qlt_sess_work_fn(struct work_struct *work)
6403 {
6404     struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
6405     struct scsi_qla_host *vha = tgt->vha;
6406     unsigned long flags;
6407 
6408     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6409 
6410     spin_lock_irqsave(&tgt->sess_work_lock, flags);
6411     while (!list_empty(&tgt->sess_works_list)) {
6412         struct qla_tgt_sess_work_param *prm = list_entry(
6413             tgt->sess_works_list.next, typeof(*prm),
6414             sess_works_list_entry);
6415 
6416         /*
6417          * This work can be scheduled on several CPUs at time, so we
6418          * must delete the entry to eliminate double processing
6419          */
6420         list_del(&prm->sess_works_list_entry);
6421 
6422         spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6423 
6424         switch (prm->type) {
6425         case QLA_TGT_SESS_WORK_ABORT:
6426             qlt_abort_work(tgt, prm);
6427             break;
6428         case QLA_TGT_SESS_WORK_TM:
6429             qlt_tmr_work(tgt, prm);
6430             break;
6431         default:
6432             BUG_ON(1);
6433             break;
6434         }
6435 
6436         spin_lock_irqsave(&tgt->sess_work_lock, flags);
6437 
6438         kfree(prm);
6439     }
6440     spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
6441 }
6442 
/*
 * Allocate and register the struct qla_tgt for @base_vha: per-qpair
 * hints, the LUN->qpair btree, session-work machinery, and the entry in
 * the global qla_tgt_glist. Returns 0 on success (also when target mode
 * is compiled out or unsupported by the adapter) or a negative errno on
 * allocation/initialization failure.
 *
 * Must be called under tgt_host_action_mutex.
 */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
    struct qla_tgt *tgt;
    int rc, i;
    struct qla_qpair_hint *h;

    if (!QLA_TGT_MODE_ENABLED())
        return 0;

    if (!IS_TGT_MODE_CAPABLE(ha)) {
        ql_log(ql_log_warn, base_vha, 0xe070,
            "This adapter does not support target mode.\n");
        return 0;
    }

    ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
        "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

    BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

    tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
    if (!tgt) {
        ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
            "Unable to allocate struct qla_tgt\n");
        return -ENOMEM;
    }

    /* One hint per qpair, plus slot 0 for the base qpair. */
    tgt->qphints = kcalloc(ha->max_qpairs + 1,
                   sizeof(struct qla_qpair_hint),
                   GFP_KERNEL);
    if (!tgt->qphints) {
        kfree(tgt);
        ql_log(ql_log_warn, base_vha, 0x0197,
            "Unable to allocate qpair hints.\n");
        return -ENOMEM;
    }

    if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
        base_vha->host->hostt->supported_mode |= MODE_TARGET;

    rc = btree_init64(&tgt->lun_qpair_map);
    if (rc) {
        kfree(tgt->qphints);
        kfree(tgt);
        ql_log(ql_log_info, base_vha, 0x0198,
            "Unable to initialize lun_qpair_map btree\n");
        return -EIO;
    }
    /* Hint slot 0 always maps to the base qpair. */
    h = &tgt->qphints[0];
    h->qpair = ha->base_qpair;
    INIT_LIST_HEAD(&h->hint_elem);
    h->cpuid = ha->base_qpair->cpuid;
    list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);

    /* Link a hint onto each existing queue pair, under its own lock. */
    for (i = 0; i < ha->max_qpairs; i++) {
        unsigned long flags;

        struct qla_qpair *qpair = ha->queue_pair_map[i];

        h = &tgt->qphints[i + 1];
        INIT_LIST_HEAD(&h->hint_elem);
        if (qpair) {
            h->qpair = qpair;
            spin_lock_irqsave(qpair->qp_lock_ptr, flags);
            list_add_tail(&h->hint_elem, &qpair->hints_list);
            spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
            h->cpuid = qpair->cpuid;
        }
    }

    tgt->ha = ha;
    tgt->vha = base_vha;
    init_waitqueue_head(&tgt->waitQ);
    INIT_LIST_HEAD(&tgt->del_sess_list);
    spin_lock_init(&tgt->sess_work_lock);
    INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
    INIT_LIST_HEAD(&tgt->sess_works_list);
    atomic_set(&tgt->tgt_global_resets_count, 0);

    base_vha->vha_tgt.qla_tgt = tgt;

    ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
        "qla_target(%d): using 64 Bit PCI addressing",
        base_vha->vp_idx);
    /* 3 is reserved */
    tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);

    mutex_lock(&qla_tgt_mutex);
    list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
    mutex_unlock(&qla_tgt_mutex);

    /* Let the upper layer (e.g. tcm_qla2xxx) know about the new target. */
    if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
        ha->tgt.tgt_ops->add_target(base_vha);

    return 0;
}
6540 
6541 /* Must be called under tgt_host_action_mutex */
6542 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6543 {
6544     if (!vha->vha_tgt.qla_tgt)
6545         return 0;
6546 
6547     if (vha->fc_vport) {
6548         qlt_release(vha->vha_tgt.qla_tgt);
6549         return 0;
6550     }
6551 
6552     /* free left over qfull cmds */
6553     qlt_init_term_exchange(vha);
6554 
6555     ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6556         vha->host_no, ha);
6557     qlt_release(vha->vha_tgt.qla_tgt);
6558 
6559     return 0;
6560 }
6561 
6562 void qla_remove_hostmap(struct qla_hw_data *ha)
6563 {
6564     struct scsi_qla_host *node;
6565     u32 key = 0;
6566 
6567     btree_for_each_safe32(&ha->host_map, key, node)
6568         btree_remove32(&ha->host_map, key);
6569 
6570     btree_destroy32(&ha->host_map);
6571 }
6572 
/*
 * Log the HBA's WWNN/WWPN next to the WWPN handed in from configfs, for
 * debugging lport matching. Serializes @wwpn big-endian into @b, which
 * must have room for 8 bytes (WWN_SIZE); @b remains filled for the
 * caller's subsequent comparison.
 */
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
    unsigned char *b)
{
    pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
    pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
    put_unaligned_be64(wwpn, b);
    pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
}
6581 
6582 /**
6583  * qlt_lport_register - register lport with external module
6584  *
6585  * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
6586  * @phys_wwpn: physical port WWPN
6587  * @npiv_wwpn: NPIV WWPN
6588  * @npiv_wwnn: NPIV WWNN
6589  * @callback:  lport initialization callback for tcm_qla2xxx code
6590  */
6591 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
6592                u64 npiv_wwpn, u64 npiv_wwnn,
6593                int (*callback)(struct scsi_qla_host *, void *, u64, u64))
6594 {
6595     struct qla_tgt *tgt;
6596     struct scsi_qla_host *vha;
6597     struct qla_hw_data *ha;
6598     struct Scsi_Host *host;
6599     unsigned long flags;
6600     int rc;
6601     u8 b[WWN_SIZE];
6602 
6603     mutex_lock(&qla_tgt_mutex);
6604     list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
6605         vha = tgt->vha;
6606         ha = vha->hw;
6607 
6608         host = vha->host;
6609         if (!host)
6610             continue;
6611 
6612         if (!(host->hostt->supported_mode & MODE_TARGET))
6613             continue;
6614 
6615         if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6616             continue;
6617 
6618         spin_lock_irqsave(&ha->hardware_lock, flags);
6619         if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
6620             pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
6621                 host->host_no);
6622             spin_unlock_irqrestore(&ha->hardware_lock, flags);
6623             continue;
6624         }
6625         if (tgt->tgt_stop) {
6626             pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
6627                  host->host_no);
6628             spin_unlock_irqrestore(&ha->hardware_lock, flags);
6629             continue;
6630         }
6631         spin_unlock_irqrestore(&ha->hardware_lock, flags);
6632 
6633         if (!scsi_host_get(host)) {
6634             ql_dbg(ql_dbg_tgt, vha, 0xe068,
6635                 "Unable to scsi_host_get() for"
6636                 " qla2xxx scsi_host\n");
6637             continue;
6638         }
6639         qlt_lport_dump(vha, phys_wwpn, b);
6640 
6641         if (memcmp(vha->port_name, b, WWN_SIZE)) {
6642             scsi_host_put(host);
6643             continue;
6644         }
6645         rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6646         if (rc != 0)
6647             scsi_host_put(host);
6648 
6649         mutex_unlock(&qla_tgt_mutex);
6650         return rc;
6651     }
6652     mutex_unlock(&qla_tgt_mutex);
6653 
6654     return -ENODEV;
6655 }
6656 EXPORT_SYMBOL(qlt_lport_register);
6657 
6658 /**
6659  * qlt_lport_deregister - Degister lport
6660  *
6661  * @vha:  Registered scsi_qla_host pointer
6662  */
6663 void qlt_lport_deregister(struct scsi_qla_host *vha)
6664 {
6665     struct qla_hw_data *ha = vha->hw;
6666     struct Scsi_Host *sh = vha->host;
6667     /*
6668      * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
6669      */
6670     vha->vha_tgt.target_lport_ptr = NULL;
6671     ha->tgt.tgt_ops = NULL;
6672     /*
6673      * Release the Scsi_Host reference for the underlying qla2xxx host
6674      */
6675     scsi_host_put(sh);
6676 }
6677 EXPORT_SYMBOL(qlt_lport_deregister);
6678 
6679 /* Must be called under HW lock */
6680 void qlt_set_mode(struct scsi_qla_host *vha)
6681 {
6682     switch (vha->qlini_mode) {
6683     case QLA2XXX_INI_MODE_DISABLED:
6684     case QLA2XXX_INI_MODE_EXCLUSIVE:
6685         vha->host->active_mode = MODE_TARGET;
6686         break;
6687     case QLA2XXX_INI_MODE_ENABLED:
6688         vha->host->active_mode = MODE_INITIATOR;
6689         break;
6690     case QLA2XXX_INI_MODE_DUAL:
6691         vha->host->active_mode = MODE_DUAL;
6692         break;
6693     default:
6694         break;
6695     }
6696 }
6697 
6698 /* Must be called under HW lock */
6699 static void qlt_clear_mode(struct scsi_qla_host *vha)
6700 {
6701     switch (vha->qlini_mode) {
6702     case QLA2XXX_INI_MODE_DISABLED:
6703         vha->host->active_mode = MODE_UNKNOWN;
6704         break;
6705     case QLA2XXX_INI_MODE_EXCLUSIVE:
6706         vha->host->active_mode = MODE_INITIATOR;
6707         break;
6708     case QLA2XXX_INI_MODE_ENABLED:
6709     case QLA2XXX_INI_MODE_DUAL:
6710         vha->host->active_mode = MODE_INITIATOR;
6711         break;
6712     default:
6713         break;
6714     }
6715 }
6716 
6717 /*
6718  * qla_tgt_enable_vha - NO LOCK HELD
6719  *
6720  * host_reset, bring up w/ Target Mode Enabled
6721  */
6722 void
6723 qlt_enable_vha(struct scsi_qla_host *vha)
6724 {
6725     struct qla_hw_data *ha = vha->hw;
6726     struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6727     unsigned long flags;
6728     scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
6729 
6730     if (!tgt) {
6731         ql_dbg(ql_dbg_tgt, vha, 0xe069,
6732             "Unable to locate qla_tgt pointer from"
6733             " struct qla_hw_data\n");
6734         dump_stack();
6735         return;
6736     }
6737     if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6738         return;
6739 
6740     if (ha->tgt.num_act_qpairs > ha->max_qpairs)
6741         ha->tgt.num_act_qpairs = ha->max_qpairs;
6742     spin_lock_irqsave(&ha->hardware_lock, flags);
6743     tgt->tgt_stopped = 0;
6744     qlt_set_mode(vha);
6745     spin_unlock_irqrestore(&ha->hardware_lock, flags);
6746 
6747     mutex_lock(&ha->optrom_mutex);
6748     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
6749         "%s.\n", __func__);
6750     if (vha->vp_idx) {
6751         qla24xx_disable_vp(vha);
6752         qla24xx_enable_vp(vha);
6753     } else {
6754         set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
6755         qla2xxx_wake_dpc(base_vha);
6756         WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
6757                  QLA_SUCCESS);
6758     }
6759     mutex_unlock(&ha->optrom_mutex);
6760 }
6761 EXPORT_SYMBOL(qlt_enable_vha);
6762 
6763 /*
6764  * qla_tgt_disable_vha - NO LOCK HELD
6765  *
6766  * Disable Target Mode and reset the adapter
6767  */
6768 static void qlt_disable_vha(struct scsi_qla_host *vha)
6769 {
6770     struct qla_hw_data *ha = vha->hw;
6771     struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6772     unsigned long flags;
6773 
6774     if (!tgt) {
6775         ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6776             "Unable to locate qla_tgt pointer from"
6777             " struct qla_hw_data\n");
6778         dump_stack();
6779         return;
6780     }
6781 
6782     spin_lock_irqsave(&ha->hardware_lock, flags);
6783     qlt_clear_mode(vha);
6784     spin_unlock_irqrestore(&ha->hardware_lock, flags);
6785 
6786     set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6787     qla2xxx_wake_dpc(vha);
6788 
6789     /*
6790      * We are expecting the offline state.
6791      * QLA_FUNCTION_FAILED means that adapter is offline.
6792      */
6793     if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
6794         ql_dbg(ql_dbg_tgt, vha, 0xe081,
6795                "adapter is offline\n");
6796 }
6797 
6798 /*
6799  * Called from qla_init.c:qla24xx_vport_create() contex to setup
6800  * the target mode specific struct scsi_qla_host and struct qla_hw_data
6801  * members.
6802  */
6803 void
6804 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6805 {
6806     vha->vha_tgt.qla_tgt = NULL;
6807 
6808     mutex_init(&vha->vha_tgt.tgt_mutex);
6809     mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6810 
6811     qlt_clear_mode(vha);
6812 
6813     /*
6814      * NOTE: Currently the value is kept the same for <24xx and
6815      * >=24xx ISPs. If it is necessary to change it,
6816      * the check should be added for specific ISPs,
6817      * assigning the value appropriately.
6818      */
6819     ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
6820 
6821     qlt_add_target(ha, vha);
6822 }
6823 
6824 u8
6825 qlt_rff_id(struct scsi_qla_host *vha)
6826 {
6827     u8 fc4_feature = 0;
6828     /*
6829      * FC-4 Feature bit 0 indicates target functionality to the name server.
6830      */
6831     if (qla_tgt_mode_enabled(vha)) {
6832         fc4_feature = BIT_0;
6833     } else if (qla_ini_mode_enabled(vha)) {
6834         fc4_feature = BIT_1;
6835     } else if (qla_dual_mode_enabled(vha))
6836         fc4_feature = BIT_0 | BIT_1;
6837 
6838     return fc4_feature;
6839 }
6840 
6841 /*
6842  * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
6843  * @ha: HA context
6844  *
6845  * Beginning of ATIO ring has initialization control block already built
6846  * by nvram config routine.
6847  *
6848  * Returns 0 on success.
6849  */
6850 void
6851 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6852 {
6853     struct qla_hw_data *ha = vha->hw;
6854     uint16_t cnt;
6855     struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
6856 
6857     if (qla_ini_mode_enabled(vha))
6858         return;
6859 
6860     for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
6861         pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
6862         pkt++;
6863     }
6864 
6865 }
6866 
6867 /*
6868  * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
6869  * @ha: SCSI driver HA context
6870  */
6871 void
6872 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6873 {
6874     struct qla_hw_data *ha = vha->hw;
6875     struct atio_from_isp *pkt;
6876     int cnt, i;
6877 
6878     if (!ha->flags.fw_started)
6879         return;
6880 
6881     while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
6882         fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
6883         pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6884         cnt = pkt->u.raw.entry_count;
6885 
6886         if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
6887             /*
6888              * This packet is corrupted. The header + payload
6889              * can not be trusted. There is no point in passing
6890              * it further up.
6891              */
6892             ql_log(ql_log_warn, vha, 0xd03c,
6893                 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
6894                 &pkt->u.isp24.fcp_hdr.s_id,
6895                 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
6896                 pkt->u.isp24.exchange_addr, pkt);
6897 
6898             adjust_corrupted_atio(pkt);
6899             qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
6900                 ha_locked, 0);
6901         } else {
6902             qlt_24xx_atio_pkt_all_vps(vha,
6903                 (struct atio_from_isp *)pkt, ha_locked);
6904         }
6905 
6906         for (i = 0; i < cnt; i++) {
6907             ha->tgt.atio_ring_index++;
6908             if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
6909                 ha->tgt.atio_ring_index = 0;
6910                 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
6911             } else
6912                 ha->tgt.atio_ring_ptr++;
6913 
6914             pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
6915             pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
6916         }
6917         wmb();
6918     }
6919 
6920     /* Adjust ring index */
6921     wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6922 }
6923 
6924 void
6925 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6926 {
6927     struct qla_hw_data *ha = vha->hw;
6928     struct qla_msix_entry *msix = &ha->msix_entries[2];
6929     struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
6930 
6931     if (!QLA_TGT_MODE_ENABLED())
6932         return;
6933 
6934     wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
6935     wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
6936     rd_reg_dword(ISP_ATIO_Q_OUT(vha));
6937 
6938     if (ha->flags.msix_enabled) {
6939         if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6940             icb->msix_atio = cpu_to_le16(msix->entry);
6941             icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
6942             ql_dbg(ql_dbg_init, vha, 0xf072,
6943                 "Registering ICB vector 0x%x for atio que.\n",
6944                 msix->entry);
6945         }
6946     } else {
6947         /* INTx|MSI */
6948         if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
6949             icb->msix_atio = 0;
6950             icb->firmware_options_2 |= cpu_to_le32(BIT_26);
6951             ql_dbg(ql_dbg_init, vha, 0xf072,
6952                 "%s: Use INTx for ATIOQ.\n", __func__);
6953         }
6954     }
6955 }
6956 
/*
 * qlt_24xx_config_nvram_stage1() - Patch 24xx NVRAM option words for
 * target/dual mode, or restore the saved originals when both are off.
 * @vha: host adapter
 * @nv:  NVRAM image that will be handed to the firmware
 *
 * On the first target/dual-mode pass the untouched option words are saved
 * in ha->tgt.saved_* (exactly once, guarded by ha->tgt.saved_set) so they
 * can be restored if target mode is later disabled.  The exchange count,
 * LIP behaviour, FC Tape support (ql2xtgt_tape_enable) and class-2 options
 * are then adjusted as commented below.
 */
void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
				nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
				nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
				nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		/* target mode: 0xFFFF; dual mode: configured ql2xexchoffld */
		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		if (IS_QLA25XX(ha)) {
			/* Change Loop-prefer to Pt-Pt */
			tmp = ~(BIT_4|BIT_5|BIT_6);
			nv->firmware_options_2 &= cpu_to_le32(tmp);
			tmp = P2P << 4;
			nv->firmware_options_2 |= cpu_to_le32(tmp);
		}
	} else {
		/* Target mode off: put back the originally saved words. */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
				ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
				ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
				ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* Advertise class 2+3 or class 3 only, per base qpair setting. */
	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
7047 
7048 void
7049 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
7050     struct init_cb_24xx *icb)
7051 {
7052     struct qla_hw_data *ha = vha->hw;
7053 
7054     if (!QLA_TGT_MODE_ENABLED())
7055         return;
7056 
7057     if (ha->tgt.node_name_set) {
7058         memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
7059         icb->firmware_options_1 |= cpu_to_le32(BIT_14);
7060     }
7061 }
7062 
/*
 * qlt_81xx_config_nvram_stage1() - Patch 81xx NVRAM option words for
 * target/dual mode, or restore the saved originals when both are off.
 * @vha: host adapter
 * @nv:  NVRAM image that will be handed to the firmware
 *
 * 81xx twin of qlt_24xx_config_nvram_stage1(): saves the pristine option
 * words once (ha->tgt.saved_set), then adjusts exchange count, LIP
 * behaviour, FC Tape support and class-2 options; unconditionally forces
 * Pt-Pt topology preference here (no IS_QLA25XX gate).
 */
void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
				nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
				nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
				nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		/* target mode: 0xFFFF; dual mode: configured ql2xexchoffld */
		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		/* Change Loop-prefer to Pt-Pt */
		tmp = ~(BIT_4|BIT_5|BIT_6);
		nv->firmware_options_2 &= cpu_to_le32(tmp);
		tmp = P2P << 4;
		nv->firmware_options_2 |= cpu_to_le32(tmp);
	} else {
		/* Target mode off: put back the originally saved words. */
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
				ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
				ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
				ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	/* Advertise class 2+3 or class 3 only, per base qpair setting. */
	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
				FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}
7150 
7151 void
7152 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
7153     struct init_cb_81xx *icb)
7154 {
7155     struct qla_hw_data *ha = vha->hw;
7156 
7157     if (!QLA_TGT_MODE_ENABLED())
7158         return;
7159 
7160     if (ha->tgt.node_name_set) {
7161         memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
7162         icb->firmware_options_1 |= cpu_to_le32(BIT_14);
7163     }
7164 }
7165 
7166 void
7167 qlt_83xx_iospace_config(struct qla_hw_data *ha)
7168 {
7169     if (!QLA_TGT_MODE_ENABLED())
7170         return;
7171 
7172     ha->msix_count += 1; /* For ATIO Q */
7173 }
7174 
7175 
/*
 * qlt_modify_vp_config() - Adjust target/initiator mode option bits in a
 * VP config IOCB according to the vport's configured mode.
 * @vha:   vport being (re)configured
 * @vpmod: VP config entry whose options_idx1 byte is patched in place
 *
 * NOTE(review): the second comment says "bit4 = 1 => disable" yet the code
 * clears BIT_4 when target mode is enabled (which should want initiator
 * mode disabled) - comment and code polarity disagree; confirm against the
 * VP config IOCB firmware spec before changing either.
 */
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
    struct vp_config_entry_24xx *vpmod)
{
	/* enable target mode.  Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested.  bit4 = 1 => disable */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}
7188 
7189 void
7190 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
7191 {
7192     mutex_init(&base_vha->vha_tgt.tgt_mutex);
7193     if (!QLA_TGT_MODE_ENABLED())
7194         return;
7195 
7196     if  (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
7197         ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
7198         ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
7199     } else {
7200         ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
7201         ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
7202     }
7203 
7204     mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
7205 
7206     INIT_LIST_HEAD(&base_vha->unknown_atio_list);
7207     INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
7208         qlt_unknown_atio_work_fn);
7209 
7210     qlt_clear_mode(base_vha);
7211 
7212     qlt_update_vp_map(base_vha, SET_VP_IDX);
7213 }
7214 
7215 irqreturn_t
7216 qla83xx_msix_atio_q(int irq, void *dev_id)
7217 {
7218     struct rsp_que *rsp;
7219     scsi_qla_host_t *vha;
7220     struct qla_hw_data *ha;
7221     unsigned long flags;
7222 
7223     rsp = (struct rsp_que *) dev_id;
7224     ha = rsp->hw;
7225     vha = pci_get_drvdata(ha->pdev);
7226 
7227     spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7228 
7229     qlt_24xx_process_atio_queue(vha, 0);
7230 
7231     spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7232 
7233     return IRQ_HANDLED;
7234 }
7235 
7236 static void
7237 qlt_handle_abts_recv_work(struct work_struct *work)
7238 {
7239     struct qla_tgt_sess_op *op = container_of(work,
7240         struct qla_tgt_sess_op, work);
7241     scsi_qla_host_t *vha = op->vha;
7242     struct qla_hw_data *ha = vha->hw;
7243     unsigned long flags;
7244 
7245     if (qla2x00_reset_active(vha) ||
7246         (op->chip_reset != ha->base_qpair->chip_reset))
7247         return;
7248 
7249     spin_lock_irqsave(&ha->tgt.atio_lock, flags);
7250     qlt_24xx_process_atio_queue(vha, 0);
7251     spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
7252 
7253     spin_lock_irqsave(&ha->hardware_lock, flags);
7254     qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
7255     spin_unlock_irqrestore(&ha->hardware_lock, flags);
7256 
7257     kfree(op);
7258 }
7259 
7260 void
7261 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
7262     response_t *pkt)
7263 {
7264     struct qla_tgt_sess_op *op;
7265 
7266     op = kzalloc(sizeof(*op), GFP_ATOMIC);
7267 
7268     if (!op) {
7269         /* do not reach for ATIO queue here.  This is best effort err
7270          * recovery at this point.
7271          */
7272         qlt_response_pkt_all_vps(vha, rsp, pkt);
7273         return;
7274     }
7275 
7276     memcpy(&op->atio, pkt, sizeof(*pkt));
7277     op->vha = vha;
7278     op->chip_reset = vha->hw->base_qpair->chip_reset;
7279     op->rsp = rsp;
7280     INIT_WORK(&op->work, qlt_handle_abts_recv_work);
7281     queue_work(qla_tgt_wq, &op->work);
7282     return;
7283 }
7284 
7285 int
7286 qlt_mem_alloc(struct qla_hw_data *ha)
7287 {
7288     if (!QLA_TGT_MODE_ENABLED())
7289         return 0;
7290 
7291     ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
7292                      sizeof(struct qla_tgt_vp_map),
7293                      GFP_KERNEL);
7294     if (!ha->tgt.tgt_vp_map)
7295         return -ENOMEM;
7296 
7297     ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
7298         (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
7299         &ha->tgt.atio_dma, GFP_KERNEL);
7300     if (!ha->tgt.atio_ring) {
7301         kfree(ha->tgt.tgt_vp_map);
7302         return -ENOMEM;
7303     }
7304     return 0;
7305 }
7306 
7307 void
7308 qlt_mem_free(struct qla_hw_data *ha)
7309 {
7310     if (!QLA_TGT_MODE_ENABLED())
7311         return;
7312 
7313     if (ha->tgt.atio_ring) {
7314         dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
7315             sizeof(struct atio_from_isp), ha->tgt.atio_ring,
7316             ha->tgt.atio_dma);
7317     }
7318     ha->tgt.atio_ring = NULL;
7319     ha->tgt.atio_dma = 0;
7320     kfree(ha->tgt.tgt_vp_map);
7321     ha->tgt.tgt_vp_map = NULL;
7322 }
7323 
/*
 * qlt_update_vp_map() - Maintain the vp_idx array and d_id btree lookups.
 * @vha: vport whose mapping is being changed
 * @cmd: SET_VP_IDX, SET_AL_PA, RESET_VP_IDX or RESET_AL_PA
 *
 * SET/RESET_VP_IDX publish or clear this vha in the per-index tgt_vp_map
 * array (target mode only).  SET/RESET_AL_PA insert-or-update, or remove,
 * the vha in the host_map btree keyed by the 24-bit port id; RESET_AL_PA
 * also zeroes vha->d_id.  Unknown @cmd values fall through silently.
 *
 * vport_slock to be held by the caller
 */
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
	void *slot;
	u32 key;
	int rc;

	key = vha->d_id.b24;	/* 24-bit port id is the btree key */

	switch (cmd) {
	case SET_VP_IDX:
		if (!QLA_TGT_MODE_ENABLED())
			return;
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
		break;
	case SET_AL_PA:
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (!slot) {
			/* No entry yet for this key: insert a fresh one. */
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
			    "Save vha in host_map %p %06x\n", vha, key);
			rc = btree_insert32(&vha->hw->host_map,
				key, vha, GFP_ATOMIC);
			if (rc)
				ql_log(ql_log_info, vha, 0xd03e,
				    "Unable to insert s_id into host_map: %06x\n",
				    key);
			return;
		}
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
		    "replace existing vha in host_map %p %06x\n", vha, key);
		btree_update32(&vha->hw->host_map, key, vha);
		break;
	case RESET_VP_IDX:
		if (!QLA_TGT_MODE_ENABLED())
			return;
		vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
		break;
	case RESET_AL_PA:
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
		   "clear vha in host_map %p %06x\n", vha, key);
		slot = btree_lookup32(&vha->hw->host_map, key);
		if (slot)
			btree_remove32(&vha->hw->host_map, key);
		vha->d_id.b24 = 0;
		break;
	}
}
7372 
7373 void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
7374 {
7375 
7376     if (!vha->d_id.b24) {
7377         vha->d_id = id;
7378         qlt_update_vp_map(vha, SET_AL_PA);
7379     } else if (vha->d_id.b24 != id.b24) {
7380         qlt_update_vp_map(vha, RESET_AL_PA);
7381         vha->d_id = id;
7382         qlt_update_vp_map(vha, SET_AL_PA);
7383     }
7384 }
7385 
7386 static int __init qlt_parse_ini_mode(void)
7387 {
7388     if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
7389         ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
7390     else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
7391         ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
7392     else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
7393         ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
7394     else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
7395         ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
7396     else
7397         return false;
7398 
7399     return true;
7400 }
7401 
/*
 * qlt_init() - Module-init setup for the target-mode infrastructure.
 *
 * Parses the qlini_mode module parameter; when target mode is compiled in
 * and enabled, creates the slab caches, mempool and workqueue used by the
 * target code.  The error path unwinds in reverse order of creation.
 *
 * Return: 1 when initiator mode is being disabled, 0 on success with
 * initiator mode retained, negative errno on failure.
 */
int __init qlt_init(void)
{
	int ret;

	/* Hardware IOCB layouts are exactly 64 bytes; catch drift at build. */
	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	/* Pool keeps a minimum of 25 mgmt-cmd objects available. */
	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

	/* Error unwind: reverse order of creation. */
out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}
7467 
/*
 * qlt_exit() - Module-exit teardown; reverse of qlt_init().
 *
 * No-op when target mode was never enabled (qlt_init() created nothing).
 * Ordering matters: the workqueue goes first so no queued work can touch
 * the pool or caches, and the mempool is destroyed before the cache it was
 * created from.
 */
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}