0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Marvell Fibre Channel HBA Driver
0004  * Copyright (c)  2021     Marvell
0005  */
0006 #include "qla_def.h"
0007 #include "qla_edif.h"
0008 
0009 #include <linux/kthread.h>
0010 #include <linux/vmalloc.h>
0011 #include <linux/delay.h>
0012 #include <scsi/scsi_tcq.h>
0013 
0014 static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
0015         struct list_head *sa_list);
0016 static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
0017         struct qla_sa_update_frame *sa_frame);
0018 static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
0019         uint16_t sa_index);
0020 static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *);
0021 
0022 struct edb_node {
0023     struct  list_head   list;
0024     uint32_t        ntype;
0025     union {
0026         port_id_t   plogi_did;
0027         uint32_t    async;
0028         port_id_t   els_sid;
0029         struct edif_sa_update_aen   sa_aen;
0030     } u;
0031 };
0032 
0033 static struct els_sub_cmd {
0034     uint16_t cmd;
0035     const char *str;
0036 } sc_str[] = {
0037     {SEND_ELS, "send ELS"},
0038     {SEND_ELS_REPLY, "send ELS Reply"},
0039     {PULL_ELS, "retrieve ELS"},
0040 };
0041 
0042 const char *sc_to_str(uint16_t cmd)
0043 {
0044     int i;
0045     struct els_sub_cmd *e;
0046 
0047     for (i = 0; i < ARRAY_SIZE(sc_str); i++) {
0048         e = sc_str + i;
0049         if (cmd == e->cmd)
0050             return e->str;
0051     }
0052     return "unknown";
0053 }
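
/*
 * Editor's note - illustrative sketch, not part of the driver: sc_to_str()
 * is only a lookup helper used when logging vendor ELS sub-commands.  A
 * typical caller (compare qla_bsg_check() below) would use it roughly like
 * this; the variable names are hypothetical:
 *
 *	ql_dbg(ql_dbg_edif, vha, 0x911d, "%s: sub_cmd %s (0x%x)\n",
 *	    __func__, sc_to_str(p->sub_cmd), p->sub_cmd);
 */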
0054 
0055 static struct edb_node *qla_edb_getnext(scsi_qla_host_t *vha)
0056 {
0057     unsigned long   flags;
0058     struct edb_node *edbnode = NULL;
0059 
0060     spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
0061 
0062     /* db nodes are fifo - no qualifications done */
0063     if (!list_empty(&vha->e_dbell.head)) {
0064         edbnode = list_first_entry(&vha->e_dbell.head,
0065                        struct edb_node, list);
0066         list_del_init(&edbnode->list);
0067     }
0068 
0069     spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
0070 
0071     return edbnode;
0072 }
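
/*
 * Editor's note - illustrative sketch, not part of the driver: the doorbell
 * queue is drained FIFO-style by calling qla_edb_getnext() until it returns
 * NULL, which is what qla_edif_consume_dbell() further below does when it
 * packs events into the application's reply buffer:
 *
 *	struct edb_node *node;
 *
 *	while ((node = qla_edb_getnext(vha)) != NULL) {
 *		// translate node->ntype / node->u into an app event
 *		kfree(node);
 *	}
 */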
0073 
0074 static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node)
0075 {
0076     list_del_init(&node->list);
0077     kfree(node);
0078 }
0079 
0080 static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport,
0081         uint16_t handle)
0082 {
0083     struct edif_list_entry *entry;
0084     struct edif_list_entry *tentry;
0085     struct list_head *indx_list = &fcport->edif.edif_indx_list;
0086 
0087     list_for_each_entry_safe(entry, tentry, indx_list, next) {
0088         if (entry->handle == handle)
0089             return entry;
0090     }
0091     return NULL;
0092 }
0093 
0094 /* timer expired with no traffic while a delayed rx sa_index delete was pending */
0095 static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t)
0096 {
0097     struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer);
0098     fc_port_t *fcport = edif_entry->fcport;
0099     struct scsi_qla_host *vha = fcport->vha;
0100     struct  edif_sa_ctl *sa_ctl;
0101     uint16_t nport_handle;
0102     unsigned long flags = 0;
0103 
0104     ql_dbg(ql_dbg_edif, vha, 0x3069,
0105         "%s:  nport_handle 0x%x,  SA REPL Delay Timeout, %8phC portid=%06x\n",
0106         __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24);
0107 
0108     /*
0109      * if delete_sa_index is valid then no one has serviced this
0110      * delayed delete
0111      */
0112     spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
0113 
0114     /*
0115      * delete_sa_index is invalidated when we find the new sa_index in
0116      * the incoming data stream.  If it is not invalidated then we are
0117      * still looking for the new sa_index because there is no I/O and we
0118      * need to just force the rx delete and move on.  Otherwise
0119      * we could get another rekey which will result in an error 66.
0120      */
0121     if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
0122         uint16_t delete_sa_index = edif_entry->delete_sa_index;
0123 
0124         edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
0125         nport_handle = edif_entry->handle;
0126         spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
0127 
0128         sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
0129             delete_sa_index, 0);
0130 
0131         if (sa_ctl) {
0132             ql_dbg(ql_dbg_edif, vha, 0x3063,
0133                 "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n",
0134                 __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index,
0135                 nport_handle);
0136 
0137             sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
0138             set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
0139             qla_post_sa_replace_work(fcport->vha, fcport,
0140                 nport_handle, sa_ctl);
0141 
0142         } else {
0143             ql_dbg(ql_dbg_edif, vha, 0x3063,
0144                 "%s: sa_ctl not found for delete_sa_index: %d\n",
0145                 __func__, edif_entry->delete_sa_index);
0146         }
0147     } else {
0148         spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
0149     }
0150 }
0151 
0152 /*
0153  * create a new list entry for this nport handle and
0154  * add an sa_update index to the list - called for sa_update
0155  */
0156 static int qla_edif_list_add_sa_update_index(fc_port_t *fcport,
0157         uint16_t sa_index, uint16_t handle)
0158 {
0159     struct edif_list_entry *entry;
0160     unsigned long flags = 0;
0161 
0162     /* if the entry exists, then just update the sa_index */
0163     entry = qla_edif_list_find_sa_index(fcport, handle);
0164     if (entry) {
0165         entry->update_sa_index = sa_index;
0166         entry->count = 0;
0167         return 0;
0168     }
0169 
0170     /*
0171      * This is the normal path - there should be no existing entry
0172      * when update is called.  The exception is at startup
0173      * when update is called for the first two sa_indexes
0174      * followed by a delete of the first sa_index
0175      */
0176     entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC);
0177     if (!entry)
0178         return -ENOMEM;
0179 
0180     INIT_LIST_HEAD(&entry->next);
0181     entry->handle = handle;
0182     entry->update_sa_index = sa_index;
0183     entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
0184     entry->count = 0;
0185     entry->flags = 0;
0186     timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0);
0187     spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
0188     list_add_tail(&entry->next, &fcport->edif.edif_indx_list);
0189     spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
0190     return 0;
0191 }
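
/*
 * Editor's note - illustrative sketch, not part of the driver: an
 * edif_list_entry tracks one in-flight rekey per nport handle.  The usual
 * sequence (hypothetical snippet, locking and error handling omitted) is:
 *
 *	// an SA update arrives for this handle
 *	qla_edif_list_add_sa_update_index(fcport, new_sa_index, nport_handle);
 *
 *	// later, once the old rx key can be retired
 *	entry = qla_edif_list_find_sa_index(fcport, nport_handle);
 *	if (entry) {
 *		qla_edif_list_delete_sa_index(fcport, entry);
 *		kfree(entry);
 *	}
 */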
0192 
0193 /* remove an entry from the list */
0194 static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry)
0195 {
0196     unsigned long flags = 0;
0197 
0198     spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
0199     list_del(&entry->next);
0200     spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
0201 }
0202 
0203 int qla_post_sa_replace_work(struct scsi_qla_host *vha,
0204      fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl)
0205 {
0206     struct qla_work_evt *e;
0207 
0208     e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE);
0209     if (!e)
0210         return QLA_FUNCTION_FAILED;
0211 
0212     e->u.sa_update.fcport = fcport;
0213     e->u.sa_update.sa_ctl = sa_ctl;
0214     e->u.sa_update.nport_handle = nport_handle;
0215     fcport->flags |= FCF_ASYNC_ACTIVE;
0216     return qla2x00_post_work(vha, e);
0217 }
0218 
0219 static void
0220 qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port  *fcport)
0221 {
0222     ql_dbg(ql_dbg_edif, vha, 0x2058,
0223         "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n",
0224         fcport->node_name, fcport->port_name, fcport->d_id.b24);
0225 
0226     fcport->edif.tx_rekey_cnt = 0;
0227     fcport->edif.rx_rekey_cnt = 0;
0228 
0229     fcport->edif.tx_bytes = 0;
0230     fcport->edif.rx_bytes = 0;
0231 }
0232 
0233 static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
0234 fc_port_t *fcport)
0235 {
0236     struct extra_auth_els *p;
0237     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
0238     struct qla_bsg_auth_els_request *req =
0239         (struct qla_bsg_auth_els_request *)bsg_job->request;
0240 
0241     if (!vha->hw->flags.edif_enabled) {
0242         ql_dbg(ql_dbg_edif, vha, 0x9105,
0243             "%s edif not enabled\n", __func__);
0244         goto done;
0245     }
0246     if (DBELL_INACTIVE(vha)) {
0247         ql_dbg(ql_dbg_edif, vha, 0x09102,
0248             "%s doorbell not enabled\n", __func__);
0249         goto done;
0250     }
0251 
0252     p = &req->e;
0253 
0254     /* Get response */
0255     if (p->sub_cmd == PULL_ELS) {
0256         struct qla_bsg_auth_els_reply *rpl =
0257             (struct qla_bsg_auth_els_reply *)bsg_job->reply;
0258 
0259         qla_pur_get_pending(vha, fcport, bsg_job);
0260 
0261         ql_dbg(ql_dbg_edif, vha, 0x911d,
0262             "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n",
0263             __func__, sc_to_str(p->sub_cmd), fcport->port_name,
0264             fcport->d_id.b24, rpl->rx_xchg_address,
0265             rpl->r.reply_payload_rcv_len, bsg_job);
0266 
0267         goto done;
0268     }
0269     return 0;
0270 
0271 done:
0272 
0273     bsg_job_done(bsg_job, bsg_reply->result,
0274             bsg_reply->reply_payload_rcv_len);
0275     return -EIO;
0276 }
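
/*
 * Editor's note: a non-zero return from qla_bsg_check() means the bsg_job
 * has already been completed here (PULL_ELS serviced, edif disabled, or
 * doorbell inactive) and the caller must not complete it again; a return of
 * 0 means the ELS request should continue through the normal send path.
 */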
0277 
0278 fc_port_t *
0279 qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id)
0280 {
0281     fc_port_t *f, *tf;
0282 
0283     f = NULL;
0284     list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
0285         if (f->d_id.b24 == id->b24)
0286             return f;
0287     }
0288     return NULL;
0289 }
0290 
0291 /**
0292  * qla_edif_app_check(): check for valid application id.
0293  * @vha: host adapter pointer
0294  * @appid: application id
0295  * Return: false = fail, true = pass
0296  */
0297 static bool
0298 qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid)
0299 {
0300     /* check that the app is allowed/known to the driver */
0301 
0302     if (appid.app_vid != EDIF_APP_ID) {
0303         ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)",
0304             __func__, appid.app_vid);
0305         return false;
0306     }
0307 
0308     if (appid.version != EDIF_VERSION1) {
0309         ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app version is not ok (%x)",
0310             __func__, appid.version);
0311         return false;
0312     }
0313 
0314     return true;
0315 }
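
/*
 * Editor's note - illustrative sketch, not part of the driver: every vendor
 * sub-command carries a struct app_id, and handlers validate it before doing
 * any work, as qla_edif_app_mgmt() below does:
 *
 *	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
 *	    bsg_job->request_payload.sg_cnt, &appcheck,
 *	    sizeof(struct app_id));
 *
 *	if (!qla_edif_app_check(vha, appcheck)) {
 *		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
 *		goto done;
 *	}
 */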
0316 
0317 static void
0318 qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl,
0319     int index)
0320 {
0321     unsigned long flags = 0;
0322 
0323     spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
0324     list_del(&sa_ctl->next);
0325     spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
0326     if (index >= 512)
0327         fcport->edif.tx_rekey_cnt--;
0328     else
0329         fcport->edif.rx_rekey_cnt--;
0330     kfree(sa_ctl);
0331 }
0332 
0333 /* return an index to the freepool */
0334 static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir,
0335         uint16_t sa_index)
0336 {
0337     void *sa_id_map;
0338     struct scsi_qla_host *vha = fcport->vha;
0339     struct qla_hw_data *ha = vha->hw;
0340     unsigned long flags = 0;
0341     u16 lsa_index = sa_index;
0342 
0343     ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
0344         "%s: entry\n", __func__);
0345 
0346     if (dir) {
0347         sa_id_map = ha->edif_tx_sa_id_map;
0348         lsa_index -= EDIF_TX_SA_INDEX_BASE;
0349     } else {
0350         sa_id_map = ha->edif_rx_sa_id_map;
0351     }
0352 
0353     spin_lock_irqsave(&ha->sadb_fp_lock, flags);
0354     clear_bit(lsa_index, sa_id_map);
0355     spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
0356     ql_dbg(ql_dbg_edif, vha, 0x3063,
0357         "%s: index %d added to free pool\n", __func__, sa_index);
0358 }
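
/*
 * Editor's note - illustrative sketch, not part of the driver: the tx/rx SA
 * index free pools are bitmaps (ha->edif_tx_sa_id_map, ha->edif_rx_sa_id_map)
 * protected by ha->sadb_fp_lock.  Allocation happens in
 * qla_edif_sadb_get_sa_index(), which is not part of this excerpt; it is
 * assumed to be the mirror image of the release above, roughly:
 *
 *	spin_lock_irqsave(&ha->sadb_fp_lock, flags);
 *	bit = find_first_zero_bit(sa_id_map, pool_size);
 *	if (bit < pool_size)
 *		set_bit(bit, sa_id_map);
 *	spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
 *
 * where pool_size is a placeholder for the driver's SA index pool size and
 * tx indexes are offset by EDIF_TX_SA_INDEX_BASE before being handed out.
 */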
0359 
0360 static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha,
0361     struct fc_port *fcport, struct edif_sa_index_entry *entry,
0362     int pdir)
0363 {
0364     struct edif_list_entry *edif_entry;
0365     struct  edif_sa_ctl *sa_ctl;
0366     int i, dir;
0367     int key_cnt = 0;
0368 
0369     for (i = 0; i < 2; i++) {
0370         if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX)
0371             continue;
0372 
0373         if (fcport->loop_id != entry->handle) {
0374             ql_dbg(ql_dbg_edif, vha, 0x3063,
0375                 "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n",
0376                 __func__, i, entry->handle, fcport->loop_id,
0377                 entry->sa_pair[i].sa_index);
0378         }
0379 
0380         /* release the sa_ctl */
0381         sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
0382                 entry->sa_pair[i].sa_index, pdir);
0383         if (sa_ctl &&
0384             qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) {
0385             ql_dbg(ql_dbg_edif, vha, 0x3063,
0386                 "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index);
0387             qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
0388         } else {
0389             ql_dbg(ql_dbg_edif, vha, 0x3063,
0390                 "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl);
0391         }
0392 
0393         /* Release the index */
0394         ql_dbg(ql_dbg_edif, vha, 0x3063,
0395             "%s: freeing sa_index %d, nph: 0x%x\n",
0396             __func__, entry->sa_pair[i].sa_index, entry->handle);
0397 
0398         dir = (entry->sa_pair[i].sa_index <
0399             EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
0400         qla_edif_add_sa_index_to_freepool(fcport, dir,
0401             entry->sa_pair[i].sa_index);
0402 
0403         /* Delete timer on RX */
0404         if (pdir != SAU_FLG_TX) {
0405             edif_entry =
0406                 qla_edif_list_find_sa_index(fcport, entry->handle);
0407             if (edif_entry) {
0408                 ql_dbg(ql_dbg_edif, vha, 0x5033,
0409                     "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
0410                     __func__, edif_entry, edif_entry->update_sa_index,
0411                     edif_entry->delete_sa_index);
0412                 qla_edif_list_delete_sa_index(fcport, edif_entry);
0413                 /*
0414                  * valid delete_sa_index indicates there is a rx
0415                  * delayed delete queued
0416                  */
0417                 if (edif_entry->delete_sa_index !=
0418                         INVALID_EDIF_SA_INDEX) {
0419                     del_timer(&edif_entry->timer);
0420 
0421                     /* build and send the aen */
0422                     fcport->edif.rx_sa_set = 1;
0423                     fcport->edif.rx_sa_pending = 0;
0424                     qla_edb_eventcreate(vha,
0425                             VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
0426                             QL_VND_SA_STAT_SUCCESS,
0427                             QL_VND_RX_SA_KEY, fcport);
0428                 }
0429                 ql_dbg(ql_dbg_edif, vha, 0x5033,
0430                     "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
0431                     __func__, edif_entry, edif_entry->update_sa_index,
0432                     edif_entry->delete_sa_index);
0433 
0434                 kfree(edif_entry);
0435             }
0436         }
0437         key_cnt++;
0438     }
0439     ql_dbg(ql_dbg_edif, vha, 0x3063,
0440         "%s: %d %s keys released\n",
0441         __func__, key_cnt, pdir ? "tx" : "rx");
0442 }
0443 
0444 /* find and release all outstanding sadb sa_indices */
0445 void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
0446 {
0447     struct edif_sa_index_entry *entry, *tmp;
0448     struct qla_hw_data *ha = vha->hw;
0449     unsigned long flags;
0450 
0451     ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
0452         "%s: Starting...\n", __func__);
0453 
0454     spin_lock_irqsave(&ha->sadb_lock, flags);
0455 
0456     list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
0457         if (entry->fcport == fcport) {
0458             list_del(&entry->next);
0459             spin_unlock_irqrestore(&ha->sadb_lock, flags);
0460             __qla2x00_release_all_sadb(vha, fcport, entry, 0);
0461             kfree(entry);
0462             spin_lock_irqsave(&ha->sadb_lock, flags);
0463             break;
0464         }
0465     }
0466 
0467     list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
0468         if (entry->fcport == fcport) {
0469             list_del(&entry->next);
0470             spin_unlock_irqrestore(&ha->sadb_lock, flags);
0471 
0472             __qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX);
0473 
0474             kfree(entry);
0475             spin_lock_irqsave(&ha->sadb_lock, flags);
0476             break;
0477         }
0478     }
0479     spin_unlock_irqrestore(&ha->sadb_lock, flags);
0480 }
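
/*
 * Editor's note: ha->sadb_lock is dropped around __qla2x00_release_all_sadb()
 * above, apparently because that helper takes several other spinlocks
 * (sa_list_lock, sadb_fp_lock, indx_list_lock) and posts doorbell events;
 * the entry has already been unlinked from the sadb list, so it can safely
 * be processed and freed outside the lock.
 */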
0481 
0482 /**
0483  * qla_edif_app_start - application has announced its presence
0484  * @vha: host adapter pointer
0485  * @bsg_job: user request
0486  *
0487  * Set/activate doorbell.  Reset current sessions and re-login with
0488  * secure flag.
0489  */
0490 static int
0491 qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
0492 {
0493     int32_t         rval = 0;
0494     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
0495     struct app_start    appstart;
0496     struct app_start_reply  appreply;
0497     struct fc_port  *fcport, *tf;
0498 
0499     ql_log(ql_log_info, vha, 0x1313,
0500            "EDIF application registration with driver, FC device connections will be re-established.\n");
0501 
0502     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
0503         bsg_job->request_payload.sg_cnt, &appstart,
0504         sizeof(struct app_start));
0505 
0506     ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n",
0507          __func__, appstart.app_info.app_vid, appstart.app_start_flags);
0508 
0509     if (DBELL_INACTIVE(vha)) {
0510         /* mark doorbell as active since an app is now present */
0511         vha->e_dbell.db_flags |= EDB_ACTIVE;
0512     } else {
0513         goto out;
0514     }
0515 
0516     if (N2N_TOPO(vha->hw)) {
0517         list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
0518             fcport->n2n_link_reset_cnt = 0;
0519 
0520         if (vha->hw->flags.n2n_fw_acc_sec) {
0521             list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
0522                 qla_edif_sa_ctl_init(vha, fcport);
0523 
0524             /*
0525              * While authentication app was not running, remote device
0526              * could still try to login with this local port.  Let's
0527              * clear the state and try again.
0528              */
0529             qla2x00_wait_for_sess_deletion(vha);
0530 
0531             /* bounce the link to get the other guy to relogin */
0532             if (!vha->hw->flags.n2n_bigger) {
0533                 set_bit(N2N_LINK_RESET, &vha->dpc_flags);
0534                 qla2xxx_wake_dpc(vha);
0535             }
0536         } else {
0537             qla2x00_wait_for_hba_online(vha);
0538             set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
0539             qla2xxx_wake_dpc(vha);
0540             qla2x00_wait_for_hba_online(vha);
0541         }
0542     } else {
0543         list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
0544             ql_dbg(ql_dbg_edif, vha, 0x2058,
0545                    "FCSP - nn %8phN pn %8phN portid=%06x.\n",
0546                    fcport->node_name, fcport->port_name,
0547                    fcport->d_id.b24);
0548             ql_dbg(ql_dbg_edif, vha, 0xf084,
0549                    "%s: se_sess %p / sess %p from port %8phC "
0550                    "loop_id %#04x s_id %06x logout %d "
0551                    "keep %d els_logo %d disc state %d auth state %d "
0552                    "stop state %d\n",
0553                    __func__, fcport->se_sess, fcport,
0554                    fcport->port_name, fcport->loop_id,
0555                    fcport->d_id.b24, fcport->logout_on_delete,
0556                    fcport->keep_nport_handle, fcport->send_els_logo,
0557                    fcport->disc_state, fcport->edif.auth_state,
0558                    fcport->edif.app_stop);
0559 
0560             if (atomic_read(&vha->loop_state) == LOOP_DOWN)
0561                 break;
0562 
0563             fcport->login_retry = vha->hw->login_retry_count;
0564 
0565             fcport->edif.app_stop = 0;
0566             fcport->edif.app_sess_online = 0;
0567 
0568             if (fcport->scan_state != QLA_FCPORT_FOUND)
0569                 continue;
0570 
0571             if (fcport->port_type == FCT_UNKNOWN &&
0572                 !fcport->fc4_features)
0573                 rval = qla24xx_async_gffid(vha, fcport, true);
0574 
0575             if (!rval && !(fcport->fc4_features & FC4_FF_TARGET ||
0576                 fcport->port_type & (FCT_TARGET|FCT_NVME_TARGET)))
0577                 continue;
0578 
0579             rval = 0;
0580 
0581             ql_dbg(ql_dbg_edif, vha, 0x911e,
0582                    "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
0583                    __func__, fcport->port_name);
0584             qlt_schedule_sess_for_deletion(fcport);
0585             qla_edif_sa_ctl_init(vha, fcport);
0586         }
0587         set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
0588     }
0589 
0590     if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
0591         /* mark as active since an app is now present */
0592         vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
0593     } else {
0594         ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n",
0595              __func__);
0596     }
0597 
0598 out:
0599     appreply.host_support_edif = vha->hw->flags.edif_enabled;
0600     appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
0601     appreply.edif_edb_active = vha->e_dbell.db_flags;
0602     appreply.version = EDIF_VERSION1;
0603 
0604     bsg_job->reply_len = sizeof(struct fc_bsg_reply);
0605 
0606     SET_DID_STATUS(bsg_reply->result, DID_OK);
0607 
0608     bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
0609                                    bsg_job->reply_payload.sg_cnt,
0610                                    &appreply,
0611                                    sizeof(struct app_start_reply));
0612 
0613     ql_dbg(ql_dbg_edif, vha, 0x911d,
0614         "%s app start completed with 0x%x\n",
0615         __func__, rval);
0616 
0617     return rval;
0618 }
0619 
0620 /**
0621  * qla_edif_app_stop - app has announced it's exiting.
0622  * @vha: host adapter pointer
0623  * @bsg_job: user space command pointer
0624  *
0625  * Free any in-flight messages, clear all doorbell events
0626  * to the application. Reject any messages related to security.
0627  */
0628 static int
0629 qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
0630 {
0631     struct app_stop         appstop;
0632     struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
0633     struct fc_port  *fcport, *tf;
0634 
0635     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
0636         bsg_job->request_payload.sg_cnt, &appstop,
0637         sizeof(struct app_stop));
0638 
0639     ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n",
0640         __func__, appstop.app_info.app_vid);
0641 
0642     /* Call db stop and enode stop functions */
0643 
0644     /* if we leave this running, short waits are operational (< 16 secs) */
0645     qla_enode_stop(vha);        /* stop enode */
0646     qla_edb_stop(vha);          /* stop db */
0647 
0648     list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
0649         if (!(fcport->flags & FCF_FCSP_DEVICE))
0650             continue;
0651 
0652         if (fcport->flags & FCF_FCSP_DEVICE) {
0653             ql_dbg(ql_dbg_edif, vha, 0xf084,
0654                 "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n",
0655                 __func__, fcport,
0656                 fcport->port_name, fcport->loop_id, fcport->d_id.b24,
0657                 fcport->logout_on_delete, fcport->keep_nport_handle,
0658                 fcport->send_els_logo);
0659 
0660             if (atomic_read(&vha->loop_state) == LOOP_DOWN)
0661                 break;
0662 
0663             fcport->edif.app_stop = 1;
0664             ql_dbg(ql_dbg_edif, vha, 0x911e,
0665                 "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
0666                 __func__, fcport->port_name);
0667 
0668             fcport->send_els_logo = 1;
0669             qlt_schedule_sess_for_deletion(fcport);
0670         }
0671     }
0672 
0673     bsg_job->reply_len = sizeof(struct fc_bsg_reply);
0674     SET_DID_STATUS(bsg_reply->result, DID_OK);
0675 
0676     /* no return interface to app - it assumes we cleaned up ok */
0677 
0678     return 0;
0679 }
0680 
0681 static int
0682 qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
0683         struct app_plogi_reply *appplogireply)
0684 {
0685     int ret = 0;
0686 
0687     if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
0688         ql_dbg(ql_dbg_edif, vha, 0x911e,
0689             "%s: wwpn %8phC Both SA indexes have not been set: TX %d, RX %d.\n",
0690             __func__, fcport->port_name, fcport->edif.tx_sa_set,
0691             fcport->edif.rx_sa_set);
0692         appplogireply->prli_status = 0;
0693         ret = 1;
0694     } else  {
0695         ql_dbg(ql_dbg_edif, vha, 0x911e,
0696             "%s wwpn %8phC Both SA(s) updated.\n", __func__,
0697             fcport->port_name);
0698         fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
0699         fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
0700         appplogireply->prli_status = 1;
0701     }
0702     return ret;
0703 }
0704 
0705 /**
0706  * qla_edif_app_authok - authentication by app succeeded.  Driver can proceed
0707  *   with prli
0708  * @vha: host adapter pointer
0709  * @bsg_job: user request
0710  */
0711 static int
0712 qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
0713 {
0714     struct auth_complete_cmd appplogiok;
0715     struct app_plogi_reply  appplogireply = {0};
0716     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
0717     fc_port_t       *fcport = NULL;
0718     port_id_t       portid = {0};
0719 
0720     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
0721         bsg_job->request_payload.sg_cnt, &appplogiok,
0722         sizeof(struct auth_complete_cmd));
0723 
0724     /* silence unaligned access warning */
0725     portid.b.domain = appplogiok.u.d_id.b.domain;
0726     portid.b.area   = appplogiok.u.d_id.b.area;
0727     portid.b.al_pa  = appplogiok.u.d_id.b.al_pa;
0728 
0729     appplogireply.version = EDIF_VERSION1;
0730     switch (appplogiok.type) {
0731     case PL_TYPE_WWPN:
0732         fcport = qla2x00_find_fcport_by_wwpn(vha,
0733             appplogiok.u.wwpn, 0);
0734         if (!fcport)
0735             ql_dbg(ql_dbg_edif, vha, 0x911d,
0736                 "%s wwpn lookup failed: %8phC\n",
0737                 __func__, appplogiok.u.wwpn);
0738         break;
0739     case PL_TYPE_DID:
0740         fcport = qla2x00_find_fcport_by_pid(vha, &portid);
0741         if (!fcport)
0742             ql_dbg(ql_dbg_edif, vha, 0x911d,
0743                 "%s d_id lookup failed: %x\n", __func__,
0744                 portid.b24);
0745         break;
0746     default:
0747         ql_dbg(ql_dbg_edif, vha, 0x911d,
0748             "%s undefined type: %x\n", __func__,
0749             appplogiok.type);
0750         break;
0751     }
0752 
0753     if (!fcport) {
0754         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
0755         goto errstate_exit;
0756     }
0757 
0758     /*
0759      * if port is online then this is a REKEY operation
0760      * Only do sa update checking
0761      */
0762     if (atomic_read(&fcport->state) == FCS_ONLINE) {
0763         ql_dbg(ql_dbg_edif, vha, 0x911d,
0764             "%s Skipping PRLI complete based on rekey\n", __func__);
0765         appplogireply.prli_status = 1;
0766         SET_DID_STATUS(bsg_reply->result, DID_OK);
0767         qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
0768         goto errstate_exit;
0769     }
0770 
0771     /* make sure in AUTH_PENDING or else reject */
0772     if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
0773         ql_dbg(ql_dbg_edif, vha, 0x911e,
0774             "%s wwpn %8phC is not in auth pending state (%x)\n",
0775             __func__, fcport->port_name, fcport->disc_state);
0776         SET_DID_STATUS(bsg_reply->result, DID_OK);
0777         appplogireply.prli_status = 0;
0778         goto errstate_exit;
0779     }
0780 
0781     SET_DID_STATUS(bsg_reply->result, DID_OK);
0782     appplogireply.prli_status = 1;
0783     fcport->edif.authok = 1;
0784     if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
0785         ql_dbg(ql_dbg_edif, vha, 0x911e,
0786             "%s: wwpn %8phC Both SA indexes have not been set: TX %d, RX %d.\n",
0787             __func__, fcport->port_name, fcport->edif.tx_sa_set,
0788             fcport->edif.rx_sa_set);
0789         SET_DID_STATUS(bsg_reply->result, DID_OK);
0790         appplogireply.prli_status = 0;
0791         goto errstate_exit;
0792 
0793     } else {
0794         ql_dbg(ql_dbg_edif, vha, 0x911e,
0795             "%s wwpn %8phC Both SA(s) updated.\n", __func__,
0796             fcport->port_name);
0797         fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
0798         fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
0799     }
0800 
0801     if (qla_ini_mode_enabled(vha)) {
0802         ql_dbg(ql_dbg_edif, vha, 0x911e,
0803             "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
0804             __func__, fcport->port_name);
0805         qla24xx_post_prli_work(vha, fcport);
0806     }
0807 
0808 errstate_exit:
0809     bsg_job->reply_len = sizeof(struct fc_bsg_reply);
0810     bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
0811                                    bsg_job->reply_payload.sg_cnt,
0812                                    &appplogireply,
0813                                    sizeof(struct app_plogi_reply));
0814 
0815     return 0;
0816 }
0817 
0818 /**
0819  * qla_edif_app_authfail - authentication by app has failed.  Driver is given
0820  *   notice to tear down current session.
0821  * @vha: host adapter pointer
0822  * @bsg_job: user request
0823  */
0824 static int
0825 qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
0826 {
0827     int32_t         rval = 0;
0828     struct auth_complete_cmd appplogifail;
0829     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
0830     fc_port_t       *fcport = NULL;
0831     port_id_t       portid = {0};
0832 
0833     ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__);
0834 
0835     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
0836         bsg_job->request_payload.sg_cnt, &appplogifail,
0837         sizeof(struct auth_complete_cmd));
0838 
0839     /* silence unaligned access warning */
0840     portid.b.domain = appplogifail.u.d_id.b.domain;
0841     portid.b.area   = appplogifail.u.d_id.b.area;
0842     portid.b.al_pa  = appplogifail.u.d_id.b.al_pa;
0843 
0844     /*
0845      * TODO: edif: app has failed this plogi. Inform driver to
0846      * take any action (if any).
0847      */
0848     switch (appplogifail.type) {
0849     case PL_TYPE_WWPN:
0850         fcport = qla2x00_find_fcport_by_wwpn(vha,
0851             appplogifail.u.wwpn, 0);
0852         SET_DID_STATUS(bsg_reply->result, DID_OK);
0853         break;
0854     case PL_TYPE_DID:
0855         fcport = qla2x00_find_fcport_by_pid(vha, &portid);
0856         if (!fcport)
0857             ql_dbg(ql_dbg_edif, vha, 0x911d,
0858                 "%s d_id lookup failed: %x\n", __func__,
0859                 portid.b24);
0860         SET_DID_STATUS(bsg_reply->result, DID_OK);
0861         break;
0862     default:
0863         ql_dbg(ql_dbg_edif, vha, 0x911e,
0864             "%s undefined type: %x\n", __func__,
0865             appplogifail.type);
0866         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
0867         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
0868         rval = -1;
0869         break;
0870     }
0871 
0872     ql_dbg(ql_dbg_edif, vha, 0x911d,
0873         "%s fcport is 0x%p\n", __func__, fcport);
0874 
0875     if (fcport) {
0876         /* set/reset edif values and flags */
0877         ql_dbg(ql_dbg_edif, vha, 0x911e,
0878             "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
0879             __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);
0880 
0881         if (qla_ini_mode_enabled(fcport->vha)) {
0882             fcport->send_els_logo = 1;
0883             qlt_schedule_sess_for_deletion(fcport);
0884         }
0885     }
0886 
0887     return rval;
0888 }
0889 
0890 /**
0891  * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid,
0892  *   [initiator|target] mode).  It can request a specific session by nport id or
0893  *   all sessions.
0894  * @vha: host adapter pointer
0895  * @bsg_job: user request pointer
0896  */
0897 static int
0898 qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
0899 {
0900     int32_t         rval = 0;
0901     int32_t         pcnt = 0;
0902     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
0903     struct app_pinfo_req    app_req;
0904     struct app_pinfo_reply  *app_reply;
0905     port_id_t       tdid;
0906 
0907     ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__);
0908 
0909     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
0910         bsg_job->request_payload.sg_cnt, &app_req,
0911         sizeof(struct app_pinfo_req));
0912 
0913     app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
0914         sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL);
0915 
0916     if (!app_reply) {
0917         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
0918         rval = -1;
0919     } else {
0920         struct fc_port  *fcport = NULL, *tf;
0921 
0922         app_reply->version = EDIF_VERSION1;
0923 
0924         list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
0925             if (!(fcport->flags & FCF_FCSP_DEVICE))
0926                 continue;
0927 
0928             tdid = app_req.remote_pid;
0929 
0930             ql_dbg(ql_dbg_edif, vha, 0x2058,
0931                 "APP request entry - portid=%06x.\n", tdid.b24);
0932 
0933             /* Ran out of space */
0934             if (pcnt >= app_req.num_ports)
0935                 break;
0936 
0937             if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
0938                 continue;
0939 
0940             if (!N2N_TOPO(vha->hw)) {
0941                 if (fcport->scan_state != QLA_FCPORT_FOUND)
0942                     continue;
0943 
0944                 if (fcport->port_type == FCT_UNKNOWN &&
0945                     !fcport->fc4_features)
0946                     rval = qla24xx_async_gffid(vha, fcport,
0947                                    true);
0948 
0949                 if (!rval &&
0950                     !(fcport->fc4_features & FC4_FF_TARGET ||
0951                       fcport->port_type &
0952                       (FCT_TARGET | FCT_NVME_TARGET)))
0953                     continue;
0954             }
0955 
0956             rval = 0;
0957 
0958             app_reply->ports[pcnt].version = EDIF_VERSION1;
0959             app_reply->ports[pcnt].remote_type =
0960                 VND_CMD_RTYPE_UNKNOWN;
0961             if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET))
0962                 app_reply->ports[pcnt].remote_type |=
0963                     VND_CMD_RTYPE_TARGET;
0964             if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR))
0965                 app_reply->ports[pcnt].remote_type |=
0966                     VND_CMD_RTYPE_INITIATOR;
0967 
0968             app_reply->ports[pcnt].remote_pid = fcport->d_id;
0969 
0970             ql_dbg(ql_dbg_edif, vha, 0x2058,
0971                 "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x secure %d.\n",
0972                 fcport->node_name, fcport->port_name, pcnt,
0973                 fcport->d_id.b24, fcport->flags & FCF_FCSP_DEVICE);
0974 
0975             switch (fcport->edif.auth_state) {
0976             case VND_CMD_AUTH_STATE_ELS_RCVD:
0977                 if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) {
0978                     fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED;
0979                     app_reply->ports[pcnt].auth_state =
0980                         VND_CMD_AUTH_STATE_NEEDED;
0981                 } else {
0982                     app_reply->ports[pcnt].auth_state =
0983                         VND_CMD_AUTH_STATE_ELS_RCVD;
0984                 }
0985                 break;
0986             default:
0987                 app_reply->ports[pcnt].auth_state = fcport->edif.auth_state;
0988                 break;
0989             }
0990 
0991             memcpy(app_reply->ports[pcnt].remote_wwpn,
0992                 fcport->port_name, 8);
0993 
0994             app_reply->ports[pcnt].remote_state =
0995                 (atomic_read(&fcport->state) ==
0996                     FCS_ONLINE ? 1 : 0);
0997 
0998             pcnt++;
0999 
1000             if (tdid.b24 != 0)
1001                 break;
1002         }
1003         app_reply->port_count = pcnt;
1004         SET_DID_STATUS(bsg_reply->result, DID_OK);
1005     }
1006 
1007     bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1008     bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1009                                    bsg_job->reply_payload.sg_cnt,
1010                                    app_reply,
1011                                    sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt);
1012 
1013     kfree(app_reply);
1014 
1015     return rval;
1016 }
1017 
1018 /**
1019  * qla_edif_app_getstats - app would like to read various statistics info
1020  * @vha: host adapter pointer
1021  * @bsg_job: user request
1022  */
1023 static int32_t
1024 qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
1025 {
1026     int32_t         rval = 0;
1027     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1028     uint32_t size;
1029 
1030     struct app_sinfo_req    app_req;
1031     struct app_stats_reply  *app_reply;
1032     uint32_t pcnt = 0;
1033 
1034     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1035         bsg_job->request_payload.sg_cnt, &app_req,
1036         sizeof(struct app_sinfo_req));
1037     if (app_req.num_ports == 0) {
1038         ql_dbg(ql_dbg_async, vha, 0x911d,
1039            "%s app did not indicate number of ports to return\n",
1040             __func__);
1041         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1042         rval = -1;
1043     }
1044 
1045     size = sizeof(struct app_stats_reply) +
1046         (sizeof(struct app_sinfo) * app_req.num_ports);
1047 
1048     app_reply = kzalloc(size, GFP_KERNEL);
1049     if (!app_reply) {
1050         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1051         rval = -1;
1052     } else {
1053         struct fc_port  *fcport = NULL, *tf;
1054 
1055         app_reply->version = EDIF_VERSION1;
1056 
1057         list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
1058             if (fcport->edif.enable) {
1059                 if (pcnt >= app_req.num_ports)
1060                     break;
1061 
1062                 app_reply->elem[pcnt].rekey_count =
1063                     fcport->edif.rekey_cnt;
1064                 app_reply->elem[pcnt].tx_bytes =
1065                     fcport->edif.tx_bytes;
1066                 app_reply->elem[pcnt].rx_bytes =
1067                     fcport->edif.rx_bytes;
1068 
1069                 memcpy(app_reply->elem[pcnt].remote_wwpn,
1070                     fcport->port_name, 8);
1071 
1072                 pcnt++;
1073             }
1074         }
1075         app_reply->elem_count = pcnt;
1076         SET_DID_STATUS(bsg_reply->result, DID_OK);
1077     }
1078 
1079     bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1080     bsg_reply->reply_payload_rcv_len =
1081         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1082            bsg_job->reply_payload.sg_cnt, app_reply,
1083            sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt));
1084 
1085     kfree(app_reply);
1086 
1087     return rval;
1088 }
1089 
1090 static int32_t
1091 qla_edif_ack(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
1092 {
1093     struct fc_port *fcport;
1094     struct aen_complete_cmd ack;
1095     struct fc_bsg_reply     *bsg_reply = bsg_job->reply;
1096 
1097     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1098               bsg_job->request_payload.sg_cnt, &ack, sizeof(ack));
1099 
1100     ql_dbg(ql_dbg_edif, vha, 0x70cf,
1101            "%s: %06x event_code %x\n",
1102            __func__, ack.port_id.b24, ack.event_code);
1103 
1104     fcport = qla2x00_find_fcport_by_pid(vha, &ack.port_id);
1105     SET_DID_STATUS(bsg_reply->result, DID_OK);
1106 
1107     if (!fcport) {
1108         ql_dbg(ql_dbg_edif, vha, 0x70cf,
1109                "%s: unable to find fcport %06x \n",
1110                __func__, ack.port_id.b24);
1111         return 0;
1112     }
1113 
1114     switch (ack.event_code) {
1115     case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
1116         fcport->edif.sess_down_acked = 1;
1117         break;
1118     default:
1119         break;
1120     }
1121     return 0;
1122 }
1123 
1124 static int qla_edif_consume_dbell(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
1125 {
1126     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1127     u32 sg_skip, reply_payload_len;
1128     bool keep;
1129     struct edb_node *dbnode = NULL;
1130     struct edif_app_dbell ap;
1131     int dat_size = 0;
1132 
1133     sg_skip = 0;
1134     reply_payload_len = bsg_job->reply_payload.payload_len;
1135 
1136     while ((reply_payload_len - sg_skip) >= sizeof(struct edb_node)) {
1137         dbnode = qla_edb_getnext(vha);
1138         if (dbnode) {
1139             keep = true;
1140             dat_size = 0;
1141             ap.event_code = dbnode->ntype;
1142             switch (dbnode->ntype) {
1143             case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
1144             case VND_CMD_AUTH_STATE_NEEDED:
1145                 ap.port_id = dbnode->u.plogi_did;
1146                 dat_size += sizeof(ap.port_id);
1147                 break;
1148             case VND_CMD_AUTH_STATE_ELS_RCVD:
1149                 ap.port_id = dbnode->u.els_sid;
1150                 dat_size += sizeof(ap.port_id);
1151                 break;
1152             case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
1153                 ap.port_id = dbnode->u.sa_aen.port_id;
1154                 memcpy(&ap.event_data, &dbnode->u,
1155                     sizeof(struct edif_sa_update_aen));
1156                 dat_size += sizeof(struct edif_sa_update_aen);
1157                 break;
1158             default:
1159                 keep = false;
1160                 ql_log(ql_log_warn, vha, 0x09102,
1161                     "%s unknown DB type=%d %p\n",
1162                     __func__, dbnode->ntype, dbnode);
1163                 break;
1164             }
1165             ap.event_data_size = dat_size;
1166             /* 8 = sizeof(ap.event_code) + sizeof(ap.event_data_size) */
1167             dat_size += 8;
1168             if (keep)
1169                 sg_skip += sg_copy_buffer(bsg_job->reply_payload.sg_list,
1170                         bsg_job->reply_payload.sg_cnt,
1171                         &ap, dat_size, sg_skip, false);
1172 
1173             ql_dbg(ql_dbg_edif, vha, 0x09102,
1174                 "%s Doorbell consumed : type=%d %p\n",
1175                 __func__, dbnode->ntype, dbnode);
1176 
1177             kfree(dbnode);
1178         } else {
1179             break;
1180         }
1181     }
1182 
1183     SET_DID_STATUS(bsg_reply->result, DID_OK);
1184     bsg_reply->reply_payload_rcv_len = sg_skip;
1185     bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1186 
1187     return 0;
1188 }
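
/*
 * Editor's note - illustrative sketch, not part of the driver: the reply
 * buffer handed back to the application is a packed stream of records, each
 * made of event_code and event_data_size (8 bytes together) followed by
 * event_data_size bytes of payload (a port_id or a struct edif_sa_update_aen,
 * depending on the event).  A consumer could walk it roughly like this;
 * handle_event(), buf and rcv_len are hypothetical names for the app-side
 * handler, the copied buffer and reply_payload_rcv_len:
 *
 *	u32 off = 0;
 *
 *	while (off + 8 <= rcv_len) {
 *		struct edif_app_dbell *ev = (void *)(buf + off);
 *
 *		handle_event(ev->event_code, ev->event_data,
 *		    ev->event_data_size);
 *		off += 8 + ev->event_data_size;
 *	}
 */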
1189 
1190 static void __qla_edif_dbell_bsg_done(scsi_qla_host_t *vha, struct bsg_job *bsg_job,
1191     u32 delay)
1192 {
1193     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1194 
1195     /* small sleep for doorbell events to accumulate */
1196     if (delay)
1197         msleep(delay);
1198 
1199     qla_edif_consume_dbell(vha, bsg_job);
1200 
1201     bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
1202 }
1203 
1204 static void qla_edif_dbell_bsg_done(scsi_qla_host_t *vha)
1205 {
1206     unsigned long flags;
1207     struct bsg_job *prev_bsg_job = NULL;
1208 
1209     spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1210     if (vha->e_dbell.dbell_bsg_job) {
1211         prev_bsg_job = vha->e_dbell.dbell_bsg_job;
1212         vha->e_dbell.dbell_bsg_job = NULL;
1213     }
1214     spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
1215 
1216     if (prev_bsg_job)
1217         __qla_edif_dbell_bsg_done(vha, prev_bsg_job, 0);
1218 }
1219 
1220 static int
1221 qla_edif_dbell_bsg(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
1222 {
1223     unsigned long flags;
1224     bool return_bsg = false;
1225 
1226     /* flush previous dbell bsg */
1227     qla_edif_dbell_bsg_done(vha);
1228 
1229     spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
1230     if (list_empty(&vha->e_dbell.head) && DBELL_ACTIVE(vha)) {
1231         /*
1232          * when the next db event happens, bsg_job will return.
1233          * Otherwise, timer will return it.
1234          */
1235         vha->e_dbell.dbell_bsg_job = bsg_job;
1236         vha->e_dbell.bsg_expire = jiffies + 10 * HZ;
1237     } else {
1238         return_bsg = true;
1239     }
1240     spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
1241 
1242     if (return_bsg)
1243         __qla_edif_dbell_bsg_done(vha, bsg_job, 1);
1244 
1245     return 0;
1246 }
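
/*
 * Editor's note: QL_VND_SC_READ_DBELL behaves like a long-poll.  When the
 * doorbell queue is empty and the doorbell is active, the bsg_job is parked
 * in vha->e_dbell.dbell_bsg_job with a 10 second expiry; it is completed
 * later either by the next doorbell event or by the driver path that checks
 * bsg_expire (outside this excerpt).  Otherwise the job is completed
 * immediately after a short 1 ms accumulation delay via
 * __qla_edif_dbell_bsg_done().
 */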
1247 
1248 int32_t
1249 qla_edif_app_mgmt(struct bsg_job *bsg_job)
1250 {
1251     struct fc_bsg_request   *bsg_request = bsg_job->request;
1252     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1253     struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1254     scsi_qla_host_t     *vha = shost_priv(host);
1255     struct app_id       appcheck;
1256     bool done = true;
1257     int32_t         rval = 0;
1258     uint32_t    vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1259     u32 level = ql_dbg_edif;
1260 
1261     /* doorbell is high traffic */
1262     if (vnd_sc == QL_VND_SC_READ_DBELL)
1263         level = 0;
1264 
1265     ql_dbg(level, vha, 0x911d, "%s vnd subcmd=%x\n",
1266         __func__, vnd_sc);
1267 
1268     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1269         bsg_job->request_payload.sg_cnt, &appcheck,
1270         sizeof(struct app_id));
1271 
1272     if (!vha->hw->flags.edif_enabled ||
1273         test_bit(VPORT_DELETE, &vha->dpc_flags)) {
1274         ql_dbg(level, vha, 0x911d,
1275             "%s edif not enabled or vp delete. bsg ptr done %p. dpc_flags %lx\n",
1276             __func__, bsg_job, vha->dpc_flags);
1277 
1278         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1279         goto done;
1280     }
1281 
1282     if (!qla_edif_app_check(vha, appcheck)) {
1283         ql_dbg(level, vha, 0x911d,
1284             "%s app check failed.\n",
1285             __func__);
1286 
1287         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1288         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1289         goto done;
1290     }
1291 
1292     switch (vnd_sc) {
1293     case QL_VND_SC_SA_UPDATE:
1294         done = false;
1295         rval = qla24xx_sadb_update(bsg_job);
1296         break;
1297     case QL_VND_SC_APP_START:
1298         rval = qla_edif_app_start(vha, bsg_job);
1299         break;
1300     case QL_VND_SC_APP_STOP:
1301         rval = qla_edif_app_stop(vha, bsg_job);
1302         break;
1303     case QL_VND_SC_AUTH_OK:
1304         rval = qla_edif_app_authok(vha, bsg_job);
1305         break;
1306     case QL_VND_SC_AUTH_FAIL:
1307         rval = qla_edif_app_authfail(vha, bsg_job);
1308         break;
1309     case QL_VND_SC_GET_FCINFO:
1310         rval = qla_edif_app_getfcinfo(vha, bsg_job);
1311         break;
1312     case QL_VND_SC_GET_STATS:
1313         rval = qla_edif_app_getstats(vha, bsg_job);
1314         break;
1315     case QL_VND_SC_AEN_COMPLETE:
1316         rval = qla_edif_ack(vha, bsg_job);
1317         break;
1318     case QL_VND_SC_READ_DBELL:
1319         rval = qla_edif_dbell_bsg(vha, bsg_job);
1320         done = false;
1321         break;
1322     default:
1323         ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n",
1324             __func__,
1325             bsg_request->rqst_data.h_vendor.vendor_cmd[1]);
1326         rval = EXT_STATUS_INVALID_PARAM;
1327         done = false;
1328         break;
1329     }
1330 
1331 done:
1332     if (done) {
1333         ql_dbg(level, vha, 0x7009,
1334             "%s: %d  bsg ptr done %p\n", __func__, __LINE__, bsg_job);
1335         bsg_job_done(bsg_job, bsg_reply->result,
1336             bsg_reply->reply_payload_rcv_len);
1337     }
1338 
1339     return rval;
1340 }
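
/*
 * Editor's note: most sub-commands above are completed synchronously at the
 * "done" label.  QL_VND_SC_SA_UPDATE and QL_VND_SC_READ_DBELL clear "done"
 * because they complete the bsg_job later from their own paths (the SA
 * update handling and the doorbell long-poll respectively), and the
 * unknown-command case returns EXT_STATUS_INVALID_PARAM without completing
 * the job here.
 */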
1341 
1342 static struct edif_sa_ctl *
1343 qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame,
1344     int dir)
1345 {
1346     struct  edif_sa_ctl *sa_ctl;
1347     struct qla_sa_update_frame *sap;
1348     int index = sa_frame->fast_sa_index;
1349     unsigned long flags = 0;
1350 
1351     sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL);
1352     if (!sa_ctl) {
1353         /* couldn't get space */
1354         ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1355             "unable to allocate SA CTL\n");
1356         return NULL;
1357     }
1358 
1359     /*
1360      * need to allocate sa_index here and save it
1361      * in both sa_ctl->index and sa_frame->fast_sa_index;
1362      * If alloc fails then delete sa_ctl and return NULL
1363      */
1364     INIT_LIST_HEAD(&sa_ctl->next);
1365     sap = &sa_ctl->sa_frame;
1366     *sap = *sa_frame;
1367     sa_ctl->index = index;
1368     sa_ctl->fcport = fcport;
1369     sa_ctl->flags = 0;
1370     sa_ctl->state = 0L;
1371     ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1372         "%s: Added sa_ctl %p, index %d, state 0x%lx\n",
1373         __func__, sa_ctl, sa_ctl->index, sa_ctl->state);
1374     spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
1375     if (dir == SAU_FLG_TX)
1376         list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list);
1377     else
1378         list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list);
1379     spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
1380 
1381     return sa_ctl;
1382 }
1383 
1384 void
1385 qla_edif_flush_sa_ctl_lists(fc_port_t *fcport)
1386 {
1387     struct edif_sa_ctl *sa_ctl, *tsa_ctl;
1388     unsigned long flags = 0;
1389 
1390     spin_lock_irqsave(&fcport->edif.sa_list_lock, flags);
1391 
1392     list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list,
1393         next) {
1394         list_del(&sa_ctl->next);
1395         kfree(sa_ctl);
1396     }
1397 
1398     list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list,
1399         next) {
1400         list_del(&sa_ctl->next);
1401         kfree(sa_ctl);
1402     }
1403 
1404     spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags);
1405 }
1406 
1407 struct edif_sa_ctl *
1408 qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir)
1409 {
1410     struct edif_sa_ctl *sa_ctl, *tsa_ctl;
1411     struct list_head *sa_list;
1412 
1413     if (dir == SAU_FLG_TX)
1414         sa_list = &fcport->edif.tx_sa_list;
1415     else
1416         sa_list = &fcport->edif.rx_sa_list;
1417 
1418     list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) {
1419         if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) &&
1420             sa_ctl->index == index)
1421             return sa_ctl;
1422     }
1423     return NULL;
1424 }
1425 
1426 /* add the sa to the correct list */
1427 static int
1428 qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport,
1429     struct qla_sa_update_frame *sa_frame)
1430 {
1431     struct edif_sa_ctl *sa_ctl = NULL;
1432     int dir;
1433     uint16_t sa_index;
1434 
1435     dir = (sa_frame->flags & SAU_FLG_TX);
1436 
1437     /* map the spi to an sa_index */
1438     sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame);
1439     if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) {
1440         /* process rx delete */
1441         ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
1442             "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n",
1443             __func__, fcport->loop_id, sa_frame->spi);
1444 
1445         /* build and send the aen */
1446         fcport->edif.rx_sa_set = 1;
1447         fcport->edif.rx_sa_pending = 0;
1448         qla_edb_eventcreate(fcport->vha,
1449             VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
1450             QL_VND_SA_STAT_SUCCESS,
1451             QL_VND_RX_SA_KEY, fcport);
1452 
1453         /* force a return of good bsg status */
1454         return RX_DELETE_NO_EDIF_SA_INDEX;
1455     } else if (sa_index == INVALID_EDIF_SA_INDEX) {
1456         ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1457             "%s: Failed to get sa_index for spi 0x%x, dir: %d\n",
1458             __func__, sa_frame->spi, dir);
1459         return INVALID_EDIF_SA_INDEX;
1460     }
1461 
1462     ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1463         "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n",
1464         __func__, sa_index, sa_frame->spi, dir, fcport->loop_id);
1465 
1466     /* This is a local copy of sa_frame. */
1467     sa_frame->fast_sa_index = sa_index;
1468     /* create the sa_ctl */
1469     sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir);
1470     if (!sa_ctl) {
1471         ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1472             "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n",
1473             __func__, sa_frame->spi, dir, sa_index);
1474         return -1;
1475     }
1476 
1477     set_bit(EDIF_SA_CTL_USED, &sa_ctl->state);
1478 
1479     if (dir == SAU_FLG_TX)
1480         fcport->edif.tx_rekey_cnt++;
1481     else
1482         fcport->edif.rx_rekey_cnt++;
1483 
1484     ql_dbg(ql_dbg_edif, fcport->vha, 0x9100,
1485         "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n",
1486         __func__, sa_ctl, sa_ctl->index, sa_ctl->state,
1487         fcport->edif.tx_rekey_cnt,
1488         fcport->edif.rx_rekey_cnt, fcport->loop_id);
1489 
1490     return 0;
1491 }
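
/*
 * Editor's note: the caller (qla24xx_sadb_update() below) interprets the
 * return value as follows - 0 means an sa_ctl was created and queued,
 * RX_DELETE_NO_EDIF_SA_INDEX means an rx delete arrived with no matching
 * entry and is reported back to the application as success, and
 * INVALID_EDIF_SA_INDEX means no sa_index could be mapped for the spi, so
 * the bsg request is failed with DID_ERROR.
 */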
1492 
1493 #define QLA_SA_UPDATE_FLAGS_RX_KEY      0x0
1494 #define QLA_SA_UPDATE_FLAGS_TX_KEY      0x2
1495 #define EDIF_MSLEEP_INTERVAL 100
1496 #define EDIF_RETRY_COUNT  50
1497 
1498 int
1499 qla24xx_sadb_update(struct bsg_job *bsg_job)
1500 {
1501     struct  fc_bsg_reply    *bsg_reply = bsg_job->reply;
1502     struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1503     scsi_qla_host_t *vha = shost_priv(host);
1504     fc_port_t       *fcport = NULL;
1505     srb_t           *sp = NULL;
1506     struct edif_list_entry *edif_entry = NULL;
1507     int         found = 0;
1508     int         rval = 0;
1509     int result = 0, cnt;
1510     struct qla_sa_update_frame sa_frame;
1511     struct srb_iocb *iocb_cmd;
1512     port_id_t portid;
1513 
1514     ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d,
1515         "%s entered, vha: 0x%p\n", __func__, vha);
1516 
1517     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1518         bsg_job->request_payload.sg_cnt, &sa_frame,
1519         sizeof(struct qla_sa_update_frame));
1520 
1521     /* Check if host is online */
1522     if (!vha->flags.online) {
1523         ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n");
1524         rval = -EIO;
1525         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1526         goto done;
1527     }
1528 
1529     if (DBELL_INACTIVE(vha)) {
1530         ql_log(ql_log_warn, vha, 0x70a1, "App not started\n");
1531         rval = -EIO;
1532         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1533         goto done;
1534     }
1535 
1536     /* silence unaligned access warning */
1537     portid.b.domain = sa_frame.port_id.b.domain;
1538     portid.b.area   = sa_frame.port_id.b.area;
1539     portid.b.al_pa  = sa_frame.port_id.b.al_pa;
1540 
1541     fcport = qla2x00_find_fcport_by_pid(vha, &portid);
1542     if (fcport) {
1543         found = 1;
1544         if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY)
1545             fcport->edif.tx_bytes = 0;
1546         if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY)
1547             fcport->edif.rx_bytes = 0;
1548     }
1549 
1550     if (!found) {
1551         ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n",
1552             sa_frame.port_id.b24);
1553         rval = -EINVAL;
1554         SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE);
1555         goto done;
1556     }
1557 
1558     /* make sure the nport_handle is valid */
1559     if (fcport->loop_id == FC_NO_LOOP_ID) {
1560         ql_dbg(ql_dbg_edif, vha, 0x70e1,
1561             "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n",
1562             __func__, fcport->port_name, sa_frame.spi,
1563             fcport->disc_state);
1564         rval = -EINVAL;
1565         SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT);
1566         goto done;
1567     }
1568 
1569     /* allocate and queue an sa_ctl */
1570     result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame);
1571 
1572     /* failure of bsg */
1573     if (result == INVALID_EDIF_SA_INDEX) {
1574         ql_dbg(ql_dbg_edif, vha, 0x70e1,
1575             "%s: %8phN, skipping update.\n",
1576             __func__, fcport->port_name);
1577         rval = -EINVAL;
1578         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1579         goto done;
1580 
1581     /* rx delete failure */
1582     } else if (result == RX_DELETE_NO_EDIF_SA_INDEX) {
1583         ql_dbg(ql_dbg_edif, vha, 0x70e1,
1584             "%s: %8phN, skipping rx delete.\n",
1585             __func__, fcport->port_name);
1586         SET_DID_STATUS(bsg_reply->result, DID_OK);
1587         goto done;
1588     }
1589 
1590     ql_dbg(ql_dbg_edif, vha, 0x70e1,
1591         "%s: %8phN, sa_index in sa_frame: %d flags %xh\n",
1592         __func__, fcport->port_name, sa_frame.fast_sa_index,
1593         sa_frame.flags);
1594 
1595     /* looking for rx index and delete */
1596     if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
1597         (sa_frame.flags & SAU_FLG_INV)) {
1598         uint16_t nport_handle = fcport->loop_id;
1599         uint16_t sa_index = sa_frame.fast_sa_index;
1600 
1601         /*
1602          * make sure we have an existing rx key, otherwise just process
1603          * this as a straight delete just like TX.
1604          * This is NOT a normal case; it indicates an error recovery or key cleanup
1605          * by the ipsec code above us.
1606          */
1607         edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id);
1608         if (!edif_entry) {
1609             ql_dbg(ql_dbg_edif, vha, 0x911d,
1610                 "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n",
1611                 __func__, fcport->loop_id, sa_index);
1612             goto force_rx_delete;
1613         }
1614 
1615         /*
1616          * if we have a forced delete for rx, remove the sa_index from the edif list
1617          * and proceed with normal delete.  The rx delay timer should not be running
1618          */
1619         if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) {
1620             qla_edif_list_delete_sa_index(fcport, edif_entry);
1621             ql_dbg(ql_dbg_edif, vha, 0x911d,
1622                 "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n",
1623                 __func__, fcport->loop_id, sa_index);
1624             kfree(edif_entry);
1625             goto force_rx_delete;
1626         }
1627 
1628         /*
1629          * delayed rx delete
1630          *
1631          * if delete_sa_index is not invalid then there is already
1632          * a delayed index in progress, return bsg bad status
1633          */
1634         if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) {
1635             struct edif_sa_ctl *sa_ctl;
1636 
1637             ql_dbg(ql_dbg_edif, vha, 0x911d,
1638                 "%s: delete for lid 0x%x, delete_sa_index %d is pending\n",
1639                 __func__, edif_entry->handle, edif_entry->delete_sa_index);
1640 
1641             /* free up the sa_ctl that was allocated with the sa_index */
1642             sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index,
1643                 (sa_frame.flags & SAU_FLG_TX));
1644             if (sa_ctl) {
1645                 ql_dbg(ql_dbg_edif, vha, 0x3063,
1646                     "%s: freeing sa_ctl for index %d\n",
1647                     __func__, sa_ctl->index);
1648                 qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
1649             }
1650 
1651             /* release the sa_index */
1652             ql_dbg(ql_dbg_edif, vha, 0x3063,
1653                 "%s: freeing sa_index %d, nph: 0x%x\n",
1654                 __func__, sa_index, nport_handle);
1655             qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index);
1656 
1657             rval = -EINVAL;
1658             SET_DID_STATUS(bsg_reply->result, DID_ERROR);
1659             goto done;
1660         }
1661 
1662         fcport->edif.rekey_cnt++;
1663 
1664         /* configure and start the rx delay timer */
1665         edif_entry->fcport = fcport;
1666         edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ;
1667 
1668         ql_dbg(ql_dbg_edif, vha, 0x911d,
1669             "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n",
1670             __func__, edif_entry, sa_index, nport_handle);
1671 
1672         /*
1673          * Start the timer when we queue the delayed rx delete.
1674          * This is an activity timer that goes off if we have not
1675          * received packets with the new sa_index
1676          */
1677         add_timer(&edif_entry->timer);
1678 
1679         /*
1680          * sa_delete for rx key with an active rx key including this one
1681          * add the delete rx sa index to the hash so we can look for it
1682          * in the rsp queue.  Do this after making any changes to the
1683          * edif_entry as part of the rx delete.
1684          */
1685 
1686         ql_dbg(ql_dbg_edif, vha, 0x911d,
1687             "%s: delete sa_index %d, lid 0x%x to edif_list. bsg done ptr %p\n",
1688             __func__, sa_index, nport_handle, bsg_job);
1689 
1690         edif_entry->delete_sa_index = sa_index;
1691 
1692         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1693         bsg_reply->result = DID_OK << 16;
1694 
1695         goto done;
1696 
1697     /*
1698      * rx index and update
1699      * add the index to the list and continue with normal update
1700      */
1701     } else if (((sa_frame.flags & SAU_FLG_TX) == 0) &&
1702         ((sa_frame.flags & SAU_FLG_INV) == 0)) {
1703         /* sa_update for rx key */
1704         uint32_t nport_handle = fcport->loop_id;
1705         uint16_t sa_index = sa_frame.fast_sa_index;
1706         int result;
1707 
1708         /*
1709          * add the update rx sa index to the hash so we can look for it
1710          * in the rsp queue and continue normally
1711          */
1712 
1713         ql_dbg(ql_dbg_edif, vha, 0x911d,
1714             "%s:  adding update sa_index %d, lid 0x%x to edif_list\n",
1715             __func__, sa_index, nport_handle);
1716 
1717         result = qla_edif_list_add_sa_update_index(fcport, sa_index,
1718             nport_handle);
1719         if (result) {
1720             ql_dbg(ql_dbg_edif, vha, 0x911d,
1721                 "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n",
1722                 __func__, sa_index, nport_handle);
1723         }
1724     }
1725     if (sa_frame.flags & SAU_FLG_GMAC_MODE)
1726         fcport->edif.aes_gmac = 1;
1727     else
1728         fcport->edif.aes_gmac = 0;
1729 
1730 force_rx_delete:
1731     /*
1732      * sa_update for both rx and tx keys, sa_delete for tx key
1733      * immediately process the request
1734      */
1735     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1736     if (!sp) {
1737         rval = -ENOMEM;
1738         SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
1739         goto done;
1740     }
1741 
1742     sp->type = SRB_SA_UPDATE;
1743     sp->name = "bsg_sa_update";
1744     sp->u.bsg_job = bsg_job;
1745     /* sp->free = qla2x00_bsg_sp_free; */
1746     sp->free = qla2x00_rel_sp;
1747     sp->done = qla2x00_bsg_job_done;
1748     iocb_cmd = &sp->u.iocb_cmd;
1749     iocb_cmd->u.sa_update.sa_frame  = sa_frame;
1750     cnt = 0;
1751 retry:
1752     rval = qla2x00_start_sp(sp);
1753     switch (rval) {
1754     case QLA_SUCCESS:
1755         break;
1756     case EAGAIN:
1757         msleep(EDIF_MSLEEP_INTERVAL);
1758         cnt++;
1759         if (cnt < EDIF_RETRY_COUNT)
1760             goto retry;
1761 
1762         fallthrough;
1763     default:
1764         ql_log(ql_dbg_edif, vha, 0x70e3,
1765                "%s qla2x00_start_sp failed=%d.\n",
1766                __func__, rval);
1767 
1768         qla2x00_rel_sp(sp);
1769         rval = -EIO;
1770         SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
1771         goto done;
1772     }
1773 
1774     ql_dbg(ql_dbg_edif, vha, 0x911d,
1775         "%s:  %s sent, hdl=%x, portid=%06x.\n",
1776         __func__, sp->name, sp->handle, fcport->d_id.b24);
1777 
1778     fcport->edif.rekey_cnt++;
1779     bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1780     SET_DID_STATUS(bsg_reply->result, DID_OK);
1781 
1782     return 0;
1783 
1784 /*
1785  * send back error status
1786  */
1787 done:
1788     bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1789     ql_dbg(ql_dbg_edif, vha, 0x911d,
1790         "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n",
1791         __func__, bsg_reply->result, bsg_job);
1792     bsg_job_done(bsg_job, bsg_reply->result,
1793         bsg_reply->reply_payload_rcv_len);
1794 
1795     return 0;
1796 }
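
/*
 * Rough dispatch summary for the routine above, keyed off sa_frame.flags:
 *
 *   tx update / tx delete          - build and start SRB_SA_UPDATE
 *                                    immediately
 *   rx update (no SAU_FLG_INV)     - record the new sa_index on the
 *                                    port's edif list, then start
 *                                    SRB_SA_UPDATE
 *   rx delete (SAU_FLG_INV, no TX) - defer: arm the RX_DELAY_DELETE
 *                                    timer and complete the bsg now;
 *                                    forced deletes (or deletes with no
 *                                    tracked rx key) drop through to the
 *                                    immediate path via force_rx_delete
 */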
1797 
1798 static void
1799 qla_enode_free(scsi_qla_host_t *vha, struct enode *node)
1800 {
1801     node->ntype = N_UNDEF;
1802     kfree(node);
1803 }
1804 
1805 /**
1806  * qla_enode_init - initialize enode structs & lock
1807  * @vha: host adapter pointer
1808  *
1809  * should only be called when the driver is attaching
1810  */
1811 void
1812 qla_enode_init(scsi_qla_host_t *vha)
1813 {
1814     struct  qla_hw_data *ha = vha->hw;
1815     char    name[32];
1816 
1817     if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) {
1818         /* list still active - error */
1819         ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n",
1820             __func__);
1821         return;
1822     }
1823 
1824     /* initialize lock which protects pur_core & init list */
1825     spin_lock_init(&vha->pur_cinfo.pur_lock);
1826     INIT_LIST_HEAD(&vha->pur_cinfo.head);
1827 
1828     snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME,
1829         ha->pdev->device);
1830 }
1831 
1832 /**
1833  * qla_enode_stop - stop and clear enode data
1834  * @vha: host adapter pointer
1835  *
1836  * called when the app has notified us it is exiting
1837  */
1838 void
1839 qla_enode_stop(scsi_qla_host_t *vha)
1840 {
1841     unsigned long flags;
1842     struct enode *node, *q;
1843 
1844     if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
1845         /* enode list not active */
1846         ql_dbg(ql_dbg_edif, vha, 0x09102,
1847             "%s enode not active\n", __func__);
1848         return;
1849     }
1850 
1851     /* grab lock so list doesn't move */
1852     spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1853 
1854     vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */
1855 
1856     /* hopefully this is an empty list at this point */
1857     list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) {
1858         ql_dbg(ql_dbg_edif, vha, 0x910f,
1859             "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype,
1860             node->dinfo.nodecnt);
1861         list_del_init(&node->list);
1862         qla_enode_free(vha, node);
1863     }
1864     spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1865 }
1866 
1867 static void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid)
1868 {
1869     unsigned    long flags;
1870     struct enode    *e, *tmp;
1871     struct purexevent   *purex;
1872     LIST_HEAD(enode_list);
1873 
1874     if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
1875         ql_dbg(ql_dbg_edif, vha, 0x09102,
1876                "%s enode not active\n", __func__);
1877         return;
1878     }
1879     spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1880     list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) {
1881         purex = &e->u.purexinfo;
1882         if (purex->pur_info.pur_sid.b24 == portid.b24) {
1883             ql_dbg(ql_dbg_edif, vha, 0x911d,
1884                 "%s free ELS sid=%06x. xchg %x, nb=%xh\n",
1885                 __func__, portid.b24,
1886                 purex->pur_info.pur_rx_xchg_address,
1887                 purex->pur_info.pur_bytes_rcvd);
1888 
1889             list_del_init(&e->list);
1890             list_add_tail(&e->list, &enode_list);
1891         }
1892     }
1893     spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1894 
1895     list_for_each_entry_safe(e, tmp, &enode_list, list) {
1896         list_del_init(&e->list);
1897         qla_enode_free(vha, e);
1898     }
1899 }
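
/*
 * Design note: here (and in qla_edb_clear() below) matching nodes are
 * first moved onto a private list under the spinlock and only freed
 * after the lock is dropped, keeping kfree() out of the IRQ-disabled
 * critical section.
 */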
1900 
1901 /*
1902  *  allocate enode struct and populate buffer
1903  *  returns: enode pointer with buffers
1904  *           NULL on error
1905  */
1906 static struct enode *
1907 qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype)
1908 {
1909     struct enode        *node;
1910     struct purexevent   *purex;
1911 
1912     node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC);
1913     if (!node)
1914         return NULL;
1915 
1916     purex = &node->u.purexinfo;
1917     purex->msgp = (u8 *)(node + 1);
1918     purex->msgp_len = ELS_MAX_PAYLOAD;
1919 
1920     node->ntype = ntype;
1921     INIT_LIST_HEAD(&node->list);
1922     return node;
1923 }
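
/*
 * Layout note: the single RX_ELS_SIZE allocation above is presumably
 * sized for the struct enode header plus an ELS_MAX_PAYLOAD byte
 * payload area; msgp just points past the header (node + 1), so freeing
 * the node releases the payload with it.
 */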
1924 
1925 static void
1926 qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr)
1927 {
1928     unsigned long flags;
1929 
1930     ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109,
1931         "%s add enode for type=%x, cnt=%x\n",
1932         __func__, ptr->ntype, ptr->dinfo.nodecnt);
1933 
1934     spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1935     list_add_tail(&ptr->list, &vha->pur_cinfo.head);
1936     spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1937 
1938     return;
1939 }
1940 
1941 static struct enode *
1942 qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2)
1943 {
1944     struct enode        *node_rtn = NULL;
1945     struct enode        *list_node, *q;
1946     unsigned long       flags;
1947     uint32_t        sid;
1948     struct purexevent   *purex;
1949 
1950     /* secure the list from moving under us */
1951     spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags);
1952 
1953     list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) {
1954 
1955         /* node type determines what p1 and p2 are */
1956         purex = &list_node->u.purexinfo;
1957         sid = p1;
1958 
1959         if (purex->pur_info.pur_sid.b24 == sid) {
1960             /* found it and it's complete */
1961             node_rtn = list_node;
1962             list_del(&list_node->list);
1963             break;
1964         }
1965     }
1966 
1967     spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags);
1968 
1969     return node_rtn;
1970 }
1971 
1972 /**
1973  * qla_pur_get_pending - read/return authentication message sent
1974  *  from remote port
1975  * @vha: host adapter pointer
1976  * @fcport: session pointer
1977  * @bsg_job: user request where the message is copied to.
1978  */
1979 static int
1980 qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
1981     struct bsg_job *bsg_job)
1982 {
1983     struct enode        *ptr;
1984     struct purexevent   *purex;
1985     struct qla_bsg_auth_els_reply *rpl =
1986         (struct qla_bsg_auth_els_reply *)bsg_job->reply;
1987 
1988     bsg_job->reply_len = sizeof(*rpl);
1989 
1990     ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET);
1991     if (!ptr) {
1992         ql_dbg(ql_dbg_edif, vha, 0x9111,
1993             "%s no enode data found for %8phN sid=%06x\n",
1994             __func__, fcport->port_name, fcport->d_id.b24);
1995         SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY);
1996         return -EIO;
1997     }
1998 
1999     /*
2000      * enode is now off the linked list and is ours to deal with
2001      */
2002     purex = &ptr->u.purexinfo;
2003 
2004     /* Copy info back to caller */
2005     rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address;
2006 
2007     SET_DID_STATUS(rpl->r.result, DID_OK);
2008     rpl->r.reply_payload_rcv_len =
2009         sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list,
2010         bsg_job->reply_payload.sg_cnt, purex->msgp,
2011         purex->pur_info.pur_bytes_rcvd, 0);
2012 
2013     /* data copy / passback completed - destroy enode */
2014     qla_enode_free(vha, ptr);
2015 
2016     return 0;
2017 }
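
/*
 * Usage sketch: when the authentication app asks to retrieve a pending
 * ELS, this routine pops the matching enode, copies the raw payload
 * into the bsg reply scatterlist and hands back rx_xchg_address,
 * presumably so the app's ELS response can be tied to the same
 * exchange.
 */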
2018 
2019 /* it is assumed the qpair lock is held */
2020 static int
2021 qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp,
2022     struct qla_els_pt_arg *a)
2023 {
2024     struct els_entry_24xx *els_iocb;
2025 
2026     els_iocb = __qla2x00_alloc_iocbs(qp, NULL);
2027     if (!els_iocb) {
2028         ql_log(ql_log_warn, vha, 0x700c,
2029             "qla2x00_alloc_iocbs failed.\n");
2030         return QLA_FUNCTION_FAILED;
2031     }
2032 
2033     qla_els_pt_iocb(vha, els_iocb, a);
2034 
2035     ql_dbg(ql_dbg_edif, vha, 0x0183,
2036         "Sending ELS reject ox_id %04x s:%06x -> d:%06x\n",
2037         a->ox_id, a->sid.b24, a->did.b24);
2038     ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185,
2039         vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c));
2040     /* flush iocb to mem before notifying hw doorbell */
2041     wmb();
2042     qla2x00_start_iocbs(vha, qp->req);
2043     return 0;
2044 }
2045 
2046 void
2047 qla_edb_init(scsi_qla_host_t *vha)
2048 {
2049     if (DBELL_ACTIVE(vha)) {
2050         /* list already init'd - error */
2051         ql_dbg(ql_dbg_edif, vha, 0x09102,
2052             "edif db already initialized, cannot reinit\n");
2053         return;
2054     }
2055 
2056     /* initialize lock which protects doorbell & init list */
2057     spin_lock_init(&vha->e_dbell.db_lock);
2058     INIT_LIST_HEAD(&vha->e_dbell.head);
2059 }
2060 
2061 static void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid)
2062 {
2063     unsigned long flags;
2064     struct edb_node *e, *tmp;
2065     port_id_t sid;
2066     LIST_HEAD(edb_list);
2067 
2068     if (DBELL_INACTIVE(vha)) {
2069         /* doorbell list not enabled */
2070         ql_dbg(ql_dbg_edif, vha, 0x09102,
2071                "%s doorbell not enabled\n", __func__);
2072         return;
2073     }
2074 
2075     /* grab lock so list doesn't move */
2076     spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
2077     list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) {
2078         switch (e->ntype) {
2079         case VND_CMD_AUTH_STATE_NEEDED:
2080         case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
2081             sid = e->u.plogi_did;
2082             break;
2083         case VND_CMD_AUTH_STATE_ELS_RCVD:
2084             sid = e->u.els_sid;
2085             break;
2086         case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
2087             /* app wants to see this  */
2088             continue;
2089         default:
2090             ql_log(ql_log_warn, vha, 0x09102,
2091                    "%s unknown node type: %x\n", __func__, e->ntype);
2092             sid.b24 = 0;
2093             break;
2094         }
2095         if (sid.b24 == portid.b24) {
2096             ql_dbg(ql_dbg_edif, vha, 0x910f,
2097                    "%s free doorbell event : node type = %x %p\n",
2098                    __func__, e->ntype, e);
2099             list_del_init(&e->list);
2100             list_add_tail(&e->list, &edb_list);
2101         }
2102     }
2103     spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
2104 
2105     list_for_each_entry_safe(e, tmp, &edb_list, list)
2106         qla_edb_node_free(vha, e);
2107 }
2108 
2109 /* function called when app is stopping */
2110 
2111 void
2112 qla_edb_stop(scsi_qla_host_t *vha)
2113 {
2114     unsigned long flags;
2115     struct edb_node *node, *q;
2116 
2117     if (DBELL_INACTIVE(vha)) {
2118         /* doorbell list not enabled */
2119         ql_dbg(ql_dbg_edif, vha, 0x09102,
2120             "%s doorbell not enabled\n", __func__);
2121         return;
2122     }
2123 
2124     /* grab lock so list doesn't move */
2125     spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
2126 
2127     vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */
2128     /* hopefully this is an empty list at this point */
2129     list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) {
2130         ql_dbg(ql_dbg_edif, vha, 0x910f,
2131             "%s freeing edb_node type=%x\n",
2132             __func__, node->ntype);
2133         qla_edb_node_free(vha, node);
2134     }
2135     spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
2136 
2137     qla_edif_dbell_bsg_done(vha);
2138 }
2139 
2140 static struct edb_node *
2141 qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype)
2142 {
2143     struct edb_node *node;
2144 
2145     node = kzalloc(sizeof(*node), GFP_ATOMIC);
2146     if (!node) {
2147         /* couldn't get space */
2148         ql_dbg(ql_dbg_edif, vha, 0x9100,
2149             "edb node unable to be allocated\n");
2150         return NULL;
2151     }
2152 
2153     node->ntype = ntype;
2154     INIT_LIST_HEAD(&node->list);
2155     return node;
2156 }
2157 
2158 /* adds an already allocated edb node to the doorbell linked list */
2159 static bool
2160 qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr)
2161 {
2162     unsigned long       flags;
2163 
2164     if (DBELL_INACTIVE(vha)) {
2165         /* doorbell list not enabled */
2166         ql_dbg(ql_dbg_edif, vha, 0x09102,
2167             "%s doorbell not enabled\n", __func__);
2168         return false;
2169     }
2170 
2171     spin_lock_irqsave(&vha->e_dbell.db_lock, flags);
2172     list_add_tail(&ptr->list, &vha->e_dbell.head);
2173     spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags);
2174 
2175     return true;
2176 }
2177 
2178 /* adds event to doorbell list */
2179 void
2180 qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype,
2181     uint32_t data, uint32_t data2, fc_port_t    *sfcport)
2182 {
2183     struct edb_node *edbnode;
2184     fc_port_t *fcport = sfcport;
2185     port_id_t id;
2186 
2187     if (!vha->hw->flags.edif_enabled) {
2188         /* edif not enabled */
2189         return;
2190     }
2191 
2192     if (DBELL_INACTIVE(vha)) {
2193         if (fcport)
2194             fcport->edif.auth_state = dbtype;
2195         /* doorbell list not enabled */
2196         ql_dbg(ql_dbg_edif, vha, 0x09102,
2197             "%s doorbell not enabled (type=%d)\n", __func__, dbtype);
2198         return;
2199     }
2200 
2201     edbnode = qla_edb_node_alloc(vha, dbtype);
2202     if (!edbnode) {
2203         ql_dbg(ql_dbg_edif, vha, 0x09102,
2204             "%s unable to alloc db node\n", __func__);
2205         return;
2206     }
2207 
2208     if (!fcport) {
2209         id.b.domain = (data >> 16) & 0xff;
2210         id.b.area = (data >> 8) & 0xff;
2211         id.b.al_pa = data & 0xff;
2212         ql_dbg(ql_dbg_edif, vha, 0x09222,
2213             "%s: Arrived s_id: %06x\n", __func__,
2214             id.b24);
2215         fcport = qla2x00_find_fcport_by_pid(vha, &id);
2216         if (!fcport) {
2217             ql_dbg(ql_dbg_edif, vha, 0x09102,
2218                 "%s can't find fcport for sid= 0x%x - ignoring\n",
2219             __func__, id.b24);
2220             kfree(edbnode);
2221             return;
2222         }
2223     }
2224 
2225     /* populate the edb node */
2226     switch (dbtype) {
2227     case VND_CMD_AUTH_STATE_NEEDED:
2228     case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
2229         edbnode->u.plogi_did.b24 = fcport->d_id.b24;
2230         break;
2231     case VND_CMD_AUTH_STATE_ELS_RCVD:
2232         edbnode->u.els_sid.b24 = fcport->d_id.b24;
2233         break;
2234     case VND_CMD_AUTH_STATE_SAUPDATE_COMPL:
2235         edbnode->u.sa_aen.port_id = fcport->d_id;
2236         edbnode->u.sa_aen.status =  data;
2237         edbnode->u.sa_aen.key_type =  data2;
2238         edbnode->u.sa_aen.version = EDIF_VERSION1;
2239         break;
2240     default:
2241         ql_dbg(ql_dbg_edif, vha, 0x09102,
2242             "%s unknown type: %x\n", __func__, dbtype);
2243         kfree(edbnode);
2244         edbnode = NULL;
2245         break;
2246     }
2247 
2248     if (edbnode) {
2249         if (!qla_edb_node_add(vha, edbnode)) {
2250             ql_dbg(ql_dbg_edif, vha, 0x09102,
2251                 "%s unable to add dbnode\n", __func__);
2252             kfree(edbnode);
2253             return;
2254         }
2255         ql_dbg(ql_dbg_edif, vha, 0x09102,
2256             "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode);
2257         qla_edif_dbell_bsg_done(vha);
2258         if (fcport)
2259             fcport->edif.auth_state = dbtype;
2260     }
2261 }
2262 
2263 void
2264 qla_edif_timer(scsi_qla_host_t *vha)
2265 {
2266     struct qla_hw_data *ha = vha->hw;
2267 
2268     if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) {
2269         if (DBELL_INACTIVE(vha) &&
2270             ha->edif_post_stop_cnt_down) {
2271             ha->edif_post_stop_cnt_down--;
2272 
2273             /*
2274              * turn off auto 'Plogi Acc + secure=1' feature
2275              * Set Add FW option[3]
2276              * BIT_15, if.
2277              */
2278             if (ha->edif_post_stop_cnt_down == 0) {
2279                 ql_dbg(ql_dbg_async, vha, 0x911d,
2280                        "%s chip reset to turn off PLOGI ACC + secure\n",
2281                        __func__);
2282                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2283             }
2284         } else {
2285             ha->edif_post_stop_cnt_down = 60;
2286         }
2287     }
2288 
2289     if (vha->e_dbell.dbell_bsg_job && time_after_eq(jiffies, vha->e_dbell.bsg_expire))
2290         qla_edif_dbell_bsg_done(vha);
2291 }
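
/*
 * Timing note: edif_post_stop_cnt_down is reloaded to 60 while the
 * doorbell is still active and only counts down once the app has
 * stopped; assuming this timer runs on the driver's one-second
 * heartbeat, the chip reset that clears the auto "PLOGI ACC + secure"
 * firmware option fires about a minute after the app goes away.
 */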
2292 
2293 static void qla_noop_sp_done(srb_t *sp, int res)
2294 {
2295     sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2296     /* ref: INIT */
2297     kref_put(&sp->cmd_kref, qla2x00_sp_release);
2298 }
2299 
2300 /*
2301  * Called from work queue
2302  * build and send the sa_update iocb to delete an rx sa_index
2303  */
2304 int
2305 qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e)
2306 {
2307     srb_t *sp;
2308     fc_port_t   *fcport = NULL;
2309     struct srb_iocb *iocb_cmd = NULL;
2310     int rval = QLA_SUCCESS;
2311     struct  edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl;
2312     uint16_t nport_handle = e->u.sa_update.nport_handle;
2313 
2314     ql_dbg(ql_dbg_edif, vha, 0x70e6,
2315         "%s: starting,  sa_ctl: %p\n", __func__, sa_ctl);
2316 
2317     if (!sa_ctl) {
2318         ql_dbg(ql_dbg_edif, vha, 0x70e6,
2319             "sa_ctl allocation failed\n");
2320         rval =  -ENOMEM;
2321         goto done;
2322     }
2323 
2324     fcport = sa_ctl->fcport;
2325 
2326     /* Alloc SRB structure */
2327     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2328     if (!sp) {
2329         ql_dbg(ql_dbg_edif, vha, 0x70e6,
2330          "SRB allocation failed\n");
2331         rval = -ENOMEM;
2332         goto done;
2333     }
2334 
2335     fcport->flags |= FCF_ASYNC_SENT;
2336     iocb_cmd = &sp->u.iocb_cmd;
2337     iocb_cmd->u.sa_update.sa_ctl = sa_ctl;
2338 
2339     ql_dbg(ql_dbg_edif, vha, 0x3073,
2340         "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n",
2341         fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle);
2342     /*
2343      * if this is a sadb cleanup delete, mark it so the isr can
2344      * take the correct action
2345      */
2346     if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) {
2347         /* mark this srb as a cleanup delete */
2348         sp->flags |= SRB_EDIF_CLEANUP_DELETE;
2349         ql_dbg(ql_dbg_edif, vha, 0x70e6,
2350             "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp);
2351     }
2352 
2353     sp->type = SRB_SA_REPLACE;
2354     sp->name = "SA_REPLACE";
2355     sp->fcport = fcport;
2356     sp->free = qla2x00_rel_sp;
2357     sp->done = qla_noop_sp_done;
2358 
2359     rval = qla2x00_start_sp(sp);
2360 
2361     if (rval != QLA_SUCCESS) {
2362         goto done_free_sp;
2363     }
2364 
2365     return rval;
2366 done_free_sp:
2367     kref_put(&sp->cmd_kref, qla2x00_sp_release);
2368     fcport->flags &= ~FCF_ASYNC_SENT;
2369 done:
2370     fcport->flags &= ~FCF_ASYNC_ACTIVE;
2371     return rval;
2372 }
2373 
2374 void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
2375 {
2376     int itr = 0;
2377     struct  scsi_qla_host       *vha = sp->vha;
2378     struct  qla_sa_update_frame *sa_frame =
2379         &sp->u.iocb_cmd.u.sa_update.sa_frame;
2380     u8 flags = 0;
2381 
2382     switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) {
2383     case 0:
2384         ql_dbg(ql_dbg_edif, vha, 0x911d,
2385             "%s: EDIF SA UPDATE RX IOCB  vha: 0x%p  index: %d\n",
2386             __func__, vha, sa_frame->fast_sa_index);
2387         break;
2388     case 1:
2389         ql_dbg(ql_dbg_edif, vha, 0x911d,
2390             "%s: EDIF SA DELETE RX IOCB  vha: 0x%p  index: %d\n",
2391             __func__, vha, sa_frame->fast_sa_index);
2392         flags |= SA_FLAG_INVALIDATE;
2393         break;
2394     case 2:
2395         ql_dbg(ql_dbg_edif, vha, 0x911d,
2396             "%s: EDIF SA UPDATE TX IOCB  vha: 0x%p  index: %d\n",
2397             __func__, vha, sa_frame->fast_sa_index);
2398         flags |= SA_FLAG_TX;
2399         break;
2400     case 3:
2401         ql_dbg(ql_dbg_edif, vha, 0x911d,
2402             "%s: EDIF SA DELETE TX IOCB  vha: 0x%p  index: %d\n",
2403             __func__, vha, sa_frame->fast_sa_index);
2404         flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE;
2405         break;
2406     }
2407 
2408     sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
2409     sa_update_iocb->entry_count = 1;
2410     sa_update_iocb->sys_define = 0;
2411     sa_update_iocb->entry_status = 0;
2412     sa_update_iocb->handle = sp->handle;
2413     sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id);
2414     sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
2415     sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2416     sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
2417     sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2418 
2419     sa_update_iocb->flags = flags;
2420     sa_update_iocb->salt = cpu_to_le32(sa_frame->salt);
2421     sa_update_iocb->spi = cpu_to_le32(sa_frame->spi);
2422     sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index);
2423 
2424     sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP;
2425     if (sp->fcport->edif.aes_gmac)
2426         sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC;
2427 
2428     if (sa_frame->flags & SAU_FLG_KEY256) {
2429         sa_update_iocb->sa_control |= SA_CNTL_KEY256;
2430         for (itr = 0; itr < 32; itr++)
2431             sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
2432     } else {
2433         sa_update_iocb->sa_control |= SA_CNTL_KEY128;
2434         for (itr = 0; itr < 16; itr++)
2435             sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr];
2436     }
2437 
2438     ql_dbg(ql_dbg_edif, vha, 0x921d,
2439         "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n",
2440         __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
2441         sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index,
2442         sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle,
2443         sp->fcport->edif.aes_gmac);
2444 
2445     if (sa_frame->flags & SAU_FLG_TX)
2446         sp->fcport->edif.tx_sa_pending = 1;
2447     else
2448         sp->fcport->edif.rx_sa_pending = 1;
2449 
2450     sp->fcport->vha->qla_stats.control_requests++;
2451 }
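
/*
 * For reference, the (SAU_FLG_INV | SAU_FLG_TX) switch above maps the
 * application's sa_frame flags onto the IOCB flags as:
 *
 *   0 (rx update) -> 0
 *   1 (rx delete) -> SA_FLAG_INVALIDATE
 *   2 (tx update) -> SA_FLAG_TX
 *   3 (tx delete) -> SA_FLAG_TX | SA_FLAG_INVALIDATE
 */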
2452 
2453 void
2454 qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb)
2455 {
2456     struct  scsi_qla_host       *vha = sp->vha;
2457     struct srb_iocb *srb_iocb = &sp->u.iocb_cmd;
2458     struct  edif_sa_ctl     *sa_ctl = srb_iocb->u.sa_update.sa_ctl;
2459     uint16_t nport_handle = sp->fcport->loop_id;
2460 
2461     sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE;
2462     sa_update_iocb->entry_count = 1;
2463     sa_update_iocb->sys_define = 0;
2464     sa_update_iocb->entry_status = 0;
2465     sa_update_iocb->handle = sp->handle;
2466 
2467     sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle);
2468 
2469     sa_update_iocb->vp_index = sp->fcport->vha->vp_idx;
2470     sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2471     sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area;
2472     sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2473 
2474     /* Invalidate the index. salt, spi, control & key are ignored */
2475     sa_update_iocb->flags = SA_FLAG_INVALIDATE;
2476     sa_update_iocb->salt = 0;
2477     sa_update_iocb->spi = 0;
2478     sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index);
2479     sa_update_iocb->sa_control = 0;
2480 
2481     ql_dbg(ql_dbg_edif, vha, 0x921d,
2482         "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n",
2483         __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1],
2484         sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags,
2485         sa_update_iocb->sa_index, sp->handle);
2486 
2487     sp->fcport->vha->qla_stats.control_requests++;
2488 }
2489 
2490 void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp)
2491 {
2492     struct purex_entry_24xx *p = *pkt;
2493     struct enode        *ptr;
2494     int     sid;
2495     u16 totlen;
2496     struct purexevent   *purex;
2497     struct scsi_qla_host *host = NULL;
2498     int rc;
2499     struct fc_port *fcport;
2500     struct qla_els_pt_arg a;
2501     be_id_t beid;
2502 
2503     memset(&a, 0, sizeof(a));
2504 
2505     a.els_opcode = ELS_AUTH_ELS;
2506     a.nport_handle = p->nport_handle;
2507     a.rx_xchg_address = p->rx_xchg_addr;
2508     a.did.b.domain = p->s_id[2];
2509     a.did.b.area   = p->s_id[1];
2510     a.did.b.al_pa  = p->s_id[0];
2511     a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt);
2512     a.tx_addr = vha->hw->elsrej.cdma;
2513     a.vp_idx = vha->vp_idx;
2514     a.control_flags = EPD_ELS_RJT;
2515     a.ox_id = le16_to_cpu(p->ox_id);
2516 
2517     sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16);
2518 
2519     totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE;
2520     if (le16_to_cpu(p->status_flags) & 0x8000) {
2521         totlen = le16_to_cpu(p->trunc_frame_size);
2522         qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2523         __qla_consume_iocb(vha, pkt, rsp);
2524         return;
2525     }
2526 
2527     if (totlen > ELS_MAX_PAYLOAD) {
2528         ql_dbg(ql_dbg_edif, vha, 0x0910d,
2529             "%s WARNING: oversized ELS frame received (totlen=%x)\n",
2530             __func__, totlen);
2531         qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2532         __qla_consume_iocb(vha, pkt, rsp);
2533         return;
2534     }
2535 
2536     if (!vha->hw->flags.edif_enabled) {
2537         /* edif support not enabled */
2538         ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n",
2539             __func__);
2540         qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2541         __qla_consume_iocb(vha, pkt, rsp);
2542         return;
2543     }
2544 
2545     ptr = qla_enode_alloc(vha, N_PUREX);
2546     if (!ptr) {
2547         ql_dbg(ql_dbg_edif, vha, 0x09109,
2548             "WARNING: enode alloc failed for sid=%x\n",
2549             sid);
2550         qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2551         __qla_consume_iocb(vha, pkt, rsp);
2552         return;
2553     }
2554 
2555     purex = &ptr->u.purexinfo;
2556     purex->pur_info.pur_sid = a.did;
2557     purex->pur_info.pur_bytes_rcvd = totlen;
2558     purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr);
2559     purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle);
2560     purex->pur_info.pur_did.b.domain =  p->d_id[2];
2561     purex->pur_info.pur_did.b.area =  p->d_id[1];
2562     purex->pur_info.pur_did.b.al_pa =  p->d_id[0];
2563     purex->pur_info.vp_idx = p->vp_idx;
2564 
2565     a.sid = purex->pur_info.pur_did;
2566 
2567     rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp,
2568         purex->msgp_len);
2569     if (rc) {
2570         qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2571         qla_enode_free(vha, ptr);
2572         return;
2573     }
2574     beid.al_pa = purex->pur_info.pur_did.b.al_pa;
2575     beid.area   = purex->pur_info.pur_did.b.area;
2576     beid.domain = purex->pur_info.pur_did.b.domain;
2577     host = qla_find_host_by_d_id(vha, beid);
2578     if (!host) {
2579         ql_log(ql_log_fatal, vha, 0x508b,
2580             "%s Drop ELS due to unable to find host %06x\n",
2581             __func__, purex->pur_info.pur_did.b24);
2582 
2583         qla_els_reject_iocb(vha, (*rsp)->qpair, &a);
2584         qla_enode_free(vha, ptr);
2585         return;
2586     }
2587 
2588     fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid);
2589 
2590     if (DBELL_INACTIVE(vha)) {
2591         ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n",
2592             __func__, host->e_dbell.db_flags,
2593             fcport ? fcport->d_id.b24 : 0);
2594 
2595         qla_els_reject_iocb(host, (*rsp)->qpair, &a);
2596         qla_enode_free(host, ptr);
2597         return;
2598     }
2599 
2600     if (fcport && EDIF_SESSION_DOWN(fcport)) {
2601         ql_dbg(ql_dbg_edif, host, 0x13b6,
2602             "%s terminate exchange. Send logo to 0x%x\n",
2603             __func__, a.did.b24);
2604 
2605         a.tx_byte_count = a.tx_len = 0;
2606         a.tx_addr = 0;
2607         a.control_flags = EPD_RX_XCHG;  /* EPD_RX_XCHG = terminate cmd */
2608         qla_els_reject_iocb(host, (*rsp)->qpair, &a);
2609         qla_enode_free(host, ptr);
2610         /* send logo to let remote port know to tear down session */
2611         fcport->send_els_logo = 1;
2612         qlt_schedule_sess_for_deletion(fcport);
2613         return;
2614     }
2615 
2616     /* add the local enode to the list */
2617     qla_enode_add(host, ptr);
2618 
2619     ql_dbg(ql_dbg_edif, host, 0x0910c,
2620         "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n",
2621         __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24,
2622         purex->pur_info.pur_did.b24, purex->pur_info.pur_rx_xchg_address);
2623 
2624     qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL);
2625 }
2626 
2627 static uint16_t  qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir)
2628 {
2629     struct scsi_qla_host *vha = fcport->vha;
2630     struct qla_hw_data *ha = vha->hw;
2631     void *sa_id_map;
2632     unsigned long flags = 0;
2633     u16 sa_index;
2634 
2635     ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
2636         "%s: entry\n", __func__);
2637 
2638     if (dir)
2639         sa_id_map = ha->edif_tx_sa_id_map;
2640     else
2641         sa_id_map = ha->edif_rx_sa_id_map;
2642 
2643     spin_lock_irqsave(&ha->sadb_fp_lock, flags);
2644     sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX);
2645     if (sa_index >=  EDIF_NUM_SA_INDEX) {
2646         spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
2647         return INVALID_EDIF_SA_INDEX;
2648     }
2649     set_bit(sa_index, sa_id_map);
2650     spin_unlock_irqrestore(&ha->sadb_fp_lock, flags);
2651 
2652     if (dir)
2653         sa_index += EDIF_TX_SA_INDEX_BASE;
2654 
2655     ql_dbg(ql_dbg_edif, vha, 0x3063,
2656         "%s: index retrieved from free pool %d\n", __func__, sa_index);
2657 
2658     return sa_index;
2659 }
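
/*
 * Free-pool sketch: each direction keeps a bitmap of EDIF_NUM_SA_INDEX
 * slots; find_first_zero_bit()/set_bit() under sadb_fp_lock claims one,
 * and TX indexes are then offset by EDIF_TX_SA_INDEX_BASE so the two
 * directions never hand out colliding values.  The release side,
 * qla_edif_add_sa_index_to_freepool(), is presumably the inverse,
 * clearing the bit again.
 */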
2660 
2661 /* find an sadb entry for an nport_handle */
2662 static struct edif_sa_index_entry *
2663 qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle,
2664         struct list_head *sa_list)
2665 {
2666     struct edif_sa_index_entry *entry;
2667     struct edif_sa_index_entry *tentry;
2668     struct list_head *indx_list = sa_list;
2669 
2670     list_for_each_entry_safe(entry, tentry, indx_list, next) {
2671         if (entry->handle == nport_handle)
2672             return entry;
2673     }
2674     return NULL;
2675 }
2676 
2677 /* remove an sa_index from the nport_handle and return it to the free pool */
2678 static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle,
2679         uint16_t sa_index)
2680 {
2681     struct edif_sa_index_entry *entry;
2682     struct list_head *sa_list;
2683     int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
2684     int slot = 0;
2685     int free_slot_count = 0;
2686     scsi_qla_host_t *vha = fcport->vha;
2687     struct qla_hw_data *ha = vha->hw;
2688     unsigned long flags = 0;
2689 
2690     ql_dbg(ql_dbg_edif, vha, 0x3063,
2691         "%s: entry\n", __func__);
2692 
2693     if (dir)
2694         sa_list = &ha->sadb_tx_index_list;
2695     else
2696         sa_list = &ha->sadb_rx_index_list;
2697 
2698     entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
2699     if (!entry) {
2700         ql_dbg(ql_dbg_edif, vha, 0x3063,
2701             "%s: no entry found for nport_handle 0x%x\n",
2702             __func__, nport_handle);
2703         return -1;
2704     }
2705 
2706     spin_lock_irqsave(&ha->sadb_lock, flags);
2707     /*
2708      * each tx/rx direction has up to 2 sa indexes/slots: one slot for in-flight traffic,
2709      * the other is used at re-key time.
2710      */
2711     for (slot = 0; slot < 2; slot++) {
2712         if (entry->sa_pair[slot].sa_index == sa_index) {
2713             entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX;
2714             entry->sa_pair[slot].spi = 0;
2715             free_slot_count++;
2716             qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index);
2717         } else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
2718             free_slot_count++;
2719         }
2720     }
2721 
2722     if (free_slot_count == 2) {
2723         list_del(&entry->next);
2724         kfree(entry);
2725     }
2726     spin_unlock_irqrestore(&ha->sadb_lock, flags);
2727 
2728     ql_dbg(ql_dbg_edif, vha, 0x3063,
2729         "%s: sa_index %d removed, free_slot_count: %d\n",
2730         __func__, sa_index, free_slot_count);
2731 
2732     return 0;
2733 }
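
/*
 * Worked example of the two-slot pair: after a rekey, slot 0 may hold
 * the old in-flight sa_index and slot 1 the replacement.  Deleting the
 * old key frees slot 0 (free_slot_count reaches 1, the entry is kept);
 * only when the second index is later deleted do both slots read
 * INVALID_EDIF_SA_INDEX and the whole nport_handle entry is unlinked
 * and freed.
 */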
2734 
2735 void
2736 qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req,
2737     struct sa_update_28xx *pkt)
2738 {
2739     const char *func = "SA_UPDATE_RESPONSE_IOCB";
2740     srb_t *sp;
2741     struct edif_sa_ctl *sa_ctl;
2742     int old_sa_deleted = 1;
2743     uint16_t nport_handle;
2744     struct scsi_qla_host *vha;
2745 
2746     sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
2747 
2748     if (!sp) {
2749         ql_dbg(ql_dbg_edif, v, 0x3063,
2750             "%s: no sp found for pkt\n", __func__);
2751         return;
2752     }
2753     /* use sp->vha due to npiv */
2754     vha = sp->vha;
2755 
2756     switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) {
2757     case 0:
2758         ql_dbg(ql_dbg_edif, vha, 0x3063,
2759             "%s: EDIF SA UPDATE RX IOCB  vha: 0x%p  index: %d\n",
2760             __func__, vha, pkt->sa_index);
2761         break;
2762     case 1:
2763         ql_dbg(ql_dbg_edif, vha, 0x3063,
2764             "%s: EDIF SA DELETE RX IOCB  vha: 0x%p  index: %d\n",
2765             __func__, vha, pkt->sa_index);
2766         break;
2767     case 2:
2768         ql_dbg(ql_dbg_edif, vha, 0x3063,
2769             "%s: EDIF SA UPDATE TX IOCB  vha: 0x%p  index: %d\n",
2770             __func__, vha, pkt->sa_index);
2771         break;
2772     case 3:
2773         ql_dbg(ql_dbg_edif, vha, 0x3063,
2774             "%s: EDIF SA DELETE TX IOCB  vha: 0x%p  index: %d\n",
2775             __func__, vha, pkt->sa_index);
2776         break;
2777     }
2778 
2779     /*
2780      * dig the nport handle out of the iocb, fcport->loop_id cannot be trusted
2781      * to be correct during cleanup sa_update iocbs.
2782      */
2783     nport_handle = sp->fcport->loop_id;
2784 
2785     ql_dbg(ql_dbg_edif, vha, 0x3063,
2786         "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n",
2787         __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info,
2788         nport_handle, pkt->sa_index, pkt->flags, sp->handle);
2789 
2790     /* if rx delete, remove the timer */
2791     if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) ==  SA_FLAG_INVALIDATE) {
2792         struct edif_list_entry *edif_entry;
2793 
2794         sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2795 
2796         edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle);
2797         if (edif_entry) {
2798             ql_dbg(ql_dbg_edif, vha, 0x5033,
2799                 "%s: removing edif_entry %p, new sa_index: 0x%x\n",
2800                 __func__, edif_entry, pkt->sa_index);
2801             qla_edif_list_delete_sa_index(sp->fcport, edif_entry);
2802             del_timer(&edif_entry->timer);
2803 
2804             ql_dbg(ql_dbg_edif, vha, 0x5033,
2805                 "%s: releasing edif_entry %p, new sa_index: 0x%x\n",
2806                 __func__, edif_entry, pkt->sa_index);
2807 
2808             kfree(edif_entry);
2809         }
2810     }
2811 
2812     /*
2813      * if this is a delete for either tx or rx, make sure it succeeded.
2814      * The new_sa_info field should be 0xffff on success
2815      */
2816     if (pkt->flags & SA_FLAG_INVALIDATE)
2817         old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0;
2818 
2819     /* Process update and delete the same way */
2820 
2821     /* If this is an sadb cleanup delete, bypass sending events to IPSEC */
2822     if (sp->flags & SRB_EDIF_CLEANUP_DELETE) {
2823         sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2824         ql_dbg(ql_dbg_edif, vha, 0x3063,
2825             "%s: nph 0x%x, sa_index %d removed from fw\n",
2826             __func__, sp->fcport->loop_id, pkt->sa_index);
2827 
2828     } else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) &&
2829         old_sa_deleted) {
2830         /*
2831          * Note: We are only keeping track of the latest SA,
2832          * so we know when we can start enabling encryption per I/O.
2833          * If all SA's get deleted, let FW reject the IOCB.
2834          *
2835          * TODO: edif: don't set enabled here I think
2836          * TODO: edif: prli complete is where it should be set
2837          */
2838         ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
2839             "SA(%x)updated for s_id %02x%02x%02x\n",
2840             pkt->new_sa_info,
2841             pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
2842         sp->fcport->edif.enable = 1;
2843         if (pkt->flags & SA_FLAG_TX) {
2844             sp->fcport->edif.tx_sa_set = 1;
2845             sp->fcport->edif.tx_sa_pending = 0;
2846             qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2847                 QL_VND_SA_STAT_SUCCESS,
2848                 QL_VND_TX_SA_KEY, sp->fcport);
2849         } else {
2850             sp->fcport->edif.rx_sa_set = 1;
2851             sp->fcport->edif.rx_sa_pending = 0;
2852             qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2853                 QL_VND_SA_STAT_SUCCESS,
2854                 QL_VND_RX_SA_KEY, sp->fcport);
2855         }
2856     } else {
2857         ql_dbg(ql_dbg_edif, vha, 0x3063,
2858             "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n",
2859             __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info,
2860             pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]);
2861 
2862         if (pkt->flags & SA_FLAG_TX)
2863             qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2864                 (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
2865                 QL_VND_TX_SA_KEY, sp->fcport);
2866         else
2867             qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
2868                 (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED,
2869                 QL_VND_RX_SA_KEY, sp->fcport);
2870     }
2871 
2872     /* for delete, release sa_ctl, sa_index */
2873     if (pkt->flags & SA_FLAG_INVALIDATE) {
2874         /* release the sa_ctl */
2875         sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport,
2876             le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX));
2877         if (sa_ctl &&
2878             qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index,
2879             (pkt->flags & SA_FLAG_TX)) != NULL) {
2880             ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063,
2881                 "%s: freeing sa_ctl for index %d\n",
2882                 __func__, sa_ctl->index);
2883             qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index);
2884         } else {
2885             ql_dbg(ql_dbg_edif, vha, 0x3063,
2886                 "%s: sa_ctl NOT freed, sa_ctl: %p\n",
2887                 __func__, sa_ctl);
2888         }
2889         ql_dbg(ql_dbg_edif, vha, 0x3063,
2890             "%s: freeing sa_index %d, nph: 0x%x\n",
2891             __func__, le16_to_cpu(pkt->sa_index), nport_handle);
2892         qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
2893             le16_to_cpu(pkt->sa_index));
2894     /*
2895      * check for a failed sa_update and remove
2896      * the sadb entry.
2897      */
2898     } else if (pkt->u.comp_sts) {
2899         ql_dbg(ql_dbg_edif, vha, 0x3063,
2900             "%s: freeing sa_index %d, nph: 0x%x\n",
2901             __func__, pkt->sa_index, nport_handle);
2902         qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle,
2903             le16_to_cpu(pkt->sa_index));
2904         switch (le16_to_cpu(pkt->u.comp_sts)) {
2905         case CS_PORT_EDIF_UNAVAIL:
2906         case CS_PORT_EDIF_LOGOUT:
2907             qlt_schedule_sess_for_deletion(sp->fcport);
2908             break;
2909         default:
2910             break;
2911         }
2912     }
2913 
2914     sp->done(sp, 0);
2915 }
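
/*
 * Completion summary: for deletes, new_sa_info == 0xffff from firmware
 * is taken to mean the old SA is really gone; a clean completion
 * (entry_status and comp_sts both zero) raises a SAUPDATE_COMPL
 * doorbell event toward the app, while failures fold comp_sts into the
 * event status.  A failed update additionally releases its sa_index
 * and, on the EDIF unavailable/logout completion codes, schedules the
 * session for deletion.
 */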
2916 
2917 /**
2918  * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP
2919  * @sp: command to send to the ISP
2920  *
2921  * Return: non-zero if a failure occurred, else zero.
2922  */
2923 int
2924 qla28xx_start_scsi_edif(srb_t *sp)
2925 {
2926     int             nseg;
2927     unsigned long   flags;
2928     struct scsi_cmnd *cmd;
2929     uint32_t        *clr_ptr;
2930     uint32_t        index, i;
2931     uint32_t        handle;
2932     uint16_t        cnt;
2933     int16_t        req_cnt;
2934     uint16_t        tot_dsds;
2935     __be32 *fcp_dl;
2936     uint8_t additional_cdb_len;
2937     struct ct6_dsd *ctx;
2938     struct scsi_qla_host *vha = sp->vha;
2939     struct qla_hw_data *ha = vha->hw;
2940     struct cmd_type_6 *cmd_pkt;
2941     struct dsd64    *cur_dsd;
2942     uint8_t     avail_dsds = 0;
2943     struct scatterlist *sg;
2944     struct req_que *req = sp->qpair->req;
2945     spinlock_t *lock = sp->qpair->qp_lock_ptr;
2946 
2947     /* Setup device pointers. */
2948     cmd = GET_CMD_SP(sp);
2949 
2950     /* So we know we haven't pci_map'ed anything yet */
2951     tot_dsds = 0;
2952 
2953     /* Send marker if required */
2954     if (vha->marker_needed != 0) {
2955         if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) !=
2956             QLA_SUCCESS) {
2957             ql_log(ql_log_warn, vha, 0x300c,
2958                 "qla2x00_marker failed for cmd=%p.\n", cmd);
2959             return QLA_FUNCTION_FAILED;
2960         }
2961         vha->marker_needed = 0;
2962     }
2963 
2964     /* Acquire ring specific lock */
2965     spin_lock_irqsave(lock, flags);
2966 
2967     /* Check for room in outstanding command list. */
2968     handle = req->current_outstanding_cmd;
2969     for (index = 1; index < req->num_outstanding_cmds; index++) {
2970         handle++;
2971         if (handle == req->num_outstanding_cmds)
2972             handle = 1;
2973         if (!req->outstanding_cmds[handle])
2974             break;
2975     }
2976     if (index == req->num_outstanding_cmds)
2977         goto queuing_error;
2978 
2979     /* Map the sg table so we have an accurate count of sg entries needed */
2980     if (scsi_sg_count(cmd)) {
2981         nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2982             scsi_sg_count(cmd), cmd->sc_data_direction);
2983         if (unlikely(!nseg))
2984             goto queuing_error;
2985     } else {
2986         nseg = 0;
2987     }
2988 
2989     tot_dsds = nseg;
2990     req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2991 
2992     sp->iores.res_type = RESOURCE_INI;
2993     sp->iores.iocb_cnt = req_cnt;
2994     if (qla_get_iocbs(sp->qpair, &sp->iores))
2995         goto queuing_error;
2996 
2997     if (req->cnt < (req_cnt + 2)) {
2998         cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2999             rd_reg_dword(req->req_q_out);
3000         if (req->ring_index < cnt)
3001             req->cnt = cnt - req->ring_index;
3002         else
3003             req->cnt = req->length -
3004                 (req->ring_index - cnt);
3005         if (req->cnt < (req_cnt + 2))
3006             goto queuing_error;
3007     }
3008 
3009     ctx = sp->u.scmd.ct6_ctx =
3010         mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3011     if (!ctx) {
3012         ql_log(ql_log_fatal, vha, 0x3010,
3013             "Failed to allocate ctx for cmd=%p.\n", cmd);
3014         goto queuing_error;
3015     }
3016 
3017     memset(ctx, 0, sizeof(struct ct6_dsd));
3018     ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3019         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3020     if (!ctx->fcp_cmnd) {
3021         ql_log(ql_log_fatal, vha, 0x3011,
3022             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3023         goto queuing_error;
3024     }
3025 
3026     /* Initialize the DSD list and dma handle */
3027     INIT_LIST_HEAD(&ctx->dsd_list);
3028     ctx->dsd_use_cnt = 0;
3029 
3030     if (cmd->cmd_len > 16) {
3031         additional_cdb_len = cmd->cmd_len - 16;
3032         if ((cmd->cmd_len % 4) != 0) {
3033             /*
3034              * A SCSI command longer than 16 bytes must have a
3035              * length that is a multiple of 4
3036              */
3037             ql_log(ql_log_warn, vha, 0x3012,
3038                 "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
3039                 cmd->cmd_len, cmd);
3040             goto queuing_error_fcp_cmnd;
3041         }
3042         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3043     } else {
3044         additional_cdb_len = 0;
3045         ctx->fcp_cmnd_len = 12 + 16 + 4;
3046     }
3047 
3048     cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3049     cmd_pkt->handle = make_handle(req->id, handle);
3050 
3051     /*
3052      * Zero out remaining portion of packet.
3053      * tagged queuing modifier -- default is TSK_SIMPLE (0).
3054      */
3055     clr_ptr = (uint32_t *)cmd_pkt + 2;
3056     memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3057     cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3058 
3059     /* No data transfer */
3060     if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
3061         cmd_pkt->byte_count = cpu_to_le32(0);
3062         goto no_dsds;
3063     }
3064 
3065     /* Set transfer direction */
3066     if (cmd->sc_data_direction == DMA_TO_DEVICE) {
3067         cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
3068         vha->qla_stats.output_bytes += scsi_bufflen(cmd);
3069         vha->qla_stats.output_requests++;
3070         sp->fcport->edif.tx_bytes += scsi_bufflen(cmd);
3071     } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
3072         cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
3073         vha->qla_stats.input_bytes += scsi_bufflen(cmd);
3074         vha->qla_stats.input_requests++;
3075         sp->fcport->edif.rx_bytes += scsi_bufflen(cmd);
3076     }
3077 
3078     cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
3079     cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA));
3080 
3081     /* One DSD is available in the Command Type 6 IOCB */
3082     avail_dsds = 1;
3083     cur_dsd = &cmd_pkt->fcp_dsd;
3084 
3085     /* Load data segments */
3086     scsi_for_each_sg(cmd, sg, tot_dsds, i) {
3087         dma_addr_t      sle_dma;
3088         cont_a64_entry_t *cont_pkt;
3089 
3090         /* Allocate additional continuation packets? */
3091         if (avail_dsds == 0) {
3092             /*
3093              * Five DSDs are available in the Continuation
3094              * Type 1 IOCB.
3095              */
3096             cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
3097             cur_dsd = cont_pkt->dsd;
3098             avail_dsds = 5;
3099         }
3100 
3101         sle_dma = sg_dma_address(sg);
3102         put_unaligned_le64(sle_dma, &cur_dsd->address);
3103         cur_dsd->length = cpu_to_le32(sg_dma_len(sg));
3104         cur_dsd++;
3105         avail_dsds--;
3106     }
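    /*
     * Illustrative arithmetic for the loop above: one DSD fits in the
     * Command Type 6 IOCB and five fit in each Continuation Type 1 IOCB,
     * so a command with, say, 12 data segments needs
     * 1 + ceil((12 - 1) / 5) = 4 ring entries, which is what req_cnt is
     * expected to account for.
     */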
3107 
3108 no_dsds:
3109     /* Set NPORT-ID and LUN number */
3110     cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3111     cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3112     cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3113     cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3114     cmd_pkt->vp_index = sp->vha->vp_idx;
3115 
3116     cmd_pkt->entry_type = COMMAND_TYPE_6;
3117 
3118     /* Set total IOCB entry count. */
3119     cmd_pkt->entry_count = (uint8_t)req_cnt;
3120 
3121     int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3122     host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3123 
3124     /* build FCP_CMND IU */
3125     int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3126     ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3127 
3128     if (cmd->sc_data_direction == DMA_TO_DEVICE)
3129         ctx->fcp_cmnd->additional_cdb_len |= 1;
3130     else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3131         ctx->fcp_cmnd->additional_cdb_len |= 2;
3132 
3133     /* Populate the FCP_PRIO. */
3134     if (ha->flags.fcp_prio_enabled)
3135         ctx->fcp_cmnd->task_attribute |=
3136             sp->fcport->fcp_prio << 3;
3137 
3138     memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3139 
3140     fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3141         additional_cdb_len);
3142     *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
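    /*
     * The 4-byte FCP_DL field sits immediately after the CDB (16 +
     * additional_cdb_len bytes into the cdb array) and carries the total
     * transfer length in big-endian order, hence the htonl() above.
     */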
3143 
3144     cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3145     put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);
3146 
3147     sp->flags |= SRB_FCP_CMND_DMA_VALID;
3148     cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3149     /* Set total IOCB entry count. */
3150     cmd_pkt->entry_count = (uint8_t)req_cnt;
3151     cmd_pkt->entry_status = 0;
3152 
3153     /* Record the command in the outstanding array and update ring bookkeeping. */
3154     req->current_outstanding_cmd = handle;
3155     req->outstanding_cmds[handle] = sp;
3156     sp->handle = handle;
3157     cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3158     req->cnt -= req_cnt;
3159 
3160     /* Adjust ring index. */
3161     wmb();
3162     req->ring_index++;
3163     if (req->ring_index == req->length) {
3164         req->ring_index = 0;
3165         req->ring_ptr = req->ring;
3166     } else {
3167         req->ring_ptr++;
3168     }
3169 
3170     sp->qpair->cmd_cnt++;
3171     /* Set chip new ring index. */
3172     wrt_reg_dword(req->req_q_in, req->ring_index);
3173 
3174     spin_unlock_irqrestore(lock, flags);
3175 
3176     return QLA_SUCCESS;
3177 
3178 queuing_error_fcp_cmnd:
3179     dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3180 queuing_error:
3181     if (tot_dsds)
3182         scsi_dma_unmap(cmd);
3183 
3184     if (sp->u.scmd.ct6_ctx) {
3185         mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
3186         sp->u.scmd.ct6_ctx = NULL;
3187     }
3188     qla_put_iocbs(sp->qpair, &sp->iores);
3189     spin_unlock_irqrestore(lock, flags);
3190 
3191     return QLA_FUNCTION_FAILED;
3192 }
3193 
3194 /**********************************************
3195  * edif update/delete sa_index list functions *
3196  **********************************************/
3197 
3198 /* clear the edif_indx_list for this port */
3199 void qla_edif_list_del(fc_port_t *fcport)
3200 {
3201     struct edif_list_entry *indx_lst;
3202     struct edif_list_entry *tindx_lst;
3203     struct list_head *indx_list = &fcport->edif.edif_indx_list;
3204     unsigned long flags = 0;
3205 
3206     spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
3207     list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) {
3208         list_del(&indx_lst->next);
3209         kfree(indx_lst);
3210     }
3211     spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3212 }
3213 
3214 /******************
3215  * SADB functions *
3216  ******************/
3217 
3218 /* allocate/retrieve an sa_index for a given spi */
3219 static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
3220         struct qla_sa_update_frame *sa_frame)
3221 {
3222     struct edif_sa_index_entry *entry;
3223     struct list_head *sa_list;
3224     uint16_t sa_index;
3225     int dir = sa_frame->flags & SAU_FLG_TX;
3226     int slot = 0;
3227     int free_slot = -1;
3228     scsi_qla_host_t *vha = fcport->vha;
3229     struct qla_hw_data *ha = vha->hw;
3230     unsigned long flags = 0;
3231     uint16_t nport_handle = fcport->loop_id;
3232 
3233     ql_dbg(ql_dbg_edif, vha, 0x3063,
3234         "%s: entry  fc_port: %p, nport_handle: 0x%x\n",
3235         __func__, fcport, nport_handle);
3236 
3237     if (dir)
3238         sa_list = &ha->sadb_tx_index_list;
3239     else
3240         sa_list = &ha->sadb_rx_index_list;
3241 
3242     entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
3243     if (!entry) {
3244         if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) {
3245             ql_dbg(ql_dbg_edif, vha, 0x3063,
3246                 "%s: rx delete request with no entry\n", __func__);
3247             return RX_DELETE_NO_EDIF_SA_INDEX;
3248         }
3249 
3250         /* if there is no entry for this nport, add one */
3251         entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC);
3252         if (!entry)
3253             return INVALID_EDIF_SA_INDEX;
3254 
3255         sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
3256         if (sa_index == INVALID_EDIF_SA_INDEX) {
3257             kfree(entry);
3258             return INVALID_EDIF_SA_INDEX;
3259         }
3260 
3261         INIT_LIST_HEAD(&entry->next);
3262         entry->handle = nport_handle;
3263         entry->fcport = fcport;
3264         entry->sa_pair[0].spi = sa_frame->spi;
3265         entry->sa_pair[0].sa_index = sa_index;
3266         entry->sa_pair[1].spi = 0;
3267         entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX;
3268         spin_lock_irqsave(&ha->sadb_lock, flags);
3269         list_add_tail(&entry->next, sa_list);
3270         spin_unlock_irqrestore(&ha->sadb_lock, flags);
3271         ql_dbg(ql_dbg_edif, vha, 0x3063,
3272             "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n",
3273             __func__, nport_handle, sa_frame->spi, sa_index);
3274 
3275         return sa_index;
3276     }
3277 
3278     spin_lock_irqsave(&ha->sadb_lock, flags);
3279 
3280     /* look for an existing entry for this spi, remembering a free slot */
3281     for (slot = 0; slot < 2; slot++) {
3282         if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
3283             free_slot = slot;
3284         } else {
3285             if (entry->sa_pair[slot].spi == sa_frame->spi) {
3286                 spin_unlock_irqrestore(&ha->sadb_lock, flags);
3287                 ql_dbg(ql_dbg_edif, vha, 0x3063,
3288                     "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n",
3289                     __func__, slot, entry->handle, sa_frame->spi,
3290                     entry->sa_pair[slot].sa_index);
3291                 return entry->sa_pair[slot].sa_index;
3292             }
3293         }
3294     }
3295     spin_unlock_irqrestore(&ha->sadb_lock, flags);
3296 
3297     /* both slots are used */
3298     if (free_slot == -1) {
3299         ql_dbg(ql_dbg_edif, vha, 0x3063,
3300             "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
3301             __func__, entry->handle, sa_frame->spi);
3302         ql_dbg(ql_dbg_edif, vha, 0x3063,
3303             "%s: Slot 0  spi: 0x%x  sa_index: %d,  Slot 1  spi: 0x%x  sa_index: %d\n",
3304             __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index,
3305             entry->sa_pair[1].spi, entry->sa_pair[1].sa_index);
3306 
3307         return INVALID_EDIF_SA_INDEX;
3308     }
3309 
3310     /* there is at least one free slot, use it */
3311     sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
3312     if (sa_index == INVALID_EDIF_SA_INDEX) {
3313         ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
3314             "%s: empty freepool!!\n", __func__);
3315         return INVALID_EDIF_SA_INDEX;
3316     }
3317 
3318     spin_lock_irqsave(&ha->sadb_lock, flags);
3319     entry->sa_pair[free_slot].spi = sa_frame->spi;
3320     entry->sa_pair[free_slot].sa_index = sa_index;
3321     spin_unlock_irqrestore(&ha->sadb_lock, flags);
3322     ql_dbg(ql_dbg_edif, fcport->vha, 0x3063,
3323         "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
3324         __func__, free_slot, entry->handle, sa_frame->spi, sa_index);
3325 
3326     return sa_index;
3327 }
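/*
 * A minimal sketch (hypothetical names, no locking) of the two-slot
 * spi -> sa_index cache that qla_edif_sadb_get_sa_index() manages above;
 * keeping two slots per nport handle presumably lets an old and a new SA
 * coexist while a rekey is in flight.  The sketch only makes the slot
 * selection policy explicit: reuse a slot whose spi already matches,
 * otherwise claim a free slot, otherwise fail.
 */
struct sadb_pair_sketch {
    uint32_t spi;
    uint16_t sa_index;  /* INVALID_EDIF_SA_INDEX when the slot is free */
};

static int sadb_sketch_get(struct sadb_pair_sketch pair[2], uint32_t spi,
        uint16_t new_sa_index)
{
    int slot, free_slot = -1;

    for (slot = 0; slot < 2; slot++) {
        if (pair[slot].sa_index == INVALID_EDIF_SA_INDEX)
            free_slot = slot;       /* remember a free slot */
        else if (pair[slot].spi == spi)
            return pair[slot].sa_index; /* spi already cached */
    }
    if (free_slot < 0)
        return -1;          /* both slots already in use */
    pair[free_slot].spi = spi;
    pair[free_slot].sa_index = new_sa_index;
    return new_sa_index;
}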
3328 
3329 /* release any sadb entries -- only done at teardown */
3330 void qla_edif_sadb_release(struct qla_hw_data *ha)
3331 {
3332     struct edif_sa_index_entry *entry, *tmp;
3333 
3334     list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
3335         list_del(&entry->next);
3336         kfree(entry);
3337     }
3338 
3339     list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
3340         list_del(&entry->next);
3341         kfree(entry);
3342     }
3343 }
3344 
3345 /***************************
3346  * sadb freepool functions *
3347  ***************************/
3348 
3349 /* build the rx and tx sa_index free pools -- only done once at adapter init */
3350 int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
3351 {
3352     ha->edif_tx_sa_id_map =
3353         kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
3354 
3355     if (!ha->edif_tx_sa_id_map) {
3356         ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3357             "Unable to allocate memory for sadb tx.\n");
3358         return -ENOMEM;
3359     }
3360 
3361     ha->edif_rx_sa_id_map =
3362         kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL);
3363     if (!ha->edif_rx_sa_id_map) {
3364         kfree(ha->edif_tx_sa_id_map);
3365         ha->edif_tx_sa_id_map = NULL;
3366         ql_log_pci(ql_log_fatal, ha->pdev, 0x0009,
3367             "Unable to allocate memory for sadb rx.\n");
3368         return -ENOMEM;
3369     }
3370     return 0;
3371 }
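/*
 * A minimal sketch, assuming the free pools above are used as plain bitmaps
 * of EDIF_NUM_SA_INDEX bits: sadb_bitmap_get() below is a hypothetical
 * helper showing how one index could be claimed from such a map.  The
 * driver's real allocator is qla_edif_get_sa_index_from_freepool().
 */
static inline int sadb_bitmap_get(unsigned long *map)
{
    unsigned int bit = find_first_zero_bit(map, EDIF_NUM_SA_INDEX);

    if (bit >= EDIF_NUM_SA_INDEX)
        return -1;      /* pool exhausted */
    set_bit(bit, map);      /* mark the index as in use */
    return bit;
}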
3372 
3373 /* release the free pool -- only done during adapter teardown */
3374 void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
3375 {
3376     kfree(ha->edif_tx_sa_id_map);
3377     ha->edif_tx_sa_id_map = NULL;
3378     kfree(ha->edif_rx_sa_id_map);
3379     ha->edif_rx_sa_id_map = NULL;
3380 }
3381 
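/*
 * Called from the SCSI-read and CTIO completion paths below: if a delayed rx
 * SA delete is parked on the port's edif_indx_list for this nport handle,
 * count the completion and, once EDIF_RX_DELETE_FILTER_COUNT completions
 * have been seen, hand the old sa_index's sa_ctl to the SA-replace worker so
 * the delete is finally issued.
 */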
3382 static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
3383         fc_port_t *fcport, uint32_t handle, uint16_t sa_index)
3384 {
3385     struct edif_list_entry *edif_entry;
3386     struct edif_sa_ctl *sa_ctl;
3387     uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX;
3388     unsigned long flags = 0;
3389     uint16_t nport_handle = fcport->loop_id;
3390     uint16_t cached_nport_handle;
3391 
3392     spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
3393     edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle);
3394     if (!edif_entry) {
3395         spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3396         return;     /* no pending delete for this handle */
3397     }
3398 
3399     /*
3400      * bail out if there is no pending delete for this index or if the
3401      * iocb does not match the rx sa_index
3402      */
3403     if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX ||
3404         edif_entry->update_sa_index != sa_index) {
3405         spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3406         return;
3407     }
3408 
3409     /*
3410      * wait until we have seen at least EDIF_RX_DELETE_FILTER_COUNT
3411      * transfers before queueing the RX delete
3412      */
3413     if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) {
3414         spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3415         return;
3416     }
3417 
3418     ql_dbg(ql_dbg_edif, vha, 0x5033,
3419         "%s: invalidating delete_sa_index,  update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n",
3420         __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index);
3421 
3422     delete_sa_index = edif_entry->delete_sa_index;
3423     edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
3424     cached_nport_handle = edif_entry->handle;
3425     spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
3426 
3427     /* sanity check on the nport handle */
3428     if (nport_handle != cached_nport_handle) {
3429         ql_dbg(ql_dbg_edif, vha, 0x3063,
3430             "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
3431             __func__, nport_handle, cached_nport_handle);
3432     }
3433 
3434     /* find the sa_ctl for the delete and schedule the delete */
3435     sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0);
3436     if (sa_ctl) {
3437         ql_dbg(ql_dbg_edif, vha, 0x3063,
3438             "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
3439             __func__, sa_ctl, sa_index);
3440         ql_dbg(ql_dbg_edif, vha, 0x3063,
3441             "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
3442             delete_sa_index,
3443             edif_entry->update_sa_index, nport_handle, handle);
3444 
3445         sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
3446         set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
3447         qla_post_sa_replace_work(fcport->vha, fcport,
3448             nport_handle, sa_ctl);
3449     } else {
3450         ql_dbg(ql_dbg_edif, vha, 0x3063,
3451             "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
3452             __func__, delete_sa_index);
3453     }
3454 }
3455 
3456 void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
3457         srb_t *sp, struct sts_entry_24xx *sts24)
3458 {
3459     fc_port_t *fcport = sp->fcport;
3460     /* the sa_index used by this iocb is reported in sts24->edif_sa_index */
3461     struct scsi_cmnd *cmd = GET_CMD_SP(sp);
3462     uint32_t handle;
3463 
3464     handle = (uint32_t)LSW(sts24->handle);
3465 
3466     /* find out if this status IOCB is for a SCSI read */
3467     if (cmd->sc_data_direction != DMA_FROM_DEVICE)
3468         return;
3469 
3470     return __chk_edif_rx_sa_delete_pending(vha, fcport, handle,
3471        le16_to_cpu(sts24->edif_sa_index));
3472 }
3473 
3474 void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
3475         struct ctio7_from_24xx *pkt)
3476 {
3477     __chk_edif_rx_sa_delete_pending(vha, fcport,
3478         pkt->handle, le16_to_cpu(pkt->edif_sa_index));
3479 }
3480 
3481 static void qla_parse_auth_els_ctl(struct srb *sp)
3482 {
3483     struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg;
3484     struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job;
3485     struct fc_bsg_request *request = bsg_job->request;
3486     struct qla_bsg_auth_els_request *p =
3487         (struct qla_bsg_auth_els_request *)bsg_job->request;
3488 
3489     a->tx_len = a->tx_byte_count = sp->remap.req.len;
3490     a->tx_addr = sp->remap.req.dma;
3491     a->rx_len = a->rx_byte_count = sp->remap.rsp.len;
3492     a->rx_addr = sp->remap.rsp.dma;
3493 
3494     if (p->e.sub_cmd == SEND_ELS_REPLY) {
3495         a->control_flags = p->e.extra_control_flags << 13;
3496         a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address);
3497         if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC)
3498             a->els_opcode = ELS_LS_ACC;
3499         else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT)
3500             a->els_opcode = ELS_LS_RJT;
3501     }
3502     a->did = sp->fcport->d_id;
3503     a->els_opcode =  request->rqst_data.h_els.command_code;
3504     a->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3505     a->vp_idx = sp->vha->vp_idx;
3506 }
3507 
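/*
 * BSG entry point for the authentication application's ELS passthrough: the
 * request and reply payloads are copied into purex_dma_pool buffers, wrapped
 * in an SRB_ELS_CMD_HST_NOLOGIN srb and started; qla2x00_start_sp() is
 * retried on EAGAIN up to EDIF_RETRY_COUNT times, sleeping
 * EDIF_MSLEEP_INTERVAL ms between attempts.
 */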
3508 int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
3509 {
3510     struct fc_bsg_request *bsg_request = bsg_job->request;
3511     struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3512     fc_port_t *fcport = NULL;
3513     struct qla_hw_data *ha = vha->hw;
3514     srb_t *sp;
3515     int rval =  (DID_ERROR << 16), cnt;
3516     port_id_t d_id;
3517     struct qla_bsg_auth_els_request *p =
3518         (struct qla_bsg_auth_els_request *)bsg_job->request;
3519     struct qla_bsg_auth_els_reply *rpl =
3520         (struct qla_bsg_auth_els_reply *)bsg_job->reply;
3521 
3522     rpl->version = EDIF_VERSION1;
3523 
3524     d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
3525     d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
3526     d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0];
3527 
3528     /* find matching d_id in fcport list */
3529     fcport = qla2x00_find_fcport_by_pid(vha, &d_id);
3530     if (!fcport) {
3531         ql_dbg(ql_dbg_edif, vha, 0x911a,
3532             "%s no online fcport found for portid=%06x.\n",
3533             __func__, d_id.b24);
3534         SET_DID_STATUS(bsg_reply->result, DID_ERROR);
3535         return -EIO;
3536     }
3537 
3538     if (qla_bsg_check(vha, bsg_job, fcport))
3539         return 0;
3540 
3541     if (EDIF_SESS_DELETE(fcport)) {
3542         ql_dbg(ql_dbg_edif, vha, 0x910d,
3543             "%s ELS code %x, no loop id.\n", __func__,
3544             bsg_request->rqst_data.r_els.els_code);
3545         SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
3546         return -ENXIO;
3547     }
3548 
3549     if (!vha->flags.online) {
3550         ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
3551         SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
3552         rval = -EIO;
3553         goto done;
3554     }
3555 
3556     /* pass through is supported only for ISP 4Gb or higher */
3557     if (!IS_FWI2_CAPABLE(ha)) {
3558         ql_dbg(ql_dbg_user, vha, 0x7001,
3559             "ELS passthru not supported for ISP23xx based adapters.\n");
3560         SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
3561         rval = -EPERM;
3562         goto done;
3563     }
3564 
3565     sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3566     if (!sp) {
3567         ql_dbg(ql_dbg_user, vha, 0x7004,
3568             "Failed get sp pid=%06x\n", fcport->d_id.b24);
3569         rval = -ENOMEM;
3570         SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
3571         goto done;
3572     }
3573 
3574     sp->remap.req.len = bsg_job->request_payload.payload_len;
3575     sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
3576         GFP_KERNEL, &sp->remap.req.dma);
3577     if (!sp->remap.req.buf) {
3578         ql_dbg(ql_dbg_user, vha, 0x7005,
3579             "Failed allocate request dma len=%x\n",
3580             bsg_job->request_payload.payload_len);
3581         rval = -ENOMEM;
3582         SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
3583         goto done_free_sp;
3584     }
3585 
3586     sp->remap.rsp.len = bsg_job->reply_payload.payload_len;
3587     sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
3588         GFP_KERNEL, &sp->remap.rsp.dma);
3589     if (!sp->remap.rsp.buf) {
3590         ql_dbg(ql_dbg_user, vha, 0x7006,
3591             "Failed allocate response dma len=%x\n",
3592             bsg_job->reply_payload.payload_len);
3593         rval = -ENOMEM;
3594         SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
3595         goto done_free_remap_req;
3596     }
3597     sg_copy_to_buffer(bsg_job->request_payload.sg_list,
3598         bsg_job->request_payload.sg_cnt, sp->remap.req.buf,
3599         sp->remap.req.len);
3600     sp->remap.remapped = true;
3601 
3602     sp->type = SRB_ELS_CMD_HST_NOLOGIN;
3603     sp->name = "SPCN_BSG_HST_NOLOGIN";
3604     sp->u.bsg_cmd.bsg_job = bsg_job;
3605     qla_parse_auth_els_ctl(sp);
3606 
3607     sp->free = qla2x00_bsg_sp_free;
3608     sp->done = qla2x00_bsg_job_done;
3609 
3610     cnt = 0;
3611 retry:
3612     rval = qla2x00_start_sp(sp);
3613     switch (rval) {
3614     case QLA_SUCCESS:
3615         ql_dbg(ql_dbg_edif, vha, 0x700a,
3616                "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n",
3617                __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
3618                p->e.extra_rx_xchg_address, p->e.extra_control_flags,
3619                sp->handle, sp->remap.req.len, bsg_job);
3620         break;
3621     case EAGAIN:
3622         msleep(EDIF_MSLEEP_INTERVAL);
3623         cnt++;
3624         if (cnt < EDIF_RETRY_COUNT)
3625             goto retry;
3626         fallthrough;
3627     default:
3628         ql_log(ql_log_warn, vha, 0x700e,
3629             "%s qla2x00_start_sp failed = %d\n", __func__, rval);
3630         SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
3631         rval = -EIO;
3632         goto done_free_remap_rsp;
3633     }
3634     return rval;
3635 
3636 done_free_remap_rsp:
3637     dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
3638         sp->remap.rsp.dma);
3639 done_free_remap_req:
3640     dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
3641         sp->remap.req.dma);
3642 done_free_sp:
3643     qla2x00_rel_sp(sp);
3644 
3645 done:
3646     return rval;
3647 }
3648 
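/*
 * Notify the authentication application that this session is going down and
 * wait for it to acknowledge: the loop below polls sess_down_acked every
 * 100 ms, giving up after roughly 100 iterations (about 10 seconds) or when
 * the vport is being deleted.
 */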
3649 void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
3650 {
3651     u16 cnt = 0;
3652 
3653     if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) {
3654         ql_dbg(ql_dbg_disc, vha, 0xf09c,
3655             "%s: sess %8phN send port_offline event\n",
3656             __func__, sess->port_name);
3657         sess->edif.app_sess_online = 0;
3658         sess->edif.sess_down_acked = 0;
3659         qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
3660             sess->d_id.b24, 0, sess);
3661         qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
3662 
3663         while (!READ_ONCE(sess->edif.sess_down_acked) &&
3664                !test_bit(VPORT_DELETE, &vha->dpc_flags)) {
3665             msleep(100);
3666             cnt++;
3667             if (cnt > 100)
3668                 break;
3669         }
3670         sess->edif.sess_down_acked = 0;
3671         ql_dbg(ql_dbg_disc, vha, 0xf09c,
3672                "%s: sess %8phN port_offline event completed\n",
3673                __func__, sess->port_name);
3674     }
3675 }
3676 
3677 void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport)
3678 {
3679     if (!(fcport->flags & FCF_FCSP_DEVICE))
3680         return;
3681 
3682     qla_edb_clear(vha, fcport->d_id);
3683     qla_enode_clear(vha, fcport->d_id);
3684 }