0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024 #include <linux/blkdev.h>
0025 #include <linux/delay.h>
0026 #include <linux/slab.h>
0027 #include <linux/pci.h>
0028 #include <linux/kthread.h>
0029 #include <linux/interrupt.h>
0030 #include <linux/lockdep.h>
0031 #include <linux/utsname.h>
0032
0033 #include <scsi/scsi.h>
0034 #include <scsi/scsi_device.h>
0035 #include <scsi/scsi_host.h>
0036 #include <scsi/scsi_transport_fc.h>
0037 #include <scsi/fc/fc_fs.h>
0038
0039 #include "lpfc_hw4.h"
0040 #include "lpfc_hw.h"
0041 #include "lpfc_nl.h"
0042 #include "lpfc_disc.h"
0043 #include "lpfc_sli.h"
0044 #include "lpfc_sli4.h"
0045 #include "lpfc.h"
0046 #include "lpfc_scsi.h"
0047 #include "lpfc_nvme.h"
0048 #include "lpfc_logmsg.h"
0049 #include "lpfc_crtn.h"
0050 #include "lpfc_vport.h"
0051 #include "lpfc_debugfs.h"
0052
0053
0054 static uint8_t lpfcAlpaArray[] = {
0055 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
0056 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
0057 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
0058 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
0059 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
0060 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
0061 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
0062 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
0063 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
0064 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
0065 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
0066 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
0067 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
0068 };
0069
0070 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
0071 static void lpfc_disc_flush_list(struct lpfc_vport *vport);
0072 static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
0073 static int lpfc_fcf_inuse(struct lpfc_hba *);
0074 static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
0075 static void lpfc_check_inactive_vmid(struct lpfc_hba *phba);
0076 static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba);
0077
0078 static int
0079 lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
0080 {
0081 if (ndlp->nlp_fc4_type ||
0082 ndlp->nlp_type & NLP_FABRIC)
0083 return 1;
0084 return 0;
0085 }
0086
0087
0088
0089
0090
0091
0092 static int
0093 lpfc_rport_invalid(struct fc_rport *rport)
0094 {
0095 struct lpfc_rport_data *rdata;
0096 struct lpfc_nodelist *ndlp;
0097
0098 if (!rport) {
0099 pr_err("**** %s: NULL rport, exit.\n", __func__);
0100 return -EINVAL;
0101 }
0102
0103 rdata = rport->dd_data;
0104 if (!rdata) {
0105 pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
0106 __func__, rport, rport->scsi_target_id);
0107 return -EINVAL;
0108 }
0109
0110 ndlp = rdata->pnode;
0111 if (!rdata->pnode) {
0112 pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
0113 __func__, rport, rport->scsi_target_id);
0114 return -EINVAL;
0115 }
0116
0117 if (!ndlp->vport) {
0118 pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
0119 "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
0120 rport->scsi_target_id);
0121 return -EINVAL;
0122 }
0123 return 0;
0124 }
0125
0126 void
0127 lpfc_terminate_rport_io(struct fc_rport *rport)
0128 {
0129 struct lpfc_rport_data *rdata;
0130 struct lpfc_nodelist *ndlp;
0131 struct lpfc_vport *vport;
0132
0133 if (lpfc_rport_invalid(rport))
0134 return;
0135
0136 rdata = rport->dd_data;
0137 ndlp = rdata->pnode;
0138 vport = ndlp->vport;
0139 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
0140 "rport terminate: sid:x%x did:x%x flg:x%x",
0141 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
0142
0143 if (ndlp->nlp_sid != NLP_NO_SID)
0144 lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
0145 }
0146
0147
0148
0149
0150 void
0151 lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
0152 {
0153 struct lpfc_nodelist *ndlp;
0154 struct lpfc_vport *vport;
0155 struct lpfc_hba *phba;
0156 struct lpfc_work_evt *evtp;
0157 unsigned long iflags;
0158
0159 ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
0160 if (!ndlp)
0161 return;
0162
0163 vport = ndlp->vport;
0164 phba = vport->phba;
0165
0166 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
0167 "rport devlosscb: sid:x%x did:x%x flg:x%x",
0168 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
0169
0170 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
0171 "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
0172 "load_flag x%x refcnt %d state %d xpt x%x\n",
0173 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
0174 vport->load_flag, kref_read(&ndlp->kref),
0175 ndlp->nlp_state, ndlp->fc4_xpt_flags);
0176
0177
0178
0179
0180 if (vport->load_flag & FC_UNLOADING) {
0181 ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
0182 ndlp->rport = NULL;
0183
0184 ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
0185
0186
0187
0188 if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
0189 ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
0190
0191
0192
0193
0194 lpfc_nlp_put(ndlp);
0195 return;
0196 }
0197
0198 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
0199 return;
0200
0201 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
0202 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
0203 "6789 rport name %llx != node port name %llx",
0204 rport->port_name,
0205 wwn_to_u64(ndlp->nlp_portname.u.wwn));
0206
0207 evtp = &ndlp->dev_loss_evt;
0208
0209 if (!list_empty(&evtp->evt_listp)) {
0210 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
0211 "6790 rport name %llx dev_loss_evt pending\n",
0212 rport->port_name);
0213 return;
0214 }
0215
0216 spin_lock_irqsave(&ndlp->lock, iflags);
0217 ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
0218
0219
0220
0221
0222 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
0223 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
0224
0225
0226
0227
0228
0229 ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
0230 ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
0231 ndlp->rport = NULL;
0232 spin_unlock_irqrestore(&ndlp->lock, iflags);
0233
0234 if (phba->worker_thread) {
0235
0236
0237
0238 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
0239
0240 spin_lock_irqsave(&phba->hbalock, iflags);
0241 if (evtp->evt_arg1) {
0242 evtp->evt = LPFC_EVT_DEV_LOSS;
0243 list_add_tail(&evtp->evt_listp, &phba->work_list);
0244 lpfc_worker_wake_up(phba);
0245 }
0246 spin_unlock_irqrestore(&phba->hbalock, iflags);
0247 } else {
0248 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
0249 "3188 worker thread is stopped %s x%06x, "
0250 " rport x%px flg x%x load_flag x%x refcnt "
0251 "%d\n", __func__, ndlp->nlp_DID,
0252 ndlp->rport, ndlp->nlp_flag,
0253 vport->load_flag, kref_read(&ndlp->kref));
0254 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
0255 spin_lock_irqsave(&ndlp->lock, iflags);
0256
0257 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
0258 spin_unlock_irqrestore(&ndlp->lock, iflags);
0259 lpfc_disc_state_machine(vport, ndlp, NULL,
0260 NLP_EVT_DEVICE_RM);
0261 }
0262
0263 }
0264
0265 return;
0266 }
0267
0268
0269
0270
0271
0272
0273
0274
0275 static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
0276 {
0277 u16 keep;
0278 u32 difftime = 0, r, bucket;
0279 u64 *lta;
0280 int cpu;
0281 struct lpfc_vmid *vmp;
0282
0283 write_lock(&vport->vmid_lock);
0284
0285 if (!vport->cur_vmid_cnt)
0286 goto out;
0287
0288
0289 hash_for_each(vport->hash_table, bucket, vmp, hnode) {
0290 keep = 0;
0291 if (vmp->flag & LPFC_VMID_REGISTERED) {
0292
0293
0294 for_each_possible_cpu(cpu) {
0295
0296 lta = per_cpu_ptr(vmp->last_io_time, cpu);
0297 if (!lta)
0298 continue;
0299 difftime = (jiffies) - (*lta);
0300 if ((vport->vmid_inactivity_timeout *
0301 JIFFIES_PER_HR) > difftime) {
0302 keep = 1;
0303 break;
0304 }
0305 }
0306
0307
0308
0309 if (!keep) {
0310
0311 vmp->flag = LPFC_VMID_DE_REGISTER;
0312 write_unlock(&vport->vmid_lock);
0313 if (vport->vmid_priority_tagging)
0314 r = lpfc_vmid_uvem(vport, vmp, false);
0315 else
0316 r = lpfc_vmid_cmd(vport,
0317 SLI_CTAS_DAPP_IDENT,
0318 vmp);
0319
0320
0321
0322 write_lock(&vport->vmid_lock);
0323 if (!r) {
0324 struct lpfc_vmid *ht = vmp;
0325
0326 vport->cur_vmid_cnt--;
0327 ht->flag = LPFC_VMID_SLOT_FREE;
0328 free_percpu(ht->last_io_time);
0329 ht->last_io_time = NULL;
0330 hash_del(&ht->hnode);
0331 }
0332 }
0333 }
0334 }
0335 out:
0336 write_unlock(&vport->vmid_lock);
0337 }
0338
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349 static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
0350 {
0351 struct lpfc_vport *vport;
0352 struct lpfc_vport **vports;
0353 int i;
0354
0355 vports = lpfc_create_vport_work_array(phba);
0356 if (!vports)
0357 return;
0358
0359 for (i = 0; i <= phba->max_vports; i++) {
0360 if ((!vports[i]) && (i == 0))
0361 vport = phba->pport;
0362 else
0363 vport = vports[i];
0364 if (!vport)
0365 break;
0366
0367 lpfc_check_inactive_vmid_one(vport);
0368 }
0369 lpfc_destroy_vport_work_array(phba, vports);
0370 }
0371
0372
0373
0374
0375
0376
0377
0378
0379
0380
0381 void
0382 lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
0383 struct lpfc_nodelist *ndlp)
0384 {
0385 unsigned long iflags;
0386
0387 spin_lock_irqsave(&ndlp->lock, iflags);
0388 if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
0389 ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
0390 spin_unlock_irqrestore(&ndlp->lock, iflags);
0391 lpfc_nlp_get(ndlp);
0392 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
0393 "8438 Devloss timeout reversed on DID x%x "
0394 "refcnt %d ndlp %p flag x%x "
0395 "port_state = x%x\n",
0396 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
0397 ndlp->nlp_flag, vport->port_state);
0398 spin_lock_irqsave(&ndlp->lock, iflags);
0399 }
0400 spin_unlock_irqrestore(&ndlp->lock, iflags);
0401 }
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412
0413 static int
0414 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
0415 {
0416 struct lpfc_vport *vport;
0417 struct lpfc_hba *phba;
0418 uint8_t *name;
0419 int warn_on = 0;
0420 int fcf_inuse = 0;
0421 bool recovering = false;
0422 struct fc_vport *fc_vport = NULL;
0423 unsigned long iflags;
0424
0425 vport = ndlp->vport;
0426 name = (uint8_t *)&ndlp->nlp_portname;
0427 phba = vport->phba;
0428
0429 spin_lock_irqsave(&ndlp->lock, iflags);
0430 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
0431 spin_unlock_irqrestore(&ndlp->lock, iflags);
0432
0433 if (phba->sli_rev == LPFC_SLI_REV4)
0434 fcf_inuse = lpfc_fcf_inuse(phba);
0435
0436 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
0437 "rport devlosstmo:did:x%x type:x%x id:x%x",
0438 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);
0439
0440 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
0441 "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
0442 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
0443 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));
0444
0445
0446 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
0447 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
0448 "0284 Devloss timeout Ignored on "
0449 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
0450 "NPort x%x\n",
0451 *name, *(name+1), *(name+2), *(name+3),
0452 *(name+4), *(name+5), *(name+6), *(name+7),
0453 ndlp->nlp_DID);
0454 return fcf_inuse;
0455 }
0456
0457
0458 if (ndlp->nlp_type & NLP_FABRIC) {
0459 spin_lock_irqsave(&ndlp->lock, iflags);
0460
0461
0462
0463
0464 switch (ndlp->nlp_DID) {
0465 case Fabric_DID:
0466 fc_vport = vport->fc_vport;
0467 if (fc_vport &&
0468 fc_vport->vport_state == FC_VPORT_INITIALIZING)
0469 recovering = true;
0470 break;
0471 case Fabric_Cntl_DID:
0472 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
0473 recovering = true;
0474 break;
0475 case FDMI_DID:
0476 fallthrough;
0477 case NameServer_DID:
0478 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
0479 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
0480 recovering = true;
0481 break;
0482 }
0483 spin_unlock_irqrestore(&ndlp->lock, iflags);
0484
0485
0486
0487
0488
0489 if (recovering) {
0490 lpfc_printf_vlog(vport, KERN_INFO,
0491 LOG_DISCOVERY | LOG_NODE,
0492 "8436 Devloss timeout marked on "
0493 "DID x%x refcnt %d ndlp %p "
0494 "flag x%x port_state = x%x\n",
0495 ndlp->nlp_DID, kref_read(&ndlp->kref),
0496 ndlp, ndlp->nlp_flag,
0497 vport->port_state);
0498 spin_lock_irqsave(&ndlp->lock, iflags);
0499 ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
0500 spin_unlock_irqrestore(&ndlp->lock, iflags);
0501 } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
0502
0503
0504
0505
0506 lpfc_printf_vlog(vport, KERN_INFO,
0507 LOG_DISCOVERY | LOG_NODE,
0508 "8437 Devloss timeout ignored on "
0509 "DID x%x refcnt %d ndlp %p "
0510 "flag x%x port_state = x%x\n",
0511 ndlp->nlp_DID, kref_read(&ndlp->kref),
0512 ndlp, ndlp->nlp_flag,
0513 vport->port_state);
0514 return fcf_inuse;
0515 }
0516
0517 lpfc_nlp_put(ndlp);
0518 return fcf_inuse;
0519 }
0520
0521 if (ndlp->nlp_sid != NLP_NO_SID) {
0522 warn_on = 1;
0523 lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
0524 }
0525
0526 if (warn_on) {
0527 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
0528 "0203 Devloss timeout on "
0529 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
0530 "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
0531 *name, *(name+1), *(name+2), *(name+3),
0532 *(name+4), *(name+5), *(name+6), *(name+7),
0533 ndlp->nlp_DID, ndlp->nlp_flag,
0534 ndlp->nlp_state, ndlp->nlp_rpi,
0535 kref_read(&ndlp->kref));
0536 } else {
0537 lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
0538 "0204 Devloss timeout on "
0539 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
0540 "NPort x%06x Data: x%x x%x x%x\n",
0541 *name, *(name+1), *(name+2), *(name+3),
0542 *(name+4), *(name+5), *(name+6), *(name+7),
0543 ndlp->nlp_DID, ndlp->nlp_flag,
0544 ndlp->nlp_state, ndlp->nlp_rpi);
0545 }
0546
0547
0548
0549
0550 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
0551 ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
0552 return fcf_inuse;
0553 }
0554
0555 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
0556 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
0557
0558 return fcf_inuse;
0559 }
0560
0561 static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
0562 {
0563 struct lpfc_vport *vport;
0564 struct lpfc_vport **vports;
0565 int i;
0566
0567 vports = lpfc_create_vport_work_array(phba);
0568 if (!vports)
0569 return;
0570
0571 for (i = 0; i <= phba->max_vports; i++) {
0572 if ((!vports[i]) && (i == 0))
0573 vport = phba->pport;
0574 else
0575 vport = vports[i];
0576 if (!vport)
0577 break;
0578
0579 if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
0580 if (!lpfc_issue_els_qfpa(vport))
0581 vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
0582 }
0583 }
0584 lpfc_destroy_vport_work_array(phba, vports);
0585 }
0586
0587
0588
0589
0590
0591
0592
0593
0594
0595
0596
0597
0598
0599
0600
0601
0602
0603
0604
0605 static void
0606 lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
0607 uint32_t nlp_did)
0608 {
0609
0610
0611
0612 if (!fcf_inuse)
0613 return;
0614
0615 if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
0616 spin_lock_irq(&phba->hbalock);
0617 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
0618 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
0619 spin_unlock_irq(&phba->hbalock);
0620 return;
0621 }
0622 phba->hba_flag |= HBA_DEVLOSS_TMO;
0623 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
0624 "2847 Last remote node (x%x) using "
0625 "FCF devloss tmo\n", nlp_did);
0626 }
0627 if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
0628 spin_unlock_irq(&phba->hbalock);
0629 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
0630 "2868 Devloss tmo to FCF rediscovery "
0631 "in progress\n");
0632 return;
0633 }
0634 if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
0635 spin_unlock_irq(&phba->hbalock);
0636 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
0637 "2869 Devloss tmo to idle FIP engine, "
0638 "unreg in-use FCF and rescan.\n");
0639
0640 lpfc_unregister_fcf_rescan(phba);
0641 return;
0642 }
0643 spin_unlock_irq(&phba->hbalock);
0644 if (phba->hba_flag & FCF_TS_INPROG)
0645 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
0646 "2870 FCF table scan in progress\n");
0647 if (phba->hba_flag & FCF_RR_INPROG)
0648 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
0649 "2871 FLOGI roundrobin FCF failover "
0650 "in progress\n");
0651 }
0652 lpfc_unregister_unused_fcf(phba);
0653 }
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665 struct lpfc_fast_path_event *
0666 lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
0667 struct lpfc_fast_path_event *ret;
0668
0669
0670 if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
0671 return NULL;
0672
0673 ret = kzalloc(sizeof(struct lpfc_fast_path_event),
0674 GFP_ATOMIC);
0675 if (ret) {
0676 atomic_inc(&phba->fast_event_count);
0677 INIT_LIST_HEAD(&ret->work_evt.evt_listp);
0678 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
0679 }
0680 return ret;
0681 }
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691 void
0692 lpfc_free_fast_evt(struct lpfc_hba *phba,
0693 struct lpfc_fast_path_event *evt) {
0694
0695 atomic_dec(&phba->fast_event_count);
0696 kfree(evt);
0697 }
0698
0699
0700
0701
0702
0703
0704
0705
0706
0707
0708 static void
0709 lpfc_send_fastpath_evt(struct lpfc_hba *phba,
0710 struct lpfc_work_evt *evtp)
0711 {
0712 unsigned long evt_category, evt_sub_category;
0713 struct lpfc_fast_path_event *fast_evt_data;
0714 char *evt_data;
0715 uint32_t evt_data_size;
0716 struct Scsi_Host *shost;
0717
0718 fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
0719 work_evt);
0720
0721 evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
0722 evt_sub_category = (unsigned long) fast_evt_data->un.
0723 fabric_evt.subcategory;
0724 shost = lpfc_shost_from_vport(fast_evt_data->vport);
0725 if (evt_category == FC_REG_FABRIC_EVENT) {
0726 if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
0727 evt_data = (char *) &fast_evt_data->un.read_check_error;
0728 evt_data_size = sizeof(fast_evt_data->un.
0729 read_check_error);
0730 } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
0731 (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
0732 evt_data = (char *) &fast_evt_data->un.fabric_evt;
0733 evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
0734 } else {
0735 lpfc_free_fast_evt(phba, fast_evt_data);
0736 return;
0737 }
0738 } else if (evt_category == FC_REG_SCSI_EVENT) {
0739 switch (evt_sub_category) {
0740 case LPFC_EVENT_QFULL:
0741 case LPFC_EVENT_DEVBSY:
0742 evt_data = (char *) &fast_evt_data->un.scsi_evt;
0743 evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
0744 break;
0745 case LPFC_EVENT_CHECK_COND:
0746 evt_data = (char *) &fast_evt_data->un.check_cond_evt;
0747 evt_data_size = sizeof(fast_evt_data->un.
0748 check_cond_evt);
0749 break;
0750 case LPFC_EVENT_VARQUEDEPTH:
0751 evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
0752 evt_data_size = sizeof(fast_evt_data->un.
0753 queue_depth_evt);
0754 break;
0755 default:
0756 lpfc_free_fast_evt(phba, fast_evt_data);
0757 return;
0758 }
0759 } else {
0760 lpfc_free_fast_evt(phba, fast_evt_data);
0761 return;
0762 }
0763
0764 if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
0765 fc_host_post_vendor_event(shost,
0766 fc_get_event_number(),
0767 evt_data_size,
0768 evt_data,
0769 LPFC_NL_VENDOR_ID);
0770
0771 lpfc_free_fast_evt(phba, fast_evt_data);
0772 return;
0773 }
0774
0775 static void
0776 lpfc_work_list_done(struct lpfc_hba *phba)
0777 {
0778 struct lpfc_work_evt *evtp = NULL;
0779 struct lpfc_nodelist *ndlp;
0780 int free_evt;
0781 int fcf_inuse;
0782 uint32_t nlp_did;
0783 bool hba_pci_err;
0784
0785 spin_lock_irq(&phba->hbalock);
0786 while (!list_empty(&phba->work_list)) {
0787 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
0788 evt_listp);
0789 spin_unlock_irq(&phba->hbalock);
0790 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
0791 free_evt = 1;
0792 switch (evtp->evt) {
0793 case LPFC_EVT_ELS_RETRY:
0794 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
0795 if (!hba_pci_err) {
0796 lpfc_els_retry_delay_handler(ndlp);
0797 free_evt = 0;
0798 }
0799
0800
0801
0802 lpfc_nlp_put(ndlp);
0803 break;
0804 case LPFC_EVT_DEV_LOSS:
0805 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
0806 fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
0807 free_evt = 0;
0808
0809
0810
0811 nlp_did = ndlp->nlp_DID;
0812 lpfc_nlp_put(ndlp);
0813 if (phba->sli_rev == LPFC_SLI_REV4)
0814 lpfc_sli4_post_dev_loss_tmo_handler(phba,
0815 fcf_inuse,
0816 nlp_did);
0817 break;
0818 case LPFC_EVT_RECOVER_PORT:
0819 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
0820 if (!hba_pci_err) {
0821 lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
0822 free_evt = 0;
0823 }
0824
0825
0826
0827 lpfc_nlp_put(ndlp);
0828 break;
0829 case LPFC_EVT_ONLINE:
0830 if (phba->link_state < LPFC_LINK_DOWN)
0831 *(int *) (evtp->evt_arg1) = lpfc_online(phba);
0832 else
0833 *(int *) (evtp->evt_arg1) = 0;
0834 complete((struct completion *)(evtp->evt_arg2));
0835 break;
0836 case LPFC_EVT_OFFLINE_PREP:
0837 if (phba->link_state >= LPFC_LINK_DOWN)
0838 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
0839 *(int *)(evtp->evt_arg1) = 0;
0840 complete((struct completion *)(evtp->evt_arg2));
0841 break;
0842 case LPFC_EVT_OFFLINE:
0843 lpfc_offline(phba);
0844 lpfc_sli_brdrestart(phba);
0845 *(int *)(evtp->evt_arg1) =
0846 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
0847 lpfc_unblock_mgmt_io(phba);
0848 complete((struct completion *)(evtp->evt_arg2));
0849 break;
0850 case LPFC_EVT_WARM_START:
0851 lpfc_offline(phba);
0852 lpfc_reset_barrier(phba);
0853 lpfc_sli_brdreset(phba);
0854 lpfc_hba_down_post(phba);
0855 *(int *)(evtp->evt_arg1) =
0856 lpfc_sli_brdready(phba, HS_MBRDY);
0857 lpfc_unblock_mgmt_io(phba);
0858 complete((struct completion *)(evtp->evt_arg2));
0859 break;
0860 case LPFC_EVT_KILL:
0861 lpfc_offline(phba);
0862 *(int *)(evtp->evt_arg1)
0863 = (phba->pport->stopped)
0864 ? 0 : lpfc_sli_brdkill(phba);
0865 lpfc_unblock_mgmt_io(phba);
0866 complete((struct completion *)(evtp->evt_arg2));
0867 break;
0868 case LPFC_EVT_FASTPATH_MGMT_EVT:
0869 lpfc_send_fastpath_evt(phba, evtp);
0870 free_evt = 0;
0871 break;
0872 case LPFC_EVT_RESET_HBA:
0873 if (!(phba->pport->load_flag & FC_UNLOADING))
0874 lpfc_reset_hba(phba);
0875 break;
0876 }
0877 if (free_evt)
0878 kfree(evtp);
0879 spin_lock_irq(&phba->hbalock);
0880 }
0881 spin_unlock_irq(&phba->hbalock);
0882
0883 }
0884
0885 static void
0886 lpfc_work_done(struct lpfc_hba *phba)
0887 {
0888 struct lpfc_sli_ring *pring;
0889 uint32_t ha_copy, status, control, work_port_events;
0890 struct lpfc_vport **vports;
0891 struct lpfc_vport *vport;
0892 int i;
0893 bool hba_pci_err;
0894
0895 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
0896 spin_lock_irq(&phba->hbalock);
0897 ha_copy = phba->work_ha;
0898 phba->work_ha = 0;
0899 spin_unlock_irq(&phba->hbalock);
0900 if (hba_pci_err)
0901 ha_copy = 0;
0902
0903
0904 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
0905 lpfc_sli4_post_async_mbox(phba);
0906
0907 if (ha_copy & HA_ERATT) {
0908
0909 lpfc_handle_eratt(phba);
0910
0911 if (phba->fw_dump_cmpl) {
0912 complete(phba->fw_dump_cmpl);
0913 phba->fw_dump_cmpl = NULL;
0914 }
0915 }
0916
0917 if (ha_copy & HA_MBATT)
0918 lpfc_sli_handle_mb_event(phba);
0919
0920 if (ha_copy & HA_LATT)
0921 lpfc_handle_latt(phba);
0922
0923
0924 if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
0925 if (phba->pport->work_port_events &
0926 WORKER_CHECK_VMID_ISSUE_QFPA) {
0927 lpfc_check_vmid_qfpa_issue(phba);
0928 phba->pport->work_port_events &=
0929 ~WORKER_CHECK_VMID_ISSUE_QFPA;
0930 }
0931 if (phba->pport->work_port_events &
0932 WORKER_CHECK_INACTIVE_VMID) {
0933 lpfc_check_inactive_vmid(phba);
0934 phba->pport->work_port_events &=
0935 ~WORKER_CHECK_INACTIVE_VMID;
0936 }
0937 }
0938
0939
0940 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
0941 if (phba->hba_flag & HBA_RRQ_ACTIVE)
0942 lpfc_handle_rrq_active(phba);
0943 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
0944 lpfc_sli4_els_xri_abort_event_proc(phba);
0945 if (phba->hba_flag & ASYNC_EVENT)
0946 lpfc_sli4_async_event_proc(phba);
0947 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
0948 spin_lock_irq(&phba->hbalock);
0949 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
0950 spin_unlock_irq(&phba->hbalock);
0951 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
0952 }
0953 if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
0954 lpfc_sli4_fcf_redisc_event_proc(phba);
0955 }
0956
0957 vports = lpfc_create_vport_work_array(phba);
0958 if (vports != NULL)
0959 for (i = 0; i <= phba->max_vports; i++) {
0960
0961
0962
0963
0964 if (vports[i] == NULL && i == 0)
0965 vport = phba->pport;
0966 else
0967 vport = vports[i];
0968 if (vport == NULL)
0969 break;
0970 spin_lock_irq(&vport->work_port_lock);
0971 work_port_events = vport->work_port_events;
0972 vport->work_port_events &= ~work_port_events;
0973 spin_unlock_irq(&vport->work_port_lock);
0974 if (hba_pci_err)
0975 continue;
0976 if (work_port_events & WORKER_DISC_TMO)
0977 lpfc_disc_timeout_handler(vport);
0978 if (work_port_events & WORKER_ELS_TMO)
0979 lpfc_els_timeout_handler(vport);
0980 if (work_port_events & WORKER_HB_TMO)
0981 lpfc_hb_timeout_handler(phba);
0982 if (work_port_events & WORKER_MBOX_TMO)
0983 lpfc_mbox_timeout_handler(phba);
0984 if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
0985 lpfc_unblock_fabric_iocbs(phba);
0986 if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
0987 lpfc_ramp_down_queue_handler(phba);
0988 if (work_port_events & WORKER_DELAYED_DISC_TMO)
0989 lpfc_delayed_disc_timeout_handler(vport);
0990 }
0991 lpfc_destroy_vport_work_array(phba, vports);
0992
0993 pring = lpfc_phba_elsring(phba);
0994 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
0995 status >>= (4*LPFC_ELS_RING);
0996 if (pring && (status & HA_RXMASK ||
0997 pring->flag & LPFC_DEFERRED_RING_EVENT ||
0998 phba->hba_flag & HBA_SP_QUEUE_EVT)) {
0999 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
1000 pring->flag |= LPFC_DEFERRED_RING_EVENT;
1001
1002 if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
1003 set_bit(LPFC_DATA_READY, &phba->data_flags);
1004 } else {
1005
1006
1007
1008 if (phba->link_state >= LPFC_LINK_DOWN ||
1009 phba->link_flag & LS_MDS_LOOPBACK) {
1010 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
1011 lpfc_sli_handle_slow_ring_event(phba, pring,
1012 (status &
1013 HA_RXMASK));
1014 }
1015 }
1016 if (phba->sli_rev == LPFC_SLI_REV4)
1017 lpfc_drain_txq(phba);
1018
1019
1020
1021 if (phba->sli_rev <= LPFC_SLI_REV3) {
1022 spin_lock_irq(&phba->hbalock);
1023 control = readl(phba->HCregaddr);
1024 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
1025 lpfc_debugfs_slow_ring_trc(phba,
1026 "WRK Enable ring: cntl:x%x hacopy:x%x",
1027 control, ha_copy, 0);
1028
1029 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
1030 writel(control, phba->HCregaddr);
1031 readl(phba->HCregaddr);
1032 } else {
1033 lpfc_debugfs_slow_ring_trc(phba,
1034 "WRK Ring ok: cntl:x%x hacopy:x%x",
1035 control, ha_copy, 0);
1036 }
1037 spin_unlock_irq(&phba->hbalock);
1038 }
1039 }
1040 lpfc_work_list_done(phba);
1041 }
1042
1043 int
1044 lpfc_do_work(void *p)
1045 {
1046 struct lpfc_hba *phba = p;
1047 int rc;
1048
1049 set_user_nice(current, MIN_NICE);
1050 current->flags |= PF_NOFREEZE;
1051 phba->data_flags = 0;
1052
1053 while (!kthread_should_stop()) {
1054
1055 rc = wait_event_interruptible(phba->work_waitq,
1056 (test_and_clear_bit(LPFC_DATA_READY,
1057 &phba->data_flags)
1058 || kthread_should_stop()));
1059
1060 if (rc) {
1061 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1062 "0433 Wakeup on signal: rc=x%x\n", rc);
1063 break;
1064 }
1065
1066
1067 lpfc_work_done(phba);
1068 }
1069 phba->worker_thread = NULL;
1070 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1071 "0432 Worker thread stopped.\n");
1072 return 0;
1073 }
1074
1075
1076
1077
1078
1079
1080 int
1081 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
1082 uint32_t evt)
1083 {
1084 struct lpfc_work_evt *evtp;
1085 unsigned long flags;
1086
1087
1088
1089
1090
1091 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
1092 if (!evtp)
1093 return 0;
1094
1095 evtp->evt_arg1 = arg1;
1096 evtp->evt_arg2 = arg2;
1097 evtp->evt = evt;
1098
1099 spin_lock_irqsave(&phba->hbalock, flags);
1100 list_add_tail(&evtp->evt_listp, &phba->work_list);
1101 spin_unlock_irqrestore(&phba->hbalock, flags);
1102
1103 lpfc_worker_wake_up(phba);
1104
1105 return 1;
1106 }
1107
1108 void
1109 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
1110 {
1111 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1112 struct lpfc_hba *phba = vport->phba;
1113 struct lpfc_nodelist *ndlp, *next_ndlp;
1114
1115 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1116 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
1117
1118
1119
1120
1121
1122 if (ndlp->nlp_DID == Fabric_DID) {
1123 if (ndlp->nlp_prev_state ==
1124 NLP_STE_UNUSED_NODE &&
1125 !ndlp->fc4_xpt_flags)
1126 lpfc_nlp_put(ndlp);
1127 }
1128 continue;
1129 }
1130
1131 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
1132 ((vport->port_type == LPFC_NPIV_PORT) &&
1133 ((ndlp->nlp_DID == NameServer_DID) ||
1134 (ndlp->nlp_DID == FDMI_DID) ||
1135 (ndlp->nlp_DID == Fabric_Cntl_DID))))
1136 lpfc_unreg_rpi(vport, ndlp);
1137
1138
1139 if ((phba->sli_rev < LPFC_SLI_REV4) &&
1140 (!remove && ndlp->nlp_type & NLP_FABRIC))
1141 continue;
1142
1143
1144 if (phba->nvmet_support &&
1145 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
1146 lpfc_nvmet_invalidate_host(phba, ndlp);
1147
1148 lpfc_disc_state_machine(vport, ndlp, NULL,
1149 remove
1150 ? NLP_EVT_DEVICE_RM
1151 : NLP_EVT_DEVICE_RECOVERY);
1152 }
1153 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
1154 if (phba->sli_rev == LPFC_SLI_REV4)
1155 lpfc_sli4_unreg_all_rpis(vport);
1156 lpfc_mbx_unreg_vpi(vport);
1157 spin_lock_irq(shost->host_lock);
1158 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1159 spin_unlock_irq(shost->host_lock);
1160 }
1161 }
1162
1163 void
1164 lpfc_port_link_failure(struct lpfc_vport *vport)
1165 {
1166 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
1167
1168
1169 lpfc_cleanup_rcv_buffers(vport);
1170
1171
1172 lpfc_els_flush_rscn(vport);
1173
1174
1175 lpfc_els_flush_cmd(vport);
1176
1177 lpfc_cleanup_rpis(vport, 0);
1178
1179
1180 lpfc_can_disctmo(vport);
1181 }
1182
1183 void
1184 lpfc_linkdown_port(struct lpfc_vport *vport)
1185 {
1186 struct lpfc_hba *phba = vport->phba;
1187 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1188
1189 if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
1190 fc_host_post_event(shost, fc_get_event_number(),
1191 FCH_EVT_LINKDOWN, 0);
1192
1193 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1194 "Link Down: state:x%x rtry:x%x flg:x%x",
1195 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1196
1197 lpfc_port_link_failure(vport);
1198
1199
1200 spin_lock_irq(shost->host_lock);
1201 vport->fc_flag &= ~FC_DISC_DELAYED;
1202 spin_unlock_irq(shost->host_lock);
1203 del_timer_sync(&vport->delayed_disc_tmo);
1204
1205 if (phba->sli_rev == LPFC_SLI_REV4 &&
1206 vport->port_type == LPFC_PHYSICAL_PORT &&
1207 phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
1208
1209 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
1210 }
1211 }
1212
1213 int
1214 lpfc_linkdown(struct lpfc_hba *phba)
1215 {
1216 struct lpfc_vport *vport = phba->pport;
1217 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1218 struct lpfc_vport **vports;
1219 LPFC_MBOXQ_t *mb;
1220 int i;
1221 int offline;
1222
1223 if (phba->link_state == LPFC_LINK_DOWN)
1224 return 0;
1225
1226
1227 lpfc_scsi_dev_block(phba);
1228 offline = pci_channel_offline(phba->pcidev);
1229
1230 phba->defer_flogi_acc_flag = false;
1231
1232
1233 phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;
1234
1235 spin_lock_irq(&phba->hbalock);
1236 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
1237 spin_unlock_irq(&phba->hbalock);
1238 if (phba->link_state > LPFC_LINK_DOWN) {
1239 phba->link_state = LPFC_LINK_DOWN;
1240 if (phba->sli4_hba.conf_trunk) {
1241 phba->trunk_link.link0.state = 0;
1242 phba->trunk_link.link1.state = 0;
1243 phba->trunk_link.link2.state = 0;
1244 phba->trunk_link.link3.state = 0;
1245 phba->sli4_hba.link_state.logical_speed =
1246 LPFC_LINK_SPEED_UNKNOWN;
1247 }
1248 spin_lock_irq(shost->host_lock);
1249 phba->pport->fc_flag &= ~FC_LBIT;
1250 spin_unlock_irq(shost->host_lock);
1251 }
1252 vports = lpfc_create_vport_work_array(phba);
1253 if (vports != NULL) {
1254 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1255
1256 lpfc_linkdown_port(vports[i]);
1257
1258 vports[i]->fc_myDID = 0;
1259
1260 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
1261 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
1262 if (phba->nvmet_support)
1263 lpfc_nvmet_update_targetport(phba);
1264 else
1265 lpfc_nvme_update_localport(vports[i]);
1266 }
1267 }
1268 }
1269 lpfc_destroy_vport_work_array(phba, vports);
1270
1271
1272 if (phba->sli_rev > LPFC_SLI_REV3 || offline)
1273 goto skip_unreg_did;
1274
1275 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1276 if (mb) {
1277 lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
1278 mb->vport = vport;
1279 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1280 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
1281 == MBX_NOT_FINISHED) {
1282 mempool_free(mb, phba->mbox_mem_pool);
1283 }
1284 }
1285
1286 skip_unreg_did:
1287
1288 if (phba->pport->fc_flag & FC_PT2PT) {
1289 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1290 if (mb) {
1291 lpfc_config_link(phba, mb);
1292 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1293 mb->vport = vport;
1294 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
1295 == MBX_NOT_FINISHED) {
1296 mempool_free(mb, phba->mbox_mem_pool);
1297 }
1298 }
1299 spin_lock_irq(shost->host_lock);
1300 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
1301 phba->pport->rcv_flogi_cnt = 0;
1302 spin_unlock_irq(shost->host_lock);
1303 }
1304 return 0;
1305 }
1306
1307 static void
1308 lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
1309 {
1310 struct lpfc_nodelist *ndlp;
1311
1312 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1313 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
1314
1315 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1316 continue;
1317 if (ndlp->nlp_type & NLP_FABRIC) {
1318
1319
1320
1321 if (ndlp->nlp_DID != Fabric_DID)
1322 lpfc_unreg_rpi(vport, ndlp);
1323 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1324 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
1325
1326
1327
1328 lpfc_unreg_rpi(vport, ndlp);
1329 }
1330 }
1331 }
1332
1333 static void
1334 lpfc_linkup_port(struct lpfc_vport *vport)
1335 {
1336 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1337 struct lpfc_hba *phba = vport->phba;
1338
1339 if ((vport->load_flag & FC_UNLOADING) != 0)
1340 return;
1341
1342 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1343 "Link Up: top:x%x speed:x%x flg:x%x",
1344 phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
1345
1346
1347 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1348 (vport != phba->pport))
1349 return;
1350
1351 if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
1352 fc_host_post_event(shost, fc_get_event_number(),
1353 FCH_EVT_LINKUP, 0);
1354
1355 spin_lock_irq(shost->host_lock);
1356 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
1357 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
1358 vport->fc_flag |= FC_NDISC_ACTIVE;
1359 vport->fc_ns_retry = 0;
1360 spin_unlock_irq(shost->host_lock);
1361 lpfc_setup_fdmi_mask(vport);
1362
1363 lpfc_linkup_cleanup_nodes(vport);
1364 }
1365
1366 static int
1367 lpfc_linkup(struct lpfc_hba *phba)
1368 {
1369 struct lpfc_vport **vports;
1370 int i;
1371 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
1372
1373 phba->link_state = LPFC_LINK_UP;
1374
1375
1376 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
1377 del_timer_sync(&phba->fabric_block_timer);
1378
1379 vports = lpfc_create_vport_work_array(phba);
1380 if (vports != NULL)
1381 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1382 lpfc_linkup_port(vports[i]);
1383 lpfc_destroy_vport_work_array(phba, vports);
1384
1385
1386
1387
1388
1389 spin_lock_irq(shost->host_lock);
1390 phba->pport->rcv_flogi_cnt = 0;
1391 spin_unlock_irq(shost->host_lock);
1392
1393
1394 phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_RHBA_CMPL);
1395 phba->defer_flogi_acc_flag = false;
1396
1397 return 0;
1398 }
1399
1400
1401
1402
1403
1404
1405
1406 static void
1407 lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1408 {
1409 struct lpfc_vport *vport = pmb->vport;
1410 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1411 struct lpfc_sli *psli = &phba->sli;
1412 MAILBOX_t *mb = &pmb->u.mb;
1413 uint32_t control;
1414
1415
1416 psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1417 psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1418
1419
1420 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
1421
1422 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1423 "0320 CLEAR_LA mbxStatus error x%x hba "
1424 "state x%x\n",
1425 mb->mbxStatus, vport->port_state);
1426 phba->link_state = LPFC_HBA_ERROR;
1427 goto out;
1428 }
1429
1430 if (vport->port_type == LPFC_PHYSICAL_PORT)
1431 phba->link_state = LPFC_HBA_READY;
1432
1433 spin_lock_irq(&phba->hbalock);
1434 psli->sli_flag |= LPFC_PROCESS_LA;
1435 control = readl(phba->HCregaddr);
1436 control |= HC_LAINT_ENA;
1437 writel(control, phba->HCregaddr);
1438 readl(phba->HCregaddr);
1439 spin_unlock_irq(&phba->hbalock);
1440 mempool_free(pmb, phba->mbox_mem_pool);
1441 return;
1442
1443 out:
1444
1445 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1446 "0225 Device Discovery completes\n");
1447 mempool_free(pmb, phba->mbox_mem_pool);
1448
1449 spin_lock_irq(shost->host_lock);
1450 vport->fc_flag &= ~FC_ABORT_DISCOVERY;
1451 spin_unlock_irq(shost->host_lock);
1452
1453 lpfc_can_disctmo(vport);
1454
1455
1456
1457 spin_lock_irq(&phba->hbalock);
1458 psli->sli_flag |= LPFC_PROCESS_LA;
1459 control = readl(phba->HCregaddr);
1460 control |= HC_LAINT_ENA;
1461 writel(control, phba->HCregaddr);
1462 readl(phba->HCregaddr);
1463 spin_unlock_irq(&phba->hbalock);
1464
1465 return;
1466 }
1467
1468 void
1469 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1470 {
1471 struct lpfc_vport *vport = pmb->vport;
1472 LPFC_MBOXQ_t *sparam_mb;
1473 u16 status = pmb->u.mb.mbxStatus;
1474 int rc;
1475
1476 mempool_free(pmb, phba->mbox_mem_pool);
1477
1478 if (status)
1479 goto out;
1480
1481
1482 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1483 !(phba->hba_flag & HBA_FCOE_MODE) &&
1484 (phba->link_flag & LS_LOOPBACK_MODE))
1485 return;
1486
1487 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
1488 vport->fc_flag & FC_PUBLIC_LOOP &&
1489 !(vport->fc_flag & FC_LBIT)) {
1490
1491
1492
1493
1494 lpfc_set_disctmo(vport);
1495 return;
1496 }
1497
1498
1499
1500
1501 if (vport->port_state != LPFC_FLOGI) {
1502
1503
1504
1505 if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
1506 !(phba->link_flag & LS_LOOPBACK_MODE)) {
1507 sparam_mb = mempool_alloc(phba->mbox_mem_pool,
1508 GFP_KERNEL);
1509 if (!sparam_mb)
1510 goto sparam_out;
1511
1512 rc = lpfc_read_sparam(phba, sparam_mb, 0);
1513 if (rc) {
1514 mempool_free(sparam_mb, phba->mbox_mem_pool);
1515 goto sparam_out;
1516 }
1517 sparam_mb->vport = vport;
1518 sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1519 rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
1520 if (rc == MBX_NOT_FINISHED) {
1521 lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
1522 MBOX_THD_UNLOCKED);
1523 goto sparam_out;
1524 }
1525
1526 phba->hba_flag |= HBA_DEFER_FLOGI;
1527 } else {
1528 lpfc_initial_flogi(vport);
1529 }
1530 } else {
1531 if (vport->fc_flag & FC_PT2PT)
1532 lpfc_disc_start(vport);
1533 }
1534 return;
1535
1536 out:
1537 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1538 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
1539 status, vport->port_state);
1540
1541 sparam_out:
1542 lpfc_linkdown(phba);
1543
1544 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1545 "0200 CONFIG_LINK bad hba state x%x\n",
1546 vport->port_state);
1547
1548 lpfc_issue_clear_la(phba, vport);
1549 return;
1550 }
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560 void
1561 lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
1562 {
1563 struct lpfc_fcf_pri *fcf_pri;
1564 struct lpfc_fcf_pri *next_fcf_pri;
1565 memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
1566 spin_lock_irq(&phba->hbalock);
1567 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
1568 &phba->fcf.fcf_pri_list, list) {
1569 list_del_init(&fcf_pri->list);
1570 fcf_pri->fcf_rec.flag = 0;
1571 }
1572 spin_unlock_irq(&phba->hbalock);
1573 }
1574 static void
1575 lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1576 {
1577 struct lpfc_vport *vport = mboxq->vport;
1578
1579 if (mboxq->u.mb.mbxStatus) {
1580 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1581 "2017 REG_FCFI mbxStatus error x%x "
1582 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
1583 vport->port_state);
1584 goto fail_out;
1585 }
1586
1587
1588 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1589
1590 spin_lock_irq(&phba->hbalock);
1591 phba->fcf.fcf_flag |= FCF_REGISTERED;
1592 spin_unlock_irq(&phba->hbalock);
1593
1594
1595 if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
1596 lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
1597 goto fail_out;
1598
1599
1600 spin_lock_irq(&phba->hbalock);
1601 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1602 phba->hba_flag &= ~FCF_TS_INPROG;
1603 if (vport->port_state != LPFC_FLOGI) {
1604 phba->hba_flag |= FCF_RR_INPROG;
1605 spin_unlock_irq(&phba->hbalock);
1606 lpfc_issue_init_vfi(vport);
1607 goto out;
1608 }
1609 spin_unlock_irq(&phba->hbalock);
1610 goto out;
1611
1612 fail_out:
1613 spin_lock_irq(&phba->hbalock);
1614 phba->hba_flag &= ~FCF_RR_INPROG;
1615 spin_unlock_irq(&phba->hbalock);
1616 out:
1617 mempool_free(mboxq, phba->mbox_mem_pool);
1618 }
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629 static uint32_t
1630 lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1631 {
1632 if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
1633 return 0;
1634 if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
1635 return 0;
1636 if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
1637 return 0;
1638 if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
1639 return 0;
1640 if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
1641 return 0;
1642 if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
1643 return 0;
1644 if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
1645 return 0;
1646 if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
1647 return 0;
1648 return 1;
1649 }
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660 static uint32_t
1661 lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1662 {
1663 if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
1664 return 0;
1665 if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
1666 return 0;
1667 if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
1668 return 0;
1669 if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
1670 return 0;
1671 if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
1672 return 0;
1673 if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
1674 return 0;
1675 if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
1676 return 0;
1677 if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
1678 return 0;
1679 return 1;
1680 }
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691 static uint32_t
1692 lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
1693 {
1694 if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
1695 return 0;
1696 if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
1697 return 0;
1698 if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
1699 return 0;
1700 if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
1701 return 0;
1702 if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
1703 return 0;
1704 if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
1705 return 0;
1706 return 1;
1707 }
1708
1709 static bool
1710 lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
1711 {
1712 return (curr_vlan_id == new_vlan_id);
1713 }
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725 static void
1726 __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
1727 struct fcf_record *new_fcf_record
1728 )
1729 {
1730 struct lpfc_fcf_pri *fcf_pri;
1731
1732 fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1733 fcf_pri->fcf_rec.fcf_index = fcf_index;
1734
1735 fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
1736
1737 }
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747 static void
1748 lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
1749 struct fcf_record *new_fcf_record)
1750 {
1751
1752 fcf_rec->fabric_name[0] =
1753 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1754 fcf_rec->fabric_name[1] =
1755 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1756 fcf_rec->fabric_name[2] =
1757 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1758 fcf_rec->fabric_name[3] =
1759 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1760 fcf_rec->fabric_name[4] =
1761 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1762 fcf_rec->fabric_name[5] =
1763 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1764 fcf_rec->fabric_name[6] =
1765 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1766 fcf_rec->fabric_name[7] =
1767 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1768
1769 fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1770 fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1771 fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1772 fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1773 fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1774 fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1775
1776 fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1777
1778 fcf_rec->priority = new_fcf_record->fip_priority;
1779
1780 fcf_rec->switch_name[0] =
1781 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
1782 fcf_rec->switch_name[1] =
1783 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
1784 fcf_rec->switch_name[2] =
1785 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
1786 fcf_rec->switch_name[3] =
1787 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
1788 fcf_rec->switch_name[4] =
1789 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
1790 fcf_rec->switch_name[5] =
1791 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
1792 fcf_rec->switch_name[6] =
1793 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
1794 fcf_rec->switch_name[7] =
1795 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
1796 }
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811 static void
1812 __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1813 struct fcf_record *new_fcf_record, uint32_t addr_mode,
1814 uint16_t vlan_id, uint32_t flag)
1815 {
1816 lockdep_assert_held(&phba->hbalock);
1817
1818
1819 lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
1820
1821 fcf_rec->addr_mode = addr_mode;
1822 fcf_rec->vlan_id = vlan_id;
1823 fcf_rec->flag |= (flag | RECORD_VALID);
1824 __lpfc_update_fcf_record_pri(phba,
1825 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
1826 new_fcf_record);
1827 }
1828
1829
1830
1831
1832
1833
1834
1835
1836 static void
1837 lpfc_register_fcf(struct lpfc_hba *phba)
1838 {
1839 LPFC_MBOXQ_t *fcf_mbxq;
1840 int rc;
1841
1842 spin_lock_irq(&phba->hbalock);
1843
1844 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1845 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1846 spin_unlock_irq(&phba->hbalock);
1847 return;
1848 }
1849
1850
1851 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1852 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1853 phba->hba_flag &= ~FCF_TS_INPROG;
1854 if (phba->pport->port_state != LPFC_FLOGI &&
1855 phba->pport->fc_flag & FC_FABRIC) {
1856 phba->hba_flag |= FCF_RR_INPROG;
1857 spin_unlock_irq(&phba->hbalock);
1858 lpfc_initial_flogi(phba->pport);
1859 return;
1860 }
1861 spin_unlock_irq(&phba->hbalock);
1862 return;
1863 }
1864 spin_unlock_irq(&phba->hbalock);
1865
1866 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1867 if (!fcf_mbxq) {
1868 spin_lock_irq(&phba->hbalock);
1869 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1870 spin_unlock_irq(&phba->hbalock);
1871 return;
1872 }
1873
1874 lpfc_reg_fcfi(phba, fcf_mbxq);
1875 fcf_mbxq->vport = phba->pport;
1876 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1877 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1878 if (rc == MBX_NOT_FINISHED) {
1879 spin_lock_irq(&phba->hbalock);
1880 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1881 spin_unlock_irq(&phba->hbalock);
1882 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1883 }
1884
1885 return;
1886 }
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906 static int
1907 lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1908 struct fcf_record *new_fcf_record,
1909 uint32_t *boot_flag, uint32_t *addr_mode,
1910 uint16_t *vlan_id)
1911 {
1912 struct lpfc_fcf_conn_entry *conn_entry;
1913 int i, j, fcf_vlan_id = 0;
1914
1915
1916 for (i = 0; i < 512; i++) {
1917 if (new_fcf_record->vlan_bitmap[i]) {
1918 fcf_vlan_id = i * 8;
1919 j = 0;
1920 while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
1921 j++;
1922 fcf_vlan_id++;
1923 }
1924 break;
1925 }
1926 }
1927
1928
1929 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1930 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
1931 bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
1932 return 0;
1933
1934 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
1935 *boot_flag = 0;
1936 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1937 new_fcf_record);
1938 if (phba->valid_vlan)
1939 *vlan_id = phba->vlan_id;
1940 else
1941 *vlan_id = LPFC_FCOE_NULL_VID;
1942 return 1;
1943 }
1944
1945
1946
1947
1948
1949 if (list_empty(&phba->fcf_conn_rec_list)) {
1950 *boot_flag = 0;
1951 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1952 new_fcf_record);
1953
1954
1955
1956
1957
1958 if (*addr_mode & LPFC_FCF_FPMA)
1959 *addr_mode = LPFC_FCF_FPMA;
1960
1961
1962 if (fcf_vlan_id)
1963 *vlan_id = fcf_vlan_id;
1964 else
1965 *vlan_id = LPFC_FCOE_NULL_VID;
1966 return 1;
1967 }
1968
1969 list_for_each_entry(conn_entry,
1970 &phba->fcf_conn_rec_list, list) {
1971 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1972 continue;
1973
1974 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1975 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1976 new_fcf_record))
1977 continue;
1978 if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
1979 !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
1980 new_fcf_record))
1981 continue;
1982 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1983
1984
1985
1986
1987 if (!(new_fcf_record->vlan_bitmap
1988 [conn_entry->conn_rec.vlan_tag / 8] &
1989 (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1990 continue;
1991 }
1992
1993
1994
1995
1996
1997 if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
1998 & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
1999 continue;
2000
2001
2002
2003
2004
2005 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
2006 !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
2007
2008
2009
2010
2011 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
2012 !(bf_get(lpfc_fcf_record_mac_addr_prov,
2013 new_fcf_record) & LPFC_FCF_SPMA))
2014 continue;
2015
2016
2017
2018
2019 if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
2020 !(bf_get(lpfc_fcf_record_mac_addr_prov,
2021 new_fcf_record) & LPFC_FCF_FPMA))
2022 continue;
2023 }
2024
2025
2026
2027
2028 if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
2029 *boot_flag = 1;
2030 else
2031 *boot_flag = 0;
2032
2033
2034
2035
2036
2037
2038 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
2039 new_fcf_record);
2040
2041
2042
2043
2044 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
2045 (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
2046 *addr_mode = (conn_entry->conn_rec.flags &
2047 FCFCNCT_AM_SPMA) ?
2048 LPFC_FCF_SPMA : LPFC_FCF_FPMA;
2049
2050
2051
2052
2053 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
2054 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
2055 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
2056 (*addr_mode & LPFC_FCF_SPMA))
2057 *addr_mode = LPFC_FCF_SPMA;
2058 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
2059 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
2060 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
2061 (*addr_mode & LPFC_FCF_FPMA))
2062 *addr_mode = LPFC_FCF_FPMA;
2063
2064
2065 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
2066 *vlan_id = conn_entry->conn_rec.vlan_tag;
2067
2068
2069
2070
2071 else if (fcf_vlan_id)
2072 *vlan_id = fcf_vlan_id;
2073 else
2074 *vlan_id = LPFC_FCOE_NULL_VID;
2075
2076 return 1;
2077 }
2078
2079 return 0;
2080 }
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091 int
2092 lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
2093 {
2094
2095
2096
2097
2098 if ((phba->link_state >= LPFC_LINK_UP) &&
2099 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
2100 return 0;
2101
2102 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2103 "2768 Pending link or FCF event during current "
2104 "handling of the previous event: link_state:x%x, "
2105 "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
2106 phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
2107 phba->fcoe_eventtag);
2108
2109 spin_lock_irq(&phba->hbalock);
2110 phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
2111 spin_unlock_irq(&phba->hbalock);
2112
2113 if (phba->link_state >= LPFC_LINK_UP) {
2114 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
2115 "2780 Restart FCF table scan due to "
2116 "pending FCF event:evt_tag_at_scan:x%x, "
2117 "evt_tag_current:x%x\n",
2118 phba->fcoe_eventtag_at_fcf_scan,
2119 phba->fcoe_eventtag);
2120 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
2121 } else {
2122
2123
2124
2125
2126 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
2127 "2833 Stop FCF discovery process due to link "
2128 "state change (x%x)\n", phba->link_state);
2129 spin_lock_irq(&phba->hbalock);
2130 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
2131 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
2132 spin_unlock_irq(&phba->hbalock);
2133 }
2134
2135
2136 if (unreg_fcf) {
2137 spin_lock_irq(&phba->hbalock);
2138 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
2139 spin_unlock_irq(&phba->hbalock);
2140 lpfc_sli4_unregister_fcf(phba);
2141 }
2142 return 1;
2143 }
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160 static bool
2161 lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
2162 {
2163 uint32_t rand_num;
2164
2165
2166 rand_num = 0xFFFF & prandom_u32();
2167
2168
2169 if ((fcf_cnt * rand_num) < 0xFFFF)
2170 return true;
2171 else
2172 return false;
2173 }
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188 static struct fcf_record *
2189 lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
2190 uint16_t *next_fcf_index)
2191 {
2192 void *virt_addr;
2193 struct lpfc_mbx_sge sge;
2194 struct lpfc_mbx_read_fcf_tbl *read_fcf;
2195 uint32_t shdr_status, shdr_add_status, if_type;
2196 union lpfc_sli4_cfg_shdr *shdr;
2197 struct fcf_record *new_fcf_record;
2198
2199
2200
2201
2202 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
2203 if (unlikely(!mboxq->sge_array)) {
2204 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2205 "2524 Failed to get the non-embedded SGE "
2206 "virtual address\n");
2207 return NULL;
2208 }
2209 virt_addr = mboxq->sge_array->addr[0];
2210
2211 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
2212 lpfc_sli_pcimem_bcopy(shdr, shdr,
2213 sizeof(union lpfc_sli4_cfg_shdr));
2214 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2215 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2216 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2217 if (shdr_status || shdr_add_status) {
2218 if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
2219 if_type == LPFC_SLI_INTF_IF_TYPE_2)
2220 lpfc_printf_log(phba, KERN_ERR,
2221 LOG_TRACE_EVENT,
2222 "2726 READ_FCF_RECORD Indicates empty "
2223 "FCF table.\n");
2224 else
2225 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2226 "2521 READ_FCF_RECORD mailbox failed "
2227 "with status x%x add_status x%x, "
2228 "mbx\n", shdr_status, shdr_add_status);
2229 return NULL;
2230 }
2231
2232
2233 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
2234 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
2235 sizeof(struct lpfc_mbx_read_fcf_tbl));
2236 *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
2237 new_fcf_record = (struct fcf_record *)(virt_addr +
2238 sizeof(struct lpfc_mbx_read_fcf_tbl));
2239 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
2240 offsetof(struct fcf_record, vlan_bitmap));
2241 new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
2242 new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
2243
2244 return new_fcf_record;
2245 }
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257 static void
2258 lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
2259 struct fcf_record *fcf_record,
2260 uint16_t vlan_id,
2261 uint16_t next_fcf_index)
2262 {
2263 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2264 "2764 READ_FCF_RECORD:\n"
2265 "\tFCF_Index : x%x\n"
2266 "\tFCF_Avail : x%x\n"
2267 "\tFCF_Valid : x%x\n"
2268 "\tFCF_SOL : x%x\n"
2269 "\tFIP_Priority : x%x\n"
2270 "\tMAC_Provider : x%x\n"
2271 "\tLowest VLANID : x%x\n"
2272 "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n"
2273 "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
2274 "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
2275 "\tNext_FCF_Index: x%x\n",
2276 bf_get(lpfc_fcf_record_fcf_index, fcf_record),
2277 bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
2278 bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
2279 bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
2280 fcf_record->fip_priority,
2281 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
2282 vlan_id,
2283 bf_get(lpfc_fcf_record_mac_0, fcf_record),
2284 bf_get(lpfc_fcf_record_mac_1, fcf_record),
2285 bf_get(lpfc_fcf_record_mac_2, fcf_record),
2286 bf_get(lpfc_fcf_record_mac_3, fcf_record),
2287 bf_get(lpfc_fcf_record_mac_4, fcf_record),
2288 bf_get(lpfc_fcf_record_mac_5, fcf_record),
2289 bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
2290 bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
2291 bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
2292 bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
2293 bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
2294 bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
2295 bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
2296 bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
2297 bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
2298 bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
2299 bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
2300 bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
2301 bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
2302 bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
2303 bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
2304 bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
2305 next_fcf_index);
2306 }
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321 static bool
2322 lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
2323 struct lpfc_fcf_rec *fcf_rec,
2324 struct fcf_record *new_fcf_record,
2325 uint16_t new_vlan_id)
2326 {
2327 if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
2328 if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
2329 return false;
2330 if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
2331 return false;
2332 if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
2333 return false;
2334 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
2335 return false;
2336 if (fcf_rec->priority != new_fcf_record->fip_priority)
2337 return false;
2338 return true;
2339 }
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352 int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
2353 {
2354 struct lpfc_hba *phba = vport->phba;
2355 int rc;
2356
2357 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
2358 spin_lock_irq(&phba->hbalock);
2359 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2360 spin_unlock_irq(&phba->hbalock);
2361 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2362 "2872 Devloss tmo with no eligible "
2363 "FCF, unregister in-use FCF (x%x) "
2364 "and rescan FCF table\n",
2365 phba->fcf.current_rec.fcf_indx);
2366 lpfc_unregister_fcf_rescan(phba);
2367 goto stop_flogi_current_fcf;
2368 }
2369
2370 phba->hba_flag &= ~FCF_RR_INPROG;
2371
2372 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2373 spin_unlock_irq(&phba->hbalock);
2374 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2375 "2865 No FCF available, stop roundrobin FCF "
2376 "failover and change port state:x%x/x%x\n",
2377 phba->pport->port_state, LPFC_VPORT_UNKNOWN);
2378 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2379
2380 if (!phba->fcf.fcf_redisc_attempted) {
2381 lpfc_unregister_fcf(phba);
2382
2383 rc = lpfc_sli4_redisc_fcf_table(phba);
2384 if (!rc) {
2385 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2386 "3195 Rediscover FCF table\n");
2387 phba->fcf.fcf_redisc_attempted = 1;
2388 lpfc_sli4_clear_fcf_rr_bmask(phba);
2389 } else {
2390 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2391 "3196 Rediscover FCF table "
2392 "failed. Status:x%x\n", rc);
2393 }
2394 } else {
2395 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2396 "3197 Already rediscover FCF table "
2397 "attempted. No more retry\n");
2398 }
2399 goto stop_flogi_current_fcf;
2400 } else {
2401 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
2402 "2794 Try FLOGI roundrobin FCF failover to "
2403 "(x%x)\n", fcf_index);
2404 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
2405 if (rc)
2406 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
2407 "2761 FLOGI roundrobin FCF failover "
2408 "failed (rc:x%x) to read FCF (x%x)\n",
2409 rc, phba->fcf.current_rec.fcf_indx);
2410 else
2411 goto stop_flogi_current_fcf;
2412 }
2413 return 0;
2414
2415 stop_flogi_current_fcf:
2416 lpfc_can_disctmo(vport);
2417 return 1;
2418 }
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429 static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
2430 uint16_t fcf_index)
2431 {
2432 struct lpfc_fcf_pri *new_fcf_pri;
2433
2434 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2435 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2436 "3058 deleting idx x%x pri x%x flg x%x\n",
2437 fcf_index, new_fcf_pri->fcf_rec.priority,
2438 new_fcf_pri->fcf_rec.flag);
2439 spin_lock_irq(&phba->hbalock);
2440 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
2441 if (phba->fcf.current_rec.priority ==
2442 new_fcf_pri->fcf_rec.priority)
2443 phba->fcf.eligible_fcf_cnt--;
2444 list_del_init(&new_fcf_pri->list);
2445 new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
2446 }
2447 spin_unlock_irq(&phba->hbalock);
2448 }
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460 void
2461 lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
2462 {
2463 struct lpfc_fcf_pri *new_fcf_pri;
2464 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2465 spin_lock_irq(&phba->hbalock);
2466 new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
2467 spin_unlock_irq(&phba->hbalock);
2468 }
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486 static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
2487 uint16_t fcf_index,
2488 struct fcf_record *new_fcf_record)
2489 {
2490 uint16_t current_fcf_pri;
2491 uint16_t last_index;
2492 struct lpfc_fcf_pri *fcf_pri;
2493 struct lpfc_fcf_pri *next_fcf_pri;
2494 struct lpfc_fcf_pri *new_fcf_pri;
2495 int ret;
2496
2497 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2498 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2499 "3059 adding idx x%x pri x%x flg x%x\n",
2500 fcf_index, new_fcf_record->fip_priority,
2501 new_fcf_pri->fcf_rec.flag);
2502 spin_lock_irq(&phba->hbalock);
2503 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2504 list_del_init(&new_fcf_pri->list);
2505 new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2506 new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2507 if (list_empty(&phba->fcf.fcf_pri_list)) {
2508 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2509 ret = lpfc_sli4_fcf_rr_index_set(phba,
2510 new_fcf_pri->fcf_rec.fcf_index);
2511 goto out;
2512 }
2513
2514 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2515 LPFC_SLI4_FCF_TBL_INDX_MAX);
2516 if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2517 ret = 0;
2518 goto out;
2519 }
2520 current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2521 if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
2522 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2523 if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
2524 memset(phba->fcf.fcf_rr_bmask, 0,
2525 sizeof(*phba->fcf.fcf_rr_bmask));
2526
2527 phba->fcf.eligible_fcf_cnt = 1;
2528 } else
2529
2530 phba->fcf.eligible_fcf_cnt++;
2531 ret = lpfc_sli4_fcf_rr_index_set(phba,
2532 new_fcf_pri->fcf_rec.fcf_index);
2533 goto out;
2534 }
2535
2536 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2537 &phba->fcf.fcf_pri_list, list) {
2538 if (new_fcf_pri->fcf_rec.priority <=
2539 fcf_pri->fcf_rec.priority) {
2540 if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2541 list_add(&new_fcf_pri->list,
2542 &phba->fcf.fcf_pri_list);
2543 else
2544 list_add(&new_fcf_pri->list,
2545 &((struct lpfc_fcf_pri *)
2546 fcf_pri->list.prev)->list);
2547 ret = 0;
2548 goto out;
2549 } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2550 || new_fcf_pri->fcf_rec.priority <
2551 next_fcf_pri->fcf_rec.priority) {
2552 list_add(&new_fcf_pri->list, &fcf_pri->list);
2553 ret = 0;
2554 goto out;
2555 }
2556 if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2557 continue;
2558
2559 }
2560 ret = 1;
2561 out:
2562
2563 new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2564 spin_unlock_irq(&phba->hbalock);
2565 return ret;
2566 }
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583 void
2584 lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2585 {
2586 struct fcf_record *new_fcf_record;
2587 uint32_t boot_flag, addr_mode;
2588 uint16_t fcf_index, next_fcf_index;
2589 struct lpfc_fcf_rec *fcf_rec = NULL;
2590 uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2591 bool select_new_fcf;
2592 int rc;
2593
2594
2595 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2596 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2597 return;
2598 }
2599
2600
2601 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2602 &next_fcf_index);
2603 if (!new_fcf_record) {
2604 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2605 "2765 Mailbox command READ_FCF_RECORD "
2606 "failed to retrieve a FCF record.\n");
2607
2608 spin_lock_irq(&phba->hbalock);
2609 phba->hba_flag &= ~FCF_TS_INPROG;
2610 spin_unlock_irq(&phba->hbalock);
2611 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2612 return;
2613 }
2614
2615
2616 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2617 &addr_mode, &vlan_id);
2618
2619
2620 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2621 next_fcf_index);
2622
2623
2624
2625
2626
2627
2628 if (!rc) {
2629 lpfc_sli4_fcf_pri_list_del(phba,
2630 bf_get(lpfc_fcf_record_fcf_index,
2631 new_fcf_record));
2632 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2633 "2781 FCF (x%x) failed connection "
2634 "list check: (x%x/x%x/%x)\n",
2635 bf_get(lpfc_fcf_record_fcf_index,
2636 new_fcf_record),
2637 bf_get(lpfc_fcf_record_fcf_avail,
2638 new_fcf_record),
2639 bf_get(lpfc_fcf_record_fcf_valid,
2640 new_fcf_record),
2641 bf_get(lpfc_fcf_record_fcf_sol,
2642 new_fcf_record));
2643 if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2644 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2645 new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
2646 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
2647 phba->fcf.current_rec.fcf_indx) {
2648 lpfc_printf_log(phba, KERN_ERR,
2649 LOG_TRACE_EVENT,
2650 "2862 FCF (x%x) matches property "
2651 "of in-use FCF (x%x)\n",
2652 bf_get(lpfc_fcf_record_fcf_index,
2653 new_fcf_record),
2654 phba->fcf.current_rec.fcf_indx);
2655 goto read_next_fcf;
2656 }
2657
2658
2659
2660
2661
2662
2663 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
2664 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2665 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2666 "2835 Invalid in-use FCF "
2667 "(x%x), enter FCF failover "
2668 "table scan.\n",
2669 phba->fcf.current_rec.fcf_indx);
2670 spin_lock_irq(&phba->hbalock);
2671 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2672 spin_unlock_irq(&phba->hbalock);
2673 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2674 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2675 LPFC_FCOE_FCF_GET_FIRST);
2676 return;
2677 }
2678 }
2679 goto read_next_fcf;
2680 } else {
2681 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2682 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2683 new_fcf_record);
2684 if (rc)
2685 goto read_next_fcf;
2686 }
2687
2688
2689
2690
2691
2692
2693
2694 spin_lock_irq(&phba->hbalock);
2695 if (phba->fcf.fcf_flag & FCF_IN_USE) {
2696 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2697 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2698 new_fcf_record, vlan_id)) {
2699 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2700 phba->fcf.current_rec.fcf_indx) {
2701 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2702 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
2703
2704 __lpfc_sli4_stop_fcf_redisc_wait_timer(
2705 phba);
2706 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2707
2708 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2709 spin_unlock_irq(&phba->hbalock);
2710 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2711 "2836 New FCF matches in-use "
2712 "FCF (x%x), port_state:x%x, "
2713 "fc_flag:x%x\n",
2714 phba->fcf.current_rec.fcf_indx,
2715 phba->pport->port_state,
2716 phba->pport->fc_flag);
2717 goto out;
2718 } else
2719 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2720 "2863 New FCF (x%x) matches "
2721 "property of in-use FCF (x%x)\n",
2722 bf_get(lpfc_fcf_record_fcf_index,
2723 new_fcf_record),
2724 phba->fcf.current_rec.fcf_indx);
2725 }
2726
2727
2728
2729
2730
2731
2732
2733 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2734 spin_unlock_irq(&phba->hbalock);
2735 goto read_next_fcf;
2736 }
2737 }
2738
2739
2740
2741
2742 if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2743 fcf_rec = &phba->fcf.failover_rec;
2744 else
2745 fcf_rec = &phba->fcf.current_rec;
2746
2747 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
2748
2749
2750
2751
2752
2753 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
2754
2755 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2756 "2837 Update current FCF record "
2757 "(x%x) with new FCF record (x%x)\n",
2758 fcf_rec->fcf_indx,
2759 bf_get(lpfc_fcf_record_fcf_index,
2760 new_fcf_record));
2761 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2762 addr_mode, vlan_id, BOOT_ENABLE);
2763 spin_unlock_irq(&phba->hbalock);
2764 goto read_next_fcf;
2765 }
2766
2767
2768
2769
2770
2771 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
2772 spin_unlock_irq(&phba->hbalock);
2773 goto read_next_fcf;
2774 }
2775
2776
2777
2778
2779 if (new_fcf_record->fip_priority < fcf_rec->priority) {
2780
2781 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2782 "2838 Update current FCF record "
2783 "(x%x) with new FCF record (x%x)\n",
2784 fcf_rec->fcf_indx,
2785 bf_get(lpfc_fcf_record_fcf_index,
2786 new_fcf_record));
2787 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2788 addr_mode, vlan_id, 0);
2789
2790 phba->fcf.eligible_fcf_cnt = 1;
2791 } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
2792
2793 phba->fcf.eligible_fcf_cnt++;
2794 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
2795 phba->fcf.eligible_fcf_cnt);
2796 if (select_new_fcf) {
2797 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2798 "2839 Update current FCF record "
2799 "(x%x) with new FCF record (x%x)\n",
2800 fcf_rec->fcf_indx,
2801 bf_get(lpfc_fcf_record_fcf_index,
2802 new_fcf_record));
2803
2804 __lpfc_update_fcf_record(phba, fcf_rec,
2805 new_fcf_record,
2806 addr_mode, vlan_id, 0);
2807 }
2808 }
2809 spin_unlock_irq(&phba->hbalock);
2810 goto read_next_fcf;
2811 }
2812
2813
2814
2815
2816 if (fcf_rec) {
2817 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2818 "2840 Update initial FCF candidate "
2819 "with FCF (x%x)\n",
2820 bf_get(lpfc_fcf_record_fcf_index,
2821 new_fcf_record));
2822 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2823 addr_mode, vlan_id, (boot_flag ?
2824 BOOT_ENABLE : 0));
2825 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2826
2827 phba->fcf.eligible_fcf_cnt = 1;
2828 }
2829 spin_unlock_irq(&phba->hbalock);
2830 goto read_next_fcf;
2831
2832 read_next_fcf:
2833 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2834 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2835 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
2836
2837
2838
2839
2840
2841
2842
2843
2844 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2845 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2846 "2782 No suitable FCF found: "
2847 "(x%x/x%x)\n",
2848 phba->fcoe_eventtag_at_fcf_scan,
2849 bf_get(lpfc_fcf_record_fcf_index,
2850 new_fcf_record));
2851 spin_lock_irq(&phba->hbalock);
2852 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2853 phba->hba_flag &= ~FCF_TS_INPROG;
2854 spin_unlock_irq(&phba->hbalock);
2855
2856 lpfc_printf_log(phba, KERN_INFO,
2857 LOG_FIP,
2858 "2864 On devloss tmo "
2859 "unreg in-use FCF and "
2860 "rescan FCF table\n");
2861 lpfc_unregister_fcf_rescan(phba);
2862 return;
2863 }
2864
2865
2866
2867 phba->hba_flag &= ~FCF_TS_INPROG;
2868 spin_unlock_irq(&phba->hbalock);
2869 return;
2870 }
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881 lpfc_unregister_fcf(phba);
2882
2883
2884 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2885 "2842 Replace in-use FCF (x%x) "
2886 "with failover FCF (x%x)\n",
2887 phba->fcf.current_rec.fcf_indx,
2888 phba->fcf.failover_rec.fcf_indx);
2889 memcpy(&phba->fcf.current_rec,
2890 &phba->fcf.failover_rec,
2891 sizeof(struct lpfc_fcf_rec));
2892
2893
2894
2895
2896
2897 spin_lock_irq(&phba->hbalock);
2898 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2899 spin_unlock_irq(&phba->hbalock);
2900
2901 lpfc_register_fcf(phba);
2902 } else {
2903
2904
2905
2906
2907 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2908 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2909 return;
2910
2911 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2912 phba->fcf.fcf_flag & FCF_IN_USE) {
2913
2914
2915
2916
2917
2918
2919 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2920 "2841 In-use FCF record (x%x) "
2921 "not reported, entering fast "
2922 "FCF failover mode scanning.\n",
2923 phba->fcf.current_rec.fcf_indx);
2924 spin_lock_irq(&phba->hbalock);
2925 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2926 spin_unlock_irq(&phba->hbalock);
2927 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2928 LPFC_FCOE_FCF_GET_FIRST);
2929 return;
2930 }
2931
2932 lpfc_register_fcf(phba);
2933 }
2934 } else
2935 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
2936 return;
2937
2938 out:
2939 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2940 lpfc_register_fcf(phba);
2941
2942 return;
2943 }
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960 void
2961 lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2962 {
2963 struct fcf_record *new_fcf_record;
2964 uint32_t boot_flag, addr_mode;
2965 uint16_t next_fcf_index, fcf_index;
2966 uint16_t current_fcf_index;
2967 uint16_t vlan_id;
2968 int rc;
2969
2970
2971 if (phba->link_state < LPFC_LINK_UP) {
2972 spin_lock_irq(&phba->hbalock);
2973 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2974 phba->hba_flag &= ~FCF_RR_INPROG;
2975 spin_unlock_irq(&phba->hbalock);
2976 goto out;
2977 }
2978
2979
2980 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2981 &next_fcf_index);
2982 if (!new_fcf_record) {
2983 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2984 "2766 Mailbox command READ_FCF_RECORD "
2985 "failed to retrieve a FCF record. "
2986 "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
2987 phba->fcf.fcf_flag);
2988 lpfc_unregister_fcf_rescan(phba);
2989 goto out;
2990 }
2991
2992
2993 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2994 &addr_mode, &vlan_id);
2995
2996
2997 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2998 next_fcf_index);
2999
3000 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
3001 if (!rc) {
3002 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3003 "2848 Remove ineligible FCF (x%x) from "
3004 "from roundrobin bmask\n", fcf_index);
3005
3006 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
3007
3008 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
3009 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
3010 if (rc)
3011 goto out;
3012 goto error_out;
3013 }
3014
3015 if (fcf_index == phba->fcf.current_rec.fcf_indx) {
3016 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3017 "2760 Perform FLOGI roundrobin FCF failover: "
3018 "FCF (x%x) back to FCF (x%x)\n",
3019 phba->fcf.current_rec.fcf_indx, fcf_index);
3020
3021 msleep(500);
3022 lpfc_issue_init_vfi(phba->pport);
3023 goto out;
3024 }
3025
3026
3027 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3028 "2834 Update current FCF (x%x) with new FCF (x%x)\n",
3029 phba->fcf.failover_rec.fcf_indx, fcf_index);
3030 spin_lock_irq(&phba->hbalock);
3031 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
3032 new_fcf_record, addr_mode, vlan_id,
3033 (boot_flag ? BOOT_ENABLE : 0));
3034 spin_unlock_irq(&phba->hbalock);
3035
3036 current_fcf_index = phba->fcf.current_rec.fcf_indx;
3037
3038
3039 lpfc_unregister_fcf(phba);
3040
3041
3042 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
3043 sizeof(struct lpfc_fcf_rec));
3044
3045 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3046 "2783 Perform FLOGI roundrobin FCF failover: FCF "
3047 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
3048
3049 error_out:
3050 lpfc_register_fcf(phba);
3051 out:
3052 lpfc_sli4_mbox_cmd_free(phba, mboxq);
3053 }
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066 void
3067 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3068 {
3069 struct fcf_record *new_fcf_record;
3070 uint32_t boot_flag, addr_mode;
3071 uint16_t fcf_index, next_fcf_index;
3072 uint16_t vlan_id;
3073 int rc;
3074
3075
3076 if (phba->link_state < LPFC_LINK_UP)
3077 goto out;
3078
3079
3080 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
3081 goto out;
3082
3083
3084 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
3085 &next_fcf_index);
3086 if (!new_fcf_record) {
3087 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3088 "2767 Mailbox command READ_FCF_RECORD "
3089 "failed to retrieve a FCF record.\n");
3090 goto out;
3091 }
3092
3093
3094 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
3095 &addr_mode, &vlan_id);
3096
3097
3098 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
3099 next_fcf_index);
3100
3101 if (!rc)
3102 goto out;
3103
3104
3105 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
3106
3107 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
3108
3109 out:
3110 lpfc_sli4_mbox_cmd_free(phba, mboxq);
3111 }
3112
3113
3114
3115
3116
3117
3118
3119
3120 static void
3121 lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3122 {
3123 struct lpfc_vport *vport = mboxq->vport;
3124
3125
3126
3127
3128
3129 if (mboxq->u.mb.mbxStatus &&
3130 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3131 LPFC_SLI_INTF_IF_TYPE_0) &&
3132 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
3133 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3134 "2891 Init VFI mailbox failed 0x%x\n",
3135 mboxq->u.mb.mbxStatus);
3136 mempool_free(mboxq, phba->mbox_mem_pool);
3137 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3138 return;
3139 }
3140
3141 lpfc_initial_flogi(vport);
3142 mempool_free(mboxq, phba->mbox_mem_pool);
3143 return;
3144 }
3145
3146
3147
3148
3149
3150
3151
3152
3153 void
3154 lpfc_issue_init_vfi(struct lpfc_vport *vport)
3155 {
3156 LPFC_MBOXQ_t *mboxq;
3157 int rc;
3158 struct lpfc_hba *phba = vport->phba;
3159
3160 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3161 if (!mboxq) {
3162 lpfc_printf_vlog(vport, KERN_ERR,
3163 LOG_TRACE_EVENT, "2892 Failed to allocate "
3164 "init_vfi mailbox\n");
3165 return;
3166 }
3167 lpfc_init_vfi(mboxq, vport);
3168 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
3169 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
3170 if (rc == MBX_NOT_FINISHED) {
3171 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3172 "2893 Failed to issue init_vfi mailbox\n");
3173 mempool_free(mboxq, vport->phba->mbox_mem_pool);
3174 }
3175 }
3176
3177
3178
3179
3180
3181
3182
3183
3184 void
3185 lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3186 {
3187 struct lpfc_vport *vport = mboxq->vport;
3188 struct lpfc_nodelist *ndlp;
3189 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3190
3191 if (mboxq->u.mb.mbxStatus) {
3192 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3193 "2609 Init VPI mailbox failed 0x%x\n",
3194 mboxq->u.mb.mbxStatus);
3195 mempool_free(mboxq, phba->mbox_mem_pool);
3196 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3197 return;
3198 }
3199 spin_lock_irq(shost->host_lock);
3200 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
3201 spin_unlock_irq(shost->host_lock);
3202
3203
3204 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
3205 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3206 if (!ndlp)
3207 lpfc_printf_vlog(vport, KERN_ERR,
3208 LOG_TRACE_EVENT,
3209 "2731 Cannot find fabric "
3210 "controller node\n");
3211 else
3212 lpfc_register_new_vport(phba, vport, ndlp);
3213 mempool_free(mboxq, phba->mbox_mem_pool);
3214 return;
3215 }
3216
3217 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
3218 lpfc_initial_fdisc(vport);
3219 else {
3220 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
3221 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3222 "2606 No NPIV Fabric support\n");
3223 }
3224 mempool_free(mboxq, phba->mbox_mem_pool);
3225 return;
3226 }
3227
3228
3229
3230
3231
3232
3233
3234
3235 void
3236 lpfc_issue_init_vpi(struct lpfc_vport *vport)
3237 {
3238 LPFC_MBOXQ_t *mboxq;
3239 int rc, vpi;
3240
3241 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
3242 vpi = lpfc_alloc_vpi(vport->phba);
3243 if (!vpi) {
3244 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3245 "3303 Failed to obtain vport vpi\n");
3246 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3247 return;
3248 }
3249 vport->vpi = vpi;
3250 }
3251
3252 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
3253 if (!mboxq) {
3254 lpfc_printf_vlog(vport, KERN_ERR,
3255 LOG_TRACE_EVENT, "2607 Failed to allocate "
3256 "init_vpi mailbox\n");
3257 return;
3258 }
3259 lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
3260 mboxq->vport = vport;
3261 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
3262 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
3263 if (rc == MBX_NOT_FINISHED) {
3264 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3265 "2608 Failed to issue init_vpi mailbox\n");
3266 mempool_free(mboxq, vport->phba->mbox_mem_pool);
3267 }
3268 }
3269
3270
3271
3272
3273
3274
3275
3276
3277 void
3278 lpfc_start_fdiscs(struct lpfc_hba *phba)
3279 {
3280 struct lpfc_vport **vports;
3281 int i;
3282
3283 vports = lpfc_create_vport_work_array(phba);
3284 if (vports != NULL) {
3285 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3286 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
3287 continue;
3288
3289 if (vports[i]->vpi > phba->max_vpi) {
3290 lpfc_vport_set_state(vports[i],
3291 FC_VPORT_FAILED);
3292 continue;
3293 }
3294 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3295 lpfc_vport_set_state(vports[i],
3296 FC_VPORT_LINKDOWN);
3297 continue;
3298 }
3299 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
3300 lpfc_issue_init_vpi(vports[i]);
3301 continue;
3302 }
3303 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
3304 lpfc_initial_fdisc(vports[i]);
3305 else {
3306 lpfc_vport_set_state(vports[i],
3307 FC_VPORT_NO_FABRIC_SUPP);
3308 lpfc_printf_vlog(vports[i], KERN_ERR,
3309 LOG_TRACE_EVENT,
3310 "0259 No NPIV "
3311 "Fabric support\n");
3312 }
3313 }
3314 }
3315 lpfc_destroy_vport_work_array(phba, vports);
3316 }
3317
3318 void
3319 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3320 {
3321 struct lpfc_vport *vport = mboxq->vport;
3322 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3323
3324
3325
3326
3327
3328 if (mboxq->u.mb.mbxStatus &&
3329 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3330 LPFC_SLI_INTF_IF_TYPE_0) &&
3331 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
3332 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3333 "2018 REG_VFI mbxStatus error x%x "
3334 "HBA state x%x\n",
3335 mboxq->u.mb.mbxStatus, vport->port_state);
3336 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3337
3338 lpfc_disc_list_loopmap(vport);
3339
3340 lpfc_disc_start(vport);
3341 goto out_free_mem;
3342 }
3343 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3344 goto out_free_mem;
3345 }
3346
3347
3348
3349
3350
3351 if (vport->fc_flag & FC_VFI_REGISTERED)
3352 if (!(phba->sli_rev == LPFC_SLI_REV4 &&
3353 vport->fc_flag & FC_PT2PT))
3354 goto out_free_mem;
3355
3356
3357 spin_lock_irq(shost->host_lock);
3358 vport->vpi_state |= LPFC_VPI_REGISTERED;
3359 vport->fc_flag |= FC_VFI_REGISTERED;
3360 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3361 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
3362 spin_unlock_irq(shost->host_lock);
3363
3364
3365 if ((phba->sli_rev == LPFC_SLI_REV4) &&
3366 (phba->link_flag & LS_LOOPBACK_MODE)) {
3367 phba->link_state = LPFC_HBA_READY;
3368 goto out_free_mem;
3369 }
3370
3371 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3372 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
3373 "alpacnt:%d LinkState:%x topology:%x\n",
3374 vport->port_state, vport->fc_flag, vport->fc_myDID,
3375 vport->phba->alpa_map[0],
3376 phba->link_state, phba->fc_topology);
3377
3378 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3379
3380
3381
3382
3383 if ((vport->fc_flag & FC_PT2PT) ||
3384 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
3385 !(vport->fc_flag & FC_PUBLIC_LOOP))) {
3386
3387
3388 lpfc_disc_list_loopmap(vport);
3389
3390 if (vport->fc_flag & FC_PT2PT)
3391 vport->port_state = LPFC_VPORT_READY;
3392 else
3393 lpfc_disc_start(vport);
3394 } else {
3395 lpfc_start_fdiscs(phba);
3396 lpfc_do_scr_ns_plogi(phba, vport);
3397 }
3398 }
3399
3400 out_free_mem:
3401 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
3402 }
3403
3404 static void
3405 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3406 {
3407 MAILBOX_t *mb = &pmb->u.mb;
3408 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3409 struct lpfc_vport *vport = pmb->vport;
3410 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3411 struct serv_parm *sp = &vport->fc_sparam;
3412 uint32_t ed_tov;
3413
3414
3415 if (mb->mbxStatus) {
3416
3417 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3418 "0319 READ_SPARAM mbxStatus error x%x "
3419 "hba state x%x>\n",
3420 mb->mbxStatus, vport->port_state);
3421 lpfc_linkdown(phba);
3422 goto out;
3423 }
3424
3425 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
3426 sizeof (struct serv_parm));
3427
3428 ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
3429 if (sp->cmn.edtovResolution)
3430 ed_tov = (ed_tov + 999999) / 1000000;
3431
3432 phba->fc_edtov = ed_tov;
3433 phba->fc_ratov = (2 * ed_tov) / 1000;
3434 if (phba->fc_ratov < FF_DEF_RATOV) {
3435
3436 phba->fc_ratov = FF_DEF_RATOV;
3437 }
3438
3439 lpfc_update_vport_wwn(vport);
3440 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3441 if (vport->port_type == LPFC_PHYSICAL_PORT) {
3442 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
3443 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
3444 }
3445
3446 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
3447
3448
3449
3450
3451 if (phba->hba_flag & HBA_DEFER_FLOGI) {
3452 lpfc_initial_flogi(vport);
3453 phba->hba_flag &= ~HBA_DEFER_FLOGI;
3454 }
3455 return;
3456
3457 out:
3458 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
3459 lpfc_issue_clear_la(phba, vport);
3460 }
3461
3462 static void
3463 lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3464 {
3465 struct lpfc_vport *vport = phba->pport;
3466 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
3467 struct Scsi_Host *shost;
3468 int i;
3469 int rc;
3470 struct fcf_record *fcf_record;
3471 uint32_t fc_flags = 0;
3472 unsigned long iflags;
3473
3474 spin_lock_irqsave(&phba->hbalock, iflags);
3475 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
3476
3477 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3478 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
3479 case LPFC_LINK_SPEED_1GHZ:
3480 case LPFC_LINK_SPEED_2GHZ:
3481 case LPFC_LINK_SPEED_4GHZ:
3482 case LPFC_LINK_SPEED_8GHZ:
3483 case LPFC_LINK_SPEED_10GHZ:
3484 case LPFC_LINK_SPEED_16GHZ:
3485 case LPFC_LINK_SPEED_32GHZ:
3486 case LPFC_LINK_SPEED_64GHZ:
3487 case LPFC_LINK_SPEED_128GHZ:
3488 case LPFC_LINK_SPEED_256GHZ:
3489 break;
3490 default:
3491 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
3492 break;
3493 }
3494 }
3495
3496 if (phba->fc_topology &&
3497 phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
3498 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3499 "3314 Toplogy changed was 0x%x is 0x%x\n",
3500 phba->fc_topology,
3501 bf_get(lpfc_mbx_read_top_topology, la));
3502 phba->fc_topology_changed = 1;
3503 }
3504
3505 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3506 phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);
3507
3508 shost = lpfc_shost_from_vport(vport);
3509 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3510 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
3511
3512
3513
3514
3515 if (phba->cfg_enable_npiv && phba->max_vpi)
3516 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3517 "1309 Link Up Event npiv not supported in loop "
3518 "topology\n");
3519
3520 if (bf_get(lpfc_mbx_read_top_il, la))
3521 fc_flags |= FC_LBIT;
3522
3523 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3524 i = la->lilpBde64.tus.f.bdeSize;
3525
3526 if (i == 0) {
3527 phba->alpa_map[0] = 0;
3528 } else {
3529 if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
3530 int numalpa, j, k;
3531 union {
3532 uint8_t pamap[16];
3533 struct {
3534 uint32_t wd1;
3535 uint32_t wd2;
3536 uint32_t wd3;
3537 uint32_t wd4;
3538 } pa;
3539 } un;
3540 numalpa = phba->alpa_map[0];
3541 j = 0;
3542 while (j < numalpa) {
3543 memset(un.pamap, 0, 16);
3544 for (k = 1; j < numalpa; k++) {
3545 un.pamap[k - 1] =
3546 phba->alpa_map[j + 1];
3547 j++;
3548 if (k == 16)
3549 break;
3550 }
3551
3552 lpfc_printf_log(phba,
3553 KERN_WARNING,
3554 LOG_LINK_EVENT,
3555 "1304 Link Up Event "
3556 "ALPA map Data: x%x "
3557 "x%x x%x x%x\n",
3558 un.pa.wd1, un.pa.wd2,
3559 un.pa.wd3, un.pa.wd4);
3560 }
3561 }
3562 }
3563 } else {
3564 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
3565 if (phba->max_vpi && phba->cfg_enable_npiv &&
3566 (phba->sli_rev >= LPFC_SLI_REV3))
3567 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3568 }
3569 vport->fc_myDID = phba->fc_pref_DID;
3570 fc_flags |= FC_LBIT;
3571 }
3572 spin_unlock_irqrestore(&phba->hbalock, iflags);
3573
3574 if (fc_flags) {
3575 spin_lock_irqsave(shost->host_lock, iflags);
3576 vport->fc_flag |= fc_flags;
3577 spin_unlock_irqrestore(shost->host_lock, iflags);
3578 }
3579
3580 lpfc_linkup(phba);
3581 sparam_mbox = NULL;
3582
3583 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3584 if (!sparam_mbox)
3585 goto out;
3586
3587 rc = lpfc_read_sparam(phba, sparam_mbox, 0);
3588 if (rc) {
3589 mempool_free(sparam_mbox, phba->mbox_mem_pool);
3590 goto out;
3591 }
3592 sparam_mbox->vport = vport;
3593 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
3594 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
3595 if (rc == MBX_NOT_FINISHED) {
3596 lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED);
3597 goto out;
3598 }
3599
3600 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3601 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3602 if (!cfglink_mbox)
3603 goto out;
3604 vport->port_state = LPFC_LOCAL_CFG_LINK;
3605 lpfc_config_link(phba, cfglink_mbox);
3606 cfglink_mbox->vport = vport;
3607 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
3608 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
3609 if (rc == MBX_NOT_FINISHED) {
3610 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
3611 goto out;
3612 }
3613 } else {
3614 vport->port_state = LPFC_VPORT_UNKNOWN;
3615
3616
3617
3618
3619
3620 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
3621 fcf_record = kzalloc(sizeof(struct fcf_record),
3622 GFP_KERNEL);
3623 if (unlikely(!fcf_record)) {
3624 lpfc_printf_log(phba, KERN_ERR,
3625 LOG_TRACE_EVENT,
3626 "2554 Could not allocate memory for "
3627 "fcf record\n");
3628 rc = -ENODEV;
3629 goto out;
3630 }
3631
3632 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
3633 LPFC_FCOE_FCF_DEF_INDEX);
3634 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
3635 if (unlikely(rc)) {
3636 lpfc_printf_log(phba, KERN_ERR,
3637 LOG_TRACE_EVENT,
3638 "2013 Could not manually add FCF "
3639 "record 0, status %d\n", rc);
3640 rc = -ENODEV;
3641 kfree(fcf_record);
3642 goto out;
3643 }
3644 kfree(fcf_record);
3645 }
3646
3647
3648
3649
3650 spin_lock_irqsave(&phba->hbalock, iflags);
3651 if (phba->hba_flag & FCF_TS_INPROG) {
3652 spin_unlock_irqrestore(&phba->hbalock, iflags);
3653 return;
3654 }
3655
3656 phba->fcf.fcf_flag |= FCF_INIT_DISC;
3657 spin_unlock_irqrestore(&phba->hbalock, iflags);
3658 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3659 "2778 Start FCF table scan at linkup\n");
3660 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3661 LPFC_FCOE_FCF_GET_FIRST);
3662 if (rc) {
3663 spin_lock_irqsave(&phba->hbalock, iflags);
3664 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
3665 spin_unlock_irqrestore(&phba->hbalock, iflags);
3666 goto out;
3667 }
3668
3669 lpfc_sli4_clear_fcf_rr_bmask(phba);
3670 }
3671
3672
3673 memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
3674 scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
3675 init_utsname()->nodename);
3676 return;
3677 out:
3678 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3679 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3680 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
3681 vport->port_state, sparam_mbox, cfglink_mbox);
3682 lpfc_issue_clear_la(phba, vport);
3683 return;
3684 }
3685
3686 static void
3687 lpfc_enable_la(struct lpfc_hba *phba)
3688 {
3689 uint32_t control;
3690 struct lpfc_sli *psli = &phba->sli;
3691 spin_lock_irq(&phba->hbalock);
3692 psli->sli_flag |= LPFC_PROCESS_LA;
3693 if (phba->sli_rev <= LPFC_SLI_REV3) {
3694 control = readl(phba->HCregaddr);
3695 control |= HC_LAINT_ENA;
3696 writel(control, phba->HCregaddr);
3697 readl(phba->HCregaddr);
3698 }
3699 spin_unlock_irq(&phba->hbalock);
3700 }
3701
3702 static void
3703 lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
3704 {
3705 lpfc_linkdown(phba);
3706 lpfc_enable_la(phba);
3707 lpfc_unregister_unused_fcf(phba);
3708
3709 }
3710
3711
3712
3713
3714
3715
3716
3717
3718 void
3719 lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3720 {
3721 struct lpfc_vport *vport = pmb->vport;
3722 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3723 struct lpfc_mbx_read_top *la;
3724 struct lpfc_sli_ring *pring;
3725 MAILBOX_t *mb = &pmb->u.mb;
3726 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3727 uint8_t attn_type;
3728 unsigned long iflags;
3729
3730
3731 pring = lpfc_phba_elsring(phba);
3732 if (pring)
3733 pring->flag &= ~LPFC_STOP_IOCB_EVENT;
3734
3735
3736 if (mb->mbxStatus) {
3737 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3738 "1307 READ_LA mbox error x%x state x%x\n",
3739 mb->mbxStatus, vport->port_state);
3740 lpfc_mbx_issue_link_down(phba);
3741 phba->link_state = LPFC_HBA_ERROR;
3742 goto lpfc_mbx_cmpl_read_topology_free_mbuf;
3743 }
3744
3745 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3746 attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
3747
3748 memcpy(&phba->alpa_map[0], mp->virt, 128);
3749
3750 spin_lock_irqsave(shost->host_lock, iflags);
3751 if (bf_get(lpfc_mbx_read_top_pb, la))
3752 vport->fc_flag |= FC_BYPASSED_MODE;
3753 else
3754 vport->fc_flag &= ~FC_BYPASSED_MODE;
3755 spin_unlock_irqrestore(shost->host_lock, iflags);
3756
3757 if (phba->fc_eventTag <= la->eventTag) {
3758 phba->fc_stat.LinkMultiEvent++;
3759 if (attn_type == LPFC_ATT_LINK_UP)
3760 if (phba->fc_eventTag != 0)
3761 lpfc_linkdown(phba);
3762 }
3763
3764 phba->fc_eventTag = la->eventTag;
3765 phba->link_events++;
3766 if (attn_type == LPFC_ATT_LINK_UP) {
3767 phba->fc_stat.LinkUp++;
3768 if (phba->link_flag & LS_LOOPBACK_MODE) {
3769 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3770 "1306 Link Up Event in loop back mode "
3771 "x%x received Data: x%x x%x x%x x%x\n",
3772 la->eventTag, phba->fc_eventTag,
3773 bf_get(lpfc_mbx_read_top_alpa_granted,
3774 la),
3775 bf_get(lpfc_mbx_read_top_link_spd, la),
3776 phba->alpa_map[0]);
3777 } else {
3778 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3779 "1303 Link Up Event x%x received "
3780 "Data: x%x x%x x%x x%x x%x\n",
3781 la->eventTag, phba->fc_eventTag,
3782 bf_get(lpfc_mbx_read_top_alpa_granted,
3783 la),
3784 bf_get(lpfc_mbx_read_top_link_spd, la),
3785 phba->alpa_map[0],
3786 bf_get(lpfc_mbx_read_top_fa, la));
3787 }
3788 lpfc_mbx_process_link_up(phba, la);
3789
3790 if (phba->cmf_active_mode != LPFC_CFG_OFF)
3791 lpfc_cmf_signal_init(phba);
3792
3793 } else if (attn_type == LPFC_ATT_LINK_DOWN ||
3794 attn_type == LPFC_ATT_UNEXP_WWPN) {
3795 phba->fc_stat.LinkDown++;
3796 if (phba->link_flag & LS_LOOPBACK_MODE)
3797 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3798 "1308 Link Down Event in loop back mode "
3799 "x%x received "
3800 "Data: x%x x%x x%x\n",
3801 la->eventTag, phba->fc_eventTag,
3802 phba->pport->port_state, vport->fc_flag);
3803 else if (attn_type == LPFC_ATT_UNEXP_WWPN)
3804 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3805 "1313 Link Down Unexpected FA WWPN Event x%x "
3806 "received Data: x%x x%x x%x x%x\n",
3807 la->eventTag, phba->fc_eventTag,
3808 phba->pport->port_state, vport->fc_flag,
3809 bf_get(lpfc_mbx_read_top_fa, la));
3810 else
3811 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3812 "1305 Link Down Event x%x received "
3813 "Data: x%x x%x x%x x%x\n",
3814 la->eventTag, phba->fc_eventTag,
3815 phba->pport->port_state, vport->fc_flag,
3816 bf_get(lpfc_mbx_read_top_fa, la));
3817 lpfc_mbx_issue_link_down(phba);
3818 }
3819
3820 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3821 bf_get(lpfc_mbx_read_top_fa, la))
3822 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3823 "1311 fa %d\n",
3824 bf_get(lpfc_mbx_read_top_fa, la));
3825
3826 lpfc_mbx_cmpl_read_topology_free_mbuf:
3827 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
3828 }
3829
3830
3831
3832
3833
3834
3835
3836 void
3837 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3838 {
3839 struct lpfc_vport *vport = pmb->vport;
3840 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3841 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3842
3843
3844
3845
3846
3847 pmb->ctx_buf = NULL;
3848 pmb->ctx_ndlp = NULL;
3849
3850 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
3851 "0002 rpi:%x DID:%x flg:%x %d x%px\n",
3852 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3853 kref_read(&ndlp->kref),
3854 ndlp);
3855 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3856 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3857
3858 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3859 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3860
3861
3862
3863
3864
3865
3866
3867
3868 spin_lock_irq(&ndlp->lock);
3869 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3870 spin_unlock_irq(&ndlp->lock);
3871
3872
3873
3874
3875
3876
3877 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3878 lpfc_unreg_rpi(vport, ndlp);
3879 }
3880
3881
3882 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
3883 pmb->ctx_buf = mp;
3884 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
3885
3886
3887
3888
3889 lpfc_nlp_put(ndlp);
3890
3891 return;
3892 }
3893
3894 static void
3895 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3896 {
3897 MAILBOX_t *mb = &pmb->u.mb;
3898 struct lpfc_vport *vport = pmb->vport;
3899 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3900
3901 switch (mb->mbxStatus) {
3902 case 0x0011:
3903 case 0x0020:
3904 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3905 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3906 mb->mbxStatus);
3907 break;
3908
3909 case 0x9700:
3910 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3911 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3912 vport->vpi, mb->mbxStatus);
3913 if (!(phba->pport->load_flag & FC_UNLOADING))
3914 lpfc_workq_post_event(phba, NULL, NULL,
3915 LPFC_EVT_RESET_HBA);
3916 }
3917 spin_lock_irq(shost->host_lock);
3918 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
3919 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3920 spin_unlock_irq(shost->host_lock);
3921 mempool_free(pmb, phba->mbox_mem_pool);
3922 lpfc_cleanup_vports_rrqs(vport, NULL);
3923
3924
3925
3926
3927 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
3928 scsi_host_put(shost);
3929 }
3930
3931 int
3932 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
3933 {
3934 struct lpfc_hba *phba = vport->phba;
3935 LPFC_MBOXQ_t *mbox;
3936 int rc;
3937
3938 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3939 if (!mbox)
3940 return 1;
3941
3942 lpfc_unreg_vpi(phba, vport->vpi, mbox);
3943 mbox->vport = vport;
3944 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
3945 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3946 if (rc == MBX_NOT_FINISHED) {
3947 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3948 "1800 Could not issue unreg_vpi\n");
3949 mempool_free(mbox, phba->mbox_mem_pool);
3950 return rc;
3951 }
3952 return 0;
3953 }
3954
3955 static void
3956 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3957 {
3958 struct lpfc_vport *vport = pmb->vport;
3959 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3960 MAILBOX_t *mb = &pmb->u.mb;
3961
3962 switch (mb->mbxStatus) {
3963 case 0x0011:
3964 case 0x9601:
3965 case 0x9602:
3966 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3967 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3968 mb->mbxStatus);
3969 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3970 spin_lock_irq(shost->host_lock);
3971 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3972 spin_unlock_irq(shost->host_lock);
3973 vport->fc_myDID = 0;
3974
3975 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3976 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
3977 if (phba->nvmet_support)
3978 lpfc_nvmet_update_targetport(phba);
3979 else
3980 lpfc_nvme_update_localport(vport);
3981 }
3982 goto out;
3983 }
3984
3985 spin_lock_irq(shost->host_lock);
3986 vport->vpi_state |= LPFC_VPI_REGISTERED;
3987 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3988 spin_unlock_irq(shost->host_lock);
3989 vport->num_disc_nodes = 0;
3990
3991 if (vport->fc_npr_cnt)
3992 lpfc_els_disc_plogi(vport);
3993
3994 if (!vport->num_disc_nodes) {
3995 spin_lock_irq(shost->host_lock);
3996 vport->fc_flag &= ~FC_NDISC_ACTIVE;
3997 spin_unlock_irq(shost->host_lock);
3998 lpfc_can_disctmo(vport);
3999 }
4000 vport->port_state = LPFC_VPORT_READY;
4001
4002 out:
4003 mempool_free(pmb, phba->mbox_mem_pool);
4004 return;
4005 }
4006
4007
4008
4009
4010
4011
4012
4013
4014
4015 void
4016 lpfc_create_static_vport(struct lpfc_hba *phba)
4017 {
4018 LPFC_MBOXQ_t *pmb = NULL;
4019 MAILBOX_t *mb;
4020 struct static_vport_info *vport_info;
4021 int mbx_wait_rc = 0, i;
4022 struct fc_vport_identifiers vport_id;
4023 struct fc_vport *new_fc_vport;
4024 struct Scsi_Host *shost;
4025 struct lpfc_vport *vport;
4026 uint16_t offset = 0;
4027 uint8_t *vport_buff;
4028 struct lpfc_dmabuf *mp;
4029 uint32_t byte_count = 0;
4030
4031 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4032 if (!pmb) {
4033 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4034 "0542 lpfc_create_static_vport failed to"
4035 " allocate mailbox memory\n");
4036 return;
4037 }
4038 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
4039 mb = &pmb->u.mb;
4040
4041 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
4042 if (!vport_info) {
4043 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4044 "0543 lpfc_create_static_vport failed to"
4045 " allocate vport_info\n");
4046 mempool_free(pmb, phba->mbox_mem_pool);
4047 return;
4048 }
4049
4050 vport_buff = (uint8_t *) vport_info;
4051 do {
4052
4053
4054
4055
4056 if (pmb->ctx_buf) {
4057 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
4058 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4059 kfree(mp);
4060 pmb->ctx_buf = NULL;
4061 }
4062 if (lpfc_dump_static_vport(phba, pmb, offset))
4063 goto out;
4064
4065 pmb->vport = phba->pport;
4066 mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
4067 LPFC_MBOX_TMO);
4068
4069 if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
4070 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4071 "0544 lpfc_create_static_vport failed to"
4072 " issue dump mailbox command ret 0x%x "
4073 "status 0x%x\n",
4074 mbx_wait_rc, mb->mbxStatus);
4075 goto out;
4076 }
4077
4078 if (phba->sli_rev == LPFC_SLI_REV4) {
4079 byte_count = pmb->u.mqe.un.mb_words[5];
4080 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
4081 if (byte_count > sizeof(struct static_vport_info) -
4082 offset)
4083 byte_count = sizeof(struct static_vport_info)
4084 - offset;
4085 memcpy(vport_buff + offset, mp->virt, byte_count);
4086 offset += byte_count;
4087 } else {
4088 if (mb->un.varDmp.word_cnt >
4089 sizeof(struct static_vport_info) - offset)
4090 mb->un.varDmp.word_cnt =
4091 sizeof(struct static_vport_info)
4092 - offset;
4093 byte_count = mb->un.varDmp.word_cnt;
4094 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
4095 vport_buff + offset,
4096 byte_count);
4097
4098 offset += byte_count;
4099 }
4100
4101 } while (byte_count &&
4102 offset < sizeof(struct static_vport_info));
4103
4104
4105 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
4106 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
4107 != VPORT_INFO_REV)) {
4108 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4109 "0545 lpfc_create_static_vport bad"
4110 " information header 0x%x 0x%x\n",
4111 le32_to_cpu(vport_info->signature),
4112 le32_to_cpu(vport_info->rev) &
4113 VPORT_INFO_REV_MASK);
4114
4115 goto out;
4116 }
4117
4118 shost = lpfc_shost_from_vport(phba->pport);
4119
4120 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
4121 memset(&vport_id, 0, sizeof(vport_id));
4122 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
4123 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
4124 if (!vport_id.port_name || !vport_id.node_name)
4125 continue;
4126
4127 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
4128 vport_id.vport_type = FC_PORTTYPE_NPIV;
4129 vport_id.disable = false;
4130 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
4131
4132 if (!new_fc_vport) {
4133 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4134 "0546 lpfc_create_static_vport failed to"
4135 " create vport\n");
4136 continue;
4137 }
4138
4139 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
4140 vport->vport_flag |= STATIC_VPORT;
4141 }
4142
4143 out:
4144 kfree(vport_info);
4145 if (mbx_wait_rc != MBX_TIMEOUT)
4146 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
4147 }
4148
4149
4150
4151
4152
4153
4154
4155 void
4156 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4157 {
4158 struct lpfc_vport *vport = pmb->vport;
4159 MAILBOX_t *mb = &pmb->u.mb;
4160 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4161 struct Scsi_Host *shost;
4162
4163 pmb->ctx_ndlp = NULL;
4164
4165 if (mb->mbxStatus) {
4166 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4167 "0258 Register Fabric login error: 0x%x\n",
4168 mb->mbxStatus);
4169 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
4170 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4171
4172 lpfc_disc_list_loopmap(vport);
4173
4174
4175 lpfc_disc_start(vport);
4176
4177
4178
4179 lpfc_nlp_put(ndlp);
4180 return;
4181 }
4182
4183 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4184
4185
4186
4187 lpfc_nlp_put(ndlp);
4188 return;
4189 }
4190
4191 if (phba->sli_rev < LPFC_SLI_REV4)
4192 ndlp->nlp_rpi = mb->un.varWords[0];
4193 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4194 ndlp->nlp_type |= NLP_FABRIC;
4195 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4196
4197 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
4198
4199
4200 if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
4201 lpfc_start_fdiscs(phba);
4202 else {
4203 shost = lpfc_shost_from_vport(vport);
4204 spin_lock_irq(shost->host_lock);
4205 vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
4206 spin_unlock_irq(shost->host_lock);
4207 }
4208 lpfc_do_scr_ns_plogi(phba, vport);
4209 }
4210
4211 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
4212
4213
4214
4215
4216 lpfc_nlp_put(ndlp);
4217 return;
4218 }
4219
4220
4221
4222
4223
4224 int
4225 lpfc_issue_gidft(struct lpfc_vport *vport)
4226 {
4227
4228 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4229 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
4230 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
4231
4232
4233
4234 lpfc_printf_vlog(vport, KERN_ERR,
4235 LOG_TRACE_EVENT,
4236 "0604 %s FC TYPE %x %s\n",
4237 "Failed to issue GID_FT to ",
4238 FC_TYPE_FCP,
4239 "Finishing discovery.");
4240 return 0;
4241 }
4242 vport->gidft_inp++;
4243 }
4244
4245 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4246 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
4247 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
4248
4249
4250
4251 lpfc_printf_vlog(vport, KERN_ERR,
4252 LOG_TRACE_EVENT,
4253 "0605 %s FC_TYPE %x %s %d\n",
4254 "Failed to issue GID_FT to ",
4255 FC_TYPE_NVME,
4256 "Finishing discovery: gidftinp ",
4257 vport->gidft_inp);
4258 if (vport->gidft_inp == 0)
4259 return 0;
4260 } else
4261 vport->gidft_inp++;
4262 }
4263 return vport->gidft_inp;
4264 }
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276 int
4277 lpfc_issue_gidpt(struct lpfc_vport *vport)
4278 {
4279
4280 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
4281
4282
4283
4284 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4285 "0606 %s Port TYPE %x %s\n",
4286 "Failed to issue GID_PT to ",
4287 GID_PT_N_PORT,
4288 "Finishing discovery.");
4289 return 0;
4290 }
4291 vport->gidft_inp++;
4292 return 1;
4293 }
4294
4295
4296
4297
4298
4299
4300
4301 void
4302 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4303 {
4304 MAILBOX_t *mb = &pmb->u.mb;
4305 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4306 struct lpfc_vport *vport = pmb->vport;
4307 int rc;
4308
4309 pmb->ctx_ndlp = NULL;
4310 vport->gidft_inp = 0;
4311
4312 if (mb->mbxStatus) {
4313 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4314 "0260 Register NameServer error: 0x%x\n",
4315 mb->mbxStatus);
4316
4317 out:
4318
4319
4320
4321 lpfc_nlp_put(ndlp);
4322 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
4323
4324
4325
4326
4327
4328 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
4329 spin_lock_irq(&ndlp->lock);
4330 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
4331 spin_unlock_irq(&ndlp->lock);
4332 lpfc_nlp_not_used(ndlp);
4333 }
4334
4335 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4336
4337
4338
4339
4340 lpfc_disc_list_loopmap(vport);
4341
4342
4343 lpfc_disc_start(vport);
4344 return;
4345 }
4346 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4347 return;
4348 }
4349
4350 if (phba->sli_rev < LPFC_SLI_REV4)
4351 ndlp->nlp_rpi = mb->un.varWords[0];
4352 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4353 ndlp->nlp_type |= NLP_FABRIC;
4354 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4355 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
4356 "0003 rpi:%x DID:%x flg:%x %d x%px\n",
4357 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4358 kref_read(&ndlp->kref),
4359 ndlp);
4360
4361 if (vport->port_state < LPFC_VPORT_READY) {
4362
4363 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
4364 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
4365 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
4366 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
4367
4368 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4369 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
4370 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
4371
4372 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4373 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
4374 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
4375 FC_TYPE_NVME);
4376
4377
4378 lpfc_issue_els_scr(vport, 0);
4379
4380
4381
4382
4383
4384
4385
4386 if (phba->cmf_active_mode != LPFC_CFG_OFF) {
4387 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
4388 phba->cgn_reg_signal = phba->cgn_init_reg_signal;
4389 rc = lpfc_issue_els_edc(vport, 0);
4390 lpfc_printf_log(phba, KERN_INFO,
4391 LOG_INIT | LOG_ELS | LOG_DISCOVERY,
4392 "4220 EDC issue error x%x, Data: x%x\n",
4393 rc, phba->cgn_init_reg_signal);
4394 } else {
4395 lpfc_issue_els_rdf(vport, 0);
4396 }
4397 }
4398
4399 vport->fc_ns_retry = 0;
4400 if (lpfc_issue_gidft(vport) == 0)
4401 goto out;
4402
4403
4404
4405
4406
4407
4408
4409
4410 lpfc_nlp_put(ndlp);
4411 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
4412 return;
4413 }
4414
4415
4416
4417
4418
4419
4420 void
4421 lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4422 {
4423 struct lpfc_vport *vport = pmb->vport;
4424 MAILBOX_t *mb = &pmb->u.mb;
4425 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4426
4427 pmb->ctx_ndlp = NULL;
4428 if (mb->mbxStatus) {
4429 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4430 "0933 %s: Register FC login error: 0x%x\n",
4431 __func__, mb->mbxStatus);
4432 goto out;
4433 }
4434
4435 lpfc_check_nlp_post_devloss(vport, ndlp);
4436
4437 if (phba->sli_rev < LPFC_SLI_REV4)
4438 ndlp->nlp_rpi = mb->un.varWords[0];
4439
4440 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4441 "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n",
4442 __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
4443 ndlp->nlp_state);
4444
4445 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4446 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
4447 ndlp->nlp_type |= NLP_FABRIC;
4448 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4449
4450 out:
4451 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
4452
4453
4454
4455
4456 lpfc_nlp_put(ndlp);
4457 }
4458
4459 static void
4460 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4461 {
4462 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4463 struct fc_rport *rport;
4464 struct lpfc_rport_data *rdata;
4465 struct fc_rport_identifiers rport_ids;
4466 struct lpfc_hba *phba = vport->phba;
4467 unsigned long flags;
4468
4469 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4470 return;
4471
4472
4473 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
4474 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
4475 rport_ids.port_id = ndlp->nlp_DID;
4476 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4477
4478
4479 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4480 "rport add: did:x%x flg:x%x type x%x",
4481 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4482
4483
4484 if (vport->load_flag & FC_UNLOADING)
4485 return;
4486
4487
4488
4489
4490 if (ndlp->rport) {
4491 rdata = ndlp->rport->dd_data;
4492 rdata->pnode = NULL;
4493 }
4494
4495 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
4496 if (!rport) {
4497 dev_printk(KERN_WARNING, &phba->pcidev->dev,
4498 "Warning: fc_remote_port_add failed\n");
4499 return;
4500 }
4501
4502
4503 rport->maxframe_size = ndlp->nlp_maxframe;
4504 rport->supported_classes = ndlp->nlp_class_sup;
4505 rdata = rport->dd_data;
4506 rdata->pnode = lpfc_nlp_get(ndlp);
4507 if (!rdata->pnode) {
4508 dev_warn(&phba->pcidev->dev,
4509 "Warning - node ref failed. Unreg rport\n");
4510 fc_remote_port_delete(rport);
4511 ndlp->rport = NULL;
4512 return;
4513 }
4514
4515 spin_lock_irqsave(&ndlp->lock, flags);
4516 ndlp->fc4_xpt_flags |= SCSI_XPT_REGD;
4517 spin_unlock_irqrestore(&ndlp->lock, flags);
4518
4519 if (ndlp->nlp_type & NLP_FCP_TARGET)
4520 rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
4521 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
4522 rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
4523 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
4524 rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
4525 if (ndlp->nlp_type & NLP_NVME_TARGET)
4526 rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
4527 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
4528 rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
4529
4530 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
4531 fc_remote_port_rolechg(rport, rport_ids.roles);
4532
4533 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4534 "3183 %s rport x%px DID x%x, role x%x refcnt %d\n",
4535 __func__, rport, rport->port_id, rport->roles,
4536 kref_read(&ndlp->kref));
4537
4538 if ((rport->scsi_target_id != -1) &&
4539 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
4540 ndlp->nlp_sid = rport->scsi_target_id;
4541 }
4542
4543 return;
4544 }
4545
4546 static void
4547 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
4548 {
4549 struct fc_rport *rport = ndlp->rport;
4550 struct lpfc_vport *vport = ndlp->vport;
4551
4552 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4553 return;
4554
4555 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4556 "rport delete: did:x%x flg:x%x type x%x",
4557 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4558
4559 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4560 "3184 rport unregister x%06x, rport x%px "
4561 "xptflg x%x refcnt %d\n",
4562 ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags,
4563 kref_read(&ndlp->kref));
4564
4565 fc_remote_port_delete(rport);
4566 lpfc_nlp_put(ndlp);
4567 }
4568
4569 static void
4570 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
4571 {
4572 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4573 unsigned long iflags;
4574
4575 spin_lock_irqsave(shost->host_lock, iflags);
4576 switch (state) {
4577 case NLP_STE_UNUSED_NODE:
4578 vport->fc_unused_cnt += count;
4579 break;
4580 case NLP_STE_PLOGI_ISSUE:
4581 vport->fc_plogi_cnt += count;
4582 break;
4583 case NLP_STE_ADISC_ISSUE:
4584 vport->fc_adisc_cnt += count;
4585 break;
4586 case NLP_STE_REG_LOGIN_ISSUE:
4587 vport->fc_reglogin_cnt += count;
4588 break;
4589 case NLP_STE_PRLI_ISSUE:
4590 vport->fc_prli_cnt += count;
4591 break;
4592 case NLP_STE_UNMAPPED_NODE:
4593 vport->fc_unmap_cnt += count;
4594 break;
4595 case NLP_STE_MAPPED_NODE:
4596 vport->fc_map_cnt += count;
4597 break;
4598 case NLP_STE_NPR_NODE:
4599 if (vport->fc_npr_cnt == 0 && count == -1)
4600 vport->fc_npr_cnt = 0;
4601 else
4602 vport->fc_npr_cnt += count;
4603 break;
4604 }
4605 spin_unlock_irqrestore(shost->host_lock, iflags);
4606 }
4607
4608
4609 void
4610 lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4611 {
4612 unsigned long iflags;
4613
4614 lpfc_check_nlp_post_devloss(vport, ndlp);
4615
4616 spin_lock_irqsave(&ndlp->lock, iflags);
4617 if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
4618
4619 spin_unlock_irqrestore(&ndlp->lock, iflags);
4620
4621 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
4622 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
4623 lpfc_nvme_rescan_port(vport, ndlp);
4624 }
4625 return;
4626 }
4627
4628 ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
4629 spin_unlock_irqrestore(&ndlp->lock, iflags);
4630
4631 if (lpfc_valid_xpt_node(ndlp)) {
4632 vport->phba->nport_event_cnt++;
4633
4634
4635
4636
4637 lpfc_register_remote_port(vport, ndlp);
4638 }
4639
4640
4641 if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
4642 return;
4643
4644
4645 if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
4646 ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4647 if (vport->phba->nvmet_support == 0) {
4648
4649
4650
4651
4652 if (ndlp->nlp_type & NLP_NVME_TARGET) {
4653 vport->phba->nport_event_cnt++;
4654 lpfc_nvme_register_port(vport, ndlp);
4655 }
4656 } else {
4657
4658
4659
4660 lpfc_nlp_get(ndlp);
4661 }
4662 }
4663 }
4664
4665
4666 void
4667 lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4668 {
4669 unsigned long iflags;
4670
4671 spin_lock_irqsave(&ndlp->lock, iflags);
4672 if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
4673 spin_unlock_irqrestore(&ndlp->lock, iflags);
4674 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
4675 "0999 %s Not regd: ndlp x%px rport x%px DID "
4676 "x%x FLG x%x XPT x%x\n",
4677 __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
4678 ndlp->nlp_flag, ndlp->fc4_xpt_flags);
4679 return;
4680 }
4681
4682 ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
4683 spin_unlock_irqrestore(&ndlp->lock, iflags);
4684
4685 if (ndlp->rport &&
4686 ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
4687 vport->phba->nport_event_cnt++;
4688 lpfc_unregister_remote_port(ndlp);
4689 } else if (!ndlp->rport) {
4690 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
4691 "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
4692 " XPT x%x refcnt %d\n",
4693 __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
4694 ndlp->fc4_xpt_flags,
4695 kref_read(&ndlp->kref));
4696 }
4697
4698 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
4699 vport->phba->nport_event_cnt++;
4700 if (vport->phba->nvmet_support == 0) {
4701
4702 if (ndlp->nlp_type & NLP_NVME_TARGET)
4703 lpfc_nvme_unregister_port(vport, ndlp);
4704 } else {
4705
4706 lpfc_nlp_put(ndlp);
4707 }
4708 }
4709
4710 }
4711
4712
4713
4714
4715 static void
4716 lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4717 int new_state)
4718 {
4719 switch (new_state) {
4720
4721
4722
4723
4724 case NLP_STE_ADISC_ISSUE:
4725 break;
4726
4727
4728
4729
4730
4731
4732 case NLP_STE_UNMAPPED_NODE:
4733 ndlp->nlp_type |= NLP_FC_NODE;
4734 fallthrough;
4735 case NLP_STE_MAPPED_NODE:
4736 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4737 lpfc_nlp_reg_node(vport, ndlp);
4738 break;
4739
4740
4741
4742
4743
4744
4745
4746 case NLP_STE_NPR_NODE:
4747 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4748 fallthrough;
4749 default:
4750 lpfc_nlp_unreg_node(vport, ndlp);
4751 break;
4752 }
4753
4754 }
4755
4756 static void
4757 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4758 int old_state, int new_state)
4759 {
4760
4761 if (new_state == NLP_STE_ADISC_ISSUE ||
4762 old_state == NLP_STE_ADISC_ISSUE) {
4763 lpfc_handle_adisc_state(vport, ndlp, new_state);
4764 return;
4765 }
4766
4767 if (new_state == NLP_STE_UNMAPPED_NODE) {
4768 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4769 ndlp->nlp_type |= NLP_FC_NODE;
4770 }
4771 if (new_state == NLP_STE_MAPPED_NODE)
4772 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4773 if (new_state == NLP_STE_NPR_NODE)
4774 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4775
4776
4777 if ((old_state == NLP_STE_MAPPED_NODE ||
4778 old_state == NLP_STE_UNMAPPED_NODE)) {
4779
4780
4781
4782 if (!(ndlp->nlp_flag & NLP_NPR_ADISC) ||
4783 !lpfc_is_link_up(vport->phba))
4784 lpfc_nlp_unreg_node(vport, ndlp);
4785 }
4786
4787 if (new_state == NLP_STE_MAPPED_NODE ||
4788 new_state == NLP_STE_UNMAPPED_NODE)
4789 lpfc_nlp_reg_node(vport, ndlp);
4790
4791 if ((new_state == NLP_STE_MAPPED_NODE) &&
4792 (vport->stat_data_enabled)) {
4793
4794
4795
4796
4797 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
4798 sizeof(struct lpfc_scsicmd_bkt),
4799 GFP_KERNEL);
4800
4801 if (!ndlp->lat_data)
4802 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4803 "0286 lpfc_nlp_state_cleanup failed to "
4804 "allocate statistical data buffer DID "
4805 "0x%x\n", ndlp->nlp_DID);
4806 }
4807
4808
4809
4810
4811
4812
4813 if ((new_state == NLP_STE_MAPPED_NODE) &&
4814 (ndlp->nlp_type & NLP_FCP_TARGET) &&
4815 (!ndlp->rport ||
4816 ndlp->rport->scsi_target_id == -1 ||
4817 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
4818 spin_lock_irq(&ndlp->lock);
4819 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
4820 spin_unlock_irq(&ndlp->lock);
4821 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4822 }
4823 }
4824
4825 static char *
4826 lpfc_nlp_state_name(char *buffer, size_t size, int state)
4827 {
4828 static char *states[] = {
4829 [NLP_STE_UNUSED_NODE] = "UNUSED",
4830 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
4831 [NLP_STE_ADISC_ISSUE] = "ADISC",
4832 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
4833 [NLP_STE_PRLI_ISSUE] = "PRLI",
4834 [NLP_STE_LOGO_ISSUE] = "LOGO",
4835 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
4836 [NLP_STE_MAPPED_NODE] = "MAPPED",
4837 [NLP_STE_NPR_NODE] = "NPR",
4838 };
4839
4840 if (state < NLP_STE_MAX_STATE && states[state])
4841 strlcpy(buffer, states[state], size);
4842 else
4843 snprintf(buffer, size, "unknown (%d)", state);
4844 return buffer;
4845 }
4846
4847 void
4848 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4849 int state)
4850 {
4851 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4852 int old_state = ndlp->nlp_state;
4853 int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
4854 char name1[16], name2[16];
4855
4856 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4857 "0904 NPort state transition x%06x, %s -> %s\n",
4858 ndlp->nlp_DID,
4859 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
4860 lpfc_nlp_state_name(name2, sizeof(name2), state));
4861
4862 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4863 "node statechg did:x%x old:%d ste:%d",
4864 ndlp->nlp_DID, old_state, state);
4865
4866 if (node_dropped && old_state == NLP_STE_UNUSED_NODE &&
4867 state != NLP_STE_UNUSED_NODE) {
4868 ndlp->nlp_flag &= ~NLP_DROPPED;
4869 lpfc_nlp_get(ndlp);
4870 }
4871
4872 if (old_state == NLP_STE_NPR_NODE &&
4873 state != NLP_STE_NPR_NODE)
4874 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4875 if (old_state == NLP_STE_UNMAPPED_NODE) {
4876 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4877 ndlp->nlp_type &= ~NLP_FC_NODE;
4878 }
4879
4880 if (list_empty(&ndlp->nlp_listp)) {
4881 spin_lock_irq(shost->host_lock);
4882 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4883 spin_unlock_irq(shost->host_lock);
4884 } else if (old_state)
4885 lpfc_nlp_counters(vport, old_state, -1);
4886
4887 ndlp->nlp_state = state;
4888 lpfc_nlp_counters(vport, state, 1);
4889 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4890 }
4891
4892 void
4893 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4894 {
4895 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4896
4897 if (list_empty(&ndlp->nlp_listp)) {
4898 spin_lock_irq(shost->host_lock);
4899 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4900 spin_unlock_irq(shost->host_lock);
4901 }
4902 }
4903
4904 void
4905 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4906 {
4907 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4908
4909 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4910 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4911 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4912 spin_lock_irq(shost->host_lock);
4913 list_del_init(&ndlp->nlp_listp);
4914 spin_unlock_irq(shost->host_lock);
4915 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4916 NLP_STE_UNUSED_NODE);
4917 }
4918
4919
4920
4921
4922
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933 static inline void
4934 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4935 uint32_t did)
4936 {
4937 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4938 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4939 timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
4940 INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
4941
4942 ndlp->nlp_DID = did;
4943 ndlp->vport = vport;
4944 ndlp->phba = vport->phba;
4945 ndlp->nlp_sid = NLP_NO_SID;
4946 ndlp->nlp_fc4_type = NLP_FC4_NONE;
4947 kref_init(&ndlp->kref);
4948 atomic_set(&ndlp->cmd_pending, 0);
4949 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4950 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4951 }
4952
4953 void
4954 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4955 {
4956
4957
4958
4959
4960
4961
4962
4963 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4964 return;
4965 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4966 ndlp->nlp_flag |= NLP_DROPPED;
4967 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4968 lpfc_cleanup_vports_rrqs(vport, ndlp);
4969 lpfc_unreg_rpi(vport, ndlp);
4970 }
4971
4972 lpfc_nlp_put(ndlp);
4973 return;
4974 }
4975
4976
4977
4978
4979 void
4980 lpfc_set_disctmo(struct lpfc_vport *vport)
4981 {
4982 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4983 struct lpfc_hba *phba = vport->phba;
4984 uint32_t tmo;
4985
4986 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
4987
4988 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
4989 } else {
4990
4991
4992
4993 tmo = ((phba->fc_ratov * 3) + 3);
4994 }
4995
4996
4997 if (!timer_pending(&vport->fc_disctmo)) {
4998 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4999 "set disc timer: tmo:x%x state:x%x flg:x%x",
5000 tmo, vport->port_state, vport->fc_flag);
5001 }
5002
5003 mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
5004 spin_lock_irq(shost->host_lock);
5005 vport->fc_flag |= FC_DISC_TMO;
5006 spin_unlock_irq(shost->host_lock);
5007
5008
5009 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5010 "0247 Start Discovery Timer state x%x "
5011 "Data: x%x x%lx x%x x%x\n",
5012 vport->port_state, tmo,
5013 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
5014 vport->fc_adisc_cnt);
5015
5016 return;
5017 }
5018
5019
5020
5021
5022 int
5023 lpfc_can_disctmo(struct lpfc_vport *vport)
5024 {
5025 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5026 unsigned long iflags;
5027
5028 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5029 "can disc timer: state:x%x rtry:x%x flg:x%x",
5030 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
5031
5032
5033 if (vport->fc_flag & FC_DISC_TMO ||
5034 timer_pending(&vport->fc_disctmo)) {
5035 spin_lock_irqsave(shost->host_lock, iflags);
5036 vport->fc_flag &= ~FC_DISC_TMO;
5037 spin_unlock_irqrestore(shost->host_lock, iflags);
5038 del_timer_sync(&vport->fc_disctmo);
5039 spin_lock_irqsave(&vport->work_port_lock, iflags);
5040 vport->work_port_events &= ~WORKER_DISC_TMO;
5041 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
5042 }
5043
5044
5045 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5046 "0248 Cancel Discovery Timer state x%x "
5047 "Data: x%x x%x x%x\n",
5048 vport->port_state, vport->fc_flag,
5049 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
5050 return 0;
5051 }
5052
5053
5054
5055
5056
5057 int
5058 lpfc_check_sli_ndlp(struct lpfc_hba *phba,
5059 struct lpfc_sli_ring *pring,
5060 struct lpfc_iocbq *iocb,
5061 struct lpfc_nodelist *ndlp)
5062 {
5063 struct lpfc_vport *vport = ndlp->vport;
5064 u8 ulp_command;
5065 u16 ulp_context;
5066 u32 remote_id;
5067
5068 if (iocb->vport != vport)
5069 return 0;
5070
5071 ulp_command = get_job_cmnd(phba, iocb);
5072 ulp_context = get_job_ulpcontext(phba, iocb);
5073 remote_id = get_job_els_rsp64_did(phba, iocb);
5074
5075 if (pring->ringno == LPFC_ELS_RING) {
5076 switch (ulp_command) {
5077 case CMD_GEN_REQUEST64_CR:
5078 if (iocb->ndlp == ndlp)
5079 return 1;
5080 fallthrough;
5081 case CMD_ELS_REQUEST64_CR:
5082 if (remote_id == ndlp->nlp_DID)
5083 return 1;
5084 fallthrough;
5085 case CMD_XMIT_ELS_RSP64_CX:
5086 if (iocb->ndlp == ndlp)
5087 return 1;
5088 }
5089 } else if (pring->ringno == LPFC_FCP_RING) {
5090
5091 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
5092 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
5093 return 0;
5094 }
5095 if (ulp_context == ndlp->nlp_rpi)
5096 return 1;
5097 }
5098 return 0;
5099 }
5100
5101 static void
5102 __lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
5103 struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
5104 struct list_head *dequeue_list)
5105 {
5106 struct lpfc_iocbq *iocb, *next_iocb;
5107
5108 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
5109
5110 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
5111
5112 list_move_tail(&iocb->list, dequeue_list);
5113 }
5114 }
5115
5116 static void
5117 lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
5118 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
5119 {
5120 struct lpfc_sli *psli = &phba->sli;
5121 uint32_t i;
5122
5123 spin_lock_irq(&phba->hbalock);
5124 for (i = 0; i < psli->num_rings; i++)
5125 __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
5126 dequeue_list);
5127 spin_unlock_irq(&phba->hbalock);
5128 }
5129
5130 static void
5131 lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
5132 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
5133 {
5134 struct lpfc_sli_ring *pring;
5135 struct lpfc_queue *qp = NULL;
5136
5137 spin_lock_irq(&phba->hbalock);
5138 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
5139 pring = qp->pring;
5140 if (!pring)
5141 continue;
5142 spin_lock(&pring->ring_lock);
5143 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
5144 spin_unlock(&pring->ring_lock);
5145 }
5146 spin_unlock_irq(&phba->hbalock);
5147 }
5148
5149
5150
5151
5152
5153 static int
5154 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5155 {
5156 LIST_HEAD(completions);
5157
5158 lpfc_fabric_abort_nport(ndlp);
5159
5160
5161
5162
5163
5164 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5165 if (phba->sli_rev != LPFC_SLI_REV4)
5166 lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
5167 else
5168 lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
5169 }
5170
5171
5172 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5173 IOERR_SLI_ABORTED);
5174
5175 return 0;
5176 }
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186 static void
5187 lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5188 {
5189 struct lpfc_vport *vport = pmb->vport;
5190 struct lpfc_nodelist *ndlp;
5191
5192 ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
5193 if (!ndlp)
5194 return;
5195 lpfc_issue_els_logo(vport, ndlp, 0);
5196
5197
5198 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
5199 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
5200 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5201 "1434 UNREG cmpl deferred logo x%x "
5202 "on NPort x%x Data: x%x x%px\n",
5203 ndlp->nlp_rpi, ndlp->nlp_DID,
5204 ndlp->nlp_defer_did, ndlp);
5205
5206 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5207 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
5208 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5209 } else {
5210
5211 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
5212 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
5213 spin_lock_irq(&ndlp->lock);
5214 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
5215 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
5216 spin_unlock_irq(&ndlp->lock);
5217 }
5218 spin_lock_irq(&ndlp->lock);
5219 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5220 spin_unlock_irq(&ndlp->lock);
5221 }
5222
5223
5224
5225
5226
5227 lpfc_nlp_put(ndlp);
5228 mempool_free(pmb, phba->mbox_mem_pool);
5229 }
5230
5231
5232
5233
5234
5235
5236 static void
5237 lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
5238 struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
5239 {
5240 unsigned long iflags;
5241
5242
5243
5244
5245 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
5246 if (!mbox->ctx_ndlp)
5247 return;
5248
5249 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
5250 mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
5251
5252 } else if (phba->sli_rev == LPFC_SLI_REV4 &&
5253 (!(vport->load_flag & FC_UNLOADING)) &&
5254 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
5255 LPFC_SLI_INTF_IF_TYPE_2) &&
5256 (kref_read(&ndlp->kref) > 0)) {
5257 mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
5258 } else {
5259 if (vport->load_flag & FC_UNLOADING) {
5260 if (phba->sli_rev == LPFC_SLI_REV4) {
5261 spin_lock_irqsave(&ndlp->lock, iflags);
5262 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5263 spin_unlock_irqrestore(&ndlp->lock, iflags);
5264 }
5265 }
5266 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5267 }
5268 }
5269
5270
5271
5272
5273
5274
5275
5276
5277
5278
5279 int
5280 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5281 {
5282 struct lpfc_hba *phba = vport->phba;
5283 LPFC_MBOXQ_t *mbox;
5284 int rc, acc_plogi = 1;
5285 uint16_t rpi;
5286
5287 if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
5288 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
5289 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
5290 lpfc_printf_vlog(vport, KERN_INFO,
5291 LOG_NODE | LOG_DISCOVERY,
5292 "3366 RPI x%x needs to be "
5293 "unregistered nlp_flag x%x "
5294 "did x%x\n",
5295 ndlp->nlp_rpi, ndlp->nlp_flag,
5296 ndlp->nlp_DID);
5297
5298
5299
5300
5301 if (ndlp->nlp_flag & NLP_UNREG_INP) {
5302 lpfc_printf_vlog(vport, KERN_INFO,
5303 LOG_NODE | LOG_DISCOVERY,
5304 "1436 unreg_rpi SKIP UNREG x%x on "
5305 "NPort x%x deferred x%x flg x%x "
5306 "Data: x%px\n",
5307 ndlp->nlp_rpi, ndlp->nlp_DID,
5308 ndlp->nlp_defer_did,
5309 ndlp->nlp_flag, ndlp);
5310 goto out;
5311 }
5312
5313 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5314 if (mbox) {
5315
5316 rpi = ndlp->nlp_rpi;
5317 if (phba->sli_rev == LPFC_SLI_REV4)
5318 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
5319
5320 lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
5321 mbox->vport = vport;
5322 lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
5323 if (!mbox->ctx_ndlp) {
5324 mempool_free(mbox, phba->mbox_mem_pool);
5325 return 1;
5326 }
5327
5328 if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
5329
5330
5331
5332 acc_plogi = 0;
5333 if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
5334 Fabric_DID_MASK) &&
5335 (!(vport->fc_flag & FC_OFFLINE_MODE)))
5336 ndlp->nlp_flag |= NLP_UNREG_INP;
5337
5338 lpfc_printf_vlog(vport, KERN_INFO,
5339 LOG_NODE | LOG_DISCOVERY,
5340 "1433 unreg_rpi UNREG x%x on "
5341 "NPort x%x deferred flg x%x "
5342 "Data:x%px\n",
5343 ndlp->nlp_rpi, ndlp->nlp_DID,
5344 ndlp->nlp_flag, ndlp);
5345
5346 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5347 if (rc == MBX_NOT_FINISHED) {
5348 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5349 mempool_free(mbox, phba->mbox_mem_pool);
5350 acc_plogi = 1;
5351 lpfc_nlp_put(ndlp);
5352 }
5353 } else {
5354 lpfc_printf_vlog(vport, KERN_INFO,
5355 LOG_NODE | LOG_DISCOVERY,
5356 "1444 Failed to allocate mempool "
5357 "unreg_rpi UNREG x%x, "
5358 "DID x%x, flag x%x, "
5359 "ndlp x%px\n",
5360 ndlp->nlp_rpi, ndlp->nlp_DID,
5361 ndlp->nlp_flag, ndlp);
5362
5363
5364
5365
5366
5367 if (!(vport->load_flag & FC_UNLOADING)) {
5368 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5369 lpfc_issue_els_logo(vport, ndlp, 0);
5370 ndlp->nlp_prev_state = ndlp->nlp_state;
5371 lpfc_nlp_set_state(vport, ndlp,
5372 NLP_STE_NPR_NODE);
5373 }
5374
5375 return 1;
5376 }
5377 lpfc_no_rpi(phba, ndlp);
5378 out:
5379 if (phba->sli_rev != LPFC_SLI_REV4)
5380 ndlp->nlp_rpi = 0;
5381 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
5382 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5383 if (acc_plogi)
5384 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5385 return 1;
5386 }
5387 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5388 return 0;
5389 }
5390
5391
5392
5393
5394
5395
5396
5397
5398 void
5399 lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
5400 {
5401 struct lpfc_vport **vports;
5402 struct lpfc_nodelist *ndlp;
5403 struct Scsi_Host *shost;
5404 int i;
5405
5406 vports = lpfc_create_vport_work_array(phba);
5407 if (!vports) {
5408 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5409 "2884 Vport array allocation failed \n");
5410 return;
5411 }
5412 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5413 shost = lpfc_shost_from_vport(vports[i]);
5414 spin_lock_irq(shost->host_lock);
5415 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5416 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5417
5418 spin_unlock_irq(shost->host_lock);
5419 lpfc_unreg_rpi(vports[i], ndlp);
5420 spin_lock_irq(shost->host_lock);
5421 }
5422 }
5423 spin_unlock_irq(shost->host_lock);
5424 }
5425 lpfc_destroy_vport_work_array(phba, vports);
5426 }
5427
5428 void
5429 lpfc_unreg_all_rpis(struct lpfc_vport *vport)
5430 {
5431 struct lpfc_hba *phba = vport->phba;
5432 LPFC_MBOXQ_t *mbox;
5433 int rc;
5434
5435 if (phba->sli_rev == LPFC_SLI_REV4) {
5436 lpfc_sli4_unreg_all_rpis(vport);
5437 return;
5438 }
5439
5440 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5441 if (mbox) {
5442 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
5443 mbox);
5444 mbox->vport = vport;
5445 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5446 mbox->ctx_ndlp = NULL;
5447 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
5448 if (rc != MBX_TIMEOUT)
5449 mempool_free(mbox, phba->mbox_mem_pool);
5450
5451 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
5452 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5453 "1836 Could not issue "
5454 "unreg_login(all_rpis) status %d\n",
5455 rc);
5456 }
5457 }
5458
5459 void
5460 lpfc_unreg_default_rpis(struct lpfc_vport *vport)
5461 {
5462 struct lpfc_hba *phba = vport->phba;
5463 LPFC_MBOXQ_t *mbox;
5464 int rc;
5465
5466
5467 if (phba->sli_rev > LPFC_SLI_REV3)
5468 return;
5469
5470 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5471 if (mbox) {
5472 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
5473 mbox);
5474 mbox->vport = vport;
5475 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5476 mbox->ctx_ndlp = NULL;
5477 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
5478 if (rc != MBX_TIMEOUT)
5479 mempool_free(mbox, phba->mbox_mem_pool);
5480
5481 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
5482 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5483 "1815 Could not issue "
5484 "unreg_did (default rpis) status %d\n",
5485 rc);
5486 }
5487 }
5488
5489
5490
5491
5492
5493 static int
5494 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5495 {
5496 struct lpfc_hba *phba = vport->phba;
5497 LPFC_MBOXQ_t *mb, *nextmb;
5498
5499
5500 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5501 "0900 Cleanup node for NPort x%x "
5502 "Data: x%x x%x x%x\n",
5503 ndlp->nlp_DID, ndlp->nlp_flag,
5504 ndlp->nlp_state, ndlp->nlp_rpi);
5505 lpfc_dequeue_node(vport, ndlp);
5506
5507
5508
5509
5510 if ((mb = phba->sli.mbox_active)) {
5511 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
5512 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
5513 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5514 mb->ctx_ndlp = NULL;
5515 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5516 }
5517 }
5518
5519 spin_lock_irq(&phba->hbalock);
5520
5521 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
5522 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
5523 (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
5524 (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
5525 continue;
5526
5527 mb->ctx_ndlp = NULL;
5528 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5529 }
5530
5531 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
5532 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
5533 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
5534 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5535 list_del(&mb->list);
5536 lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED);
5537
5538
5539
5540
5541 }
5542 }
5543 spin_unlock_irq(&phba->hbalock);
5544
5545 lpfc_els_abort(phba, ndlp);
5546
5547 spin_lock_irq(&ndlp->lock);
5548 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
5549 spin_unlock_irq(&ndlp->lock);
5550
5551 ndlp->nlp_last_elscmd = 0;
5552 del_timer_sync(&ndlp->nlp_delayfunc);
5553
5554 list_del_init(&ndlp->els_retry_evt.evt_listp);
5555 list_del_init(&ndlp->dev_loss_evt.evt_listp);
5556 list_del_init(&ndlp->recovery_evt.evt_listp);
5557 lpfc_cleanup_vports_rrqs(vport, ndlp);
5558
5559 if (phba->sli_rev == LPFC_SLI_REV4)
5560 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5561
5562 return 0;
5563 }
5564
5565 static int
5566 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5567 uint32_t did)
5568 {
5569 D_ID mydid, ndlpdid, matchdid;
5570
5571 if (did == Bcast_DID)
5572 return 0;
5573
5574
5575 if (ndlp->nlp_DID == did)
5576 return 1;
5577
5578
5579 mydid.un.word = vport->fc_myDID;
5580 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
5581 return 0;
5582 }
5583
5584 matchdid.un.word = did;
5585 ndlpdid.un.word = ndlp->nlp_DID;
5586 if (matchdid.un.b.id == ndlpdid.un.b.id) {
5587 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
5588 (mydid.un.b.area == matchdid.un.b.area)) {
5589
5590
5591
5592
5593
5594
5595
5596
5597 if ((ndlpdid.un.b.domain == 0) &&
5598 (ndlpdid.un.b.area == 0)) {
5599 if (ndlpdid.un.b.id &&
5600 vport->phba->fc_topology ==
5601 LPFC_TOPOLOGY_LOOP)
5602 return 1;
5603 }
5604 return 0;
5605 }
5606
5607 matchdid.un.word = ndlp->nlp_DID;
5608 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
5609 (mydid.un.b.area == ndlpdid.un.b.area)) {
5610 if ((matchdid.un.b.domain == 0) &&
5611 (matchdid.un.b.area == 0)) {
5612 if (matchdid.un.b.id)
5613 return 1;
5614 }
5615 }
5616 }
5617 return 0;
5618 }
5619
5620
5621 static struct lpfc_nodelist *
5622 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5623 {
5624 struct lpfc_nodelist *ndlp;
5625 uint32_t data1;
5626
5627 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5628 if (lpfc_matchdid(vport, ndlp, did)) {
5629 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5630 ((uint32_t)ndlp->nlp_xri << 16) |
5631 ((uint32_t)ndlp->nlp_type << 8)
5632 );
5633 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5634 "0929 FIND node DID "
5635 "Data: x%px x%x x%x x%x x%x x%px\n",
5636 ndlp, ndlp->nlp_DID,
5637 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
5638 ndlp->active_rrqs_xri_bitmap);
5639 return ndlp;
5640 }
5641 }
5642
5643
5644 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5645 "0932 FIND node did x%x NOT FOUND.\n", did);
5646 return NULL;
5647 }
5648
5649 struct lpfc_nodelist *
5650 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5651 {
5652 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5653 struct lpfc_nodelist *ndlp;
5654 unsigned long iflags;
5655
5656 spin_lock_irqsave(shost->host_lock, iflags);
5657 ndlp = __lpfc_findnode_did(vport, did);
5658 spin_unlock_irqrestore(shost->host_lock, iflags);
5659 return ndlp;
5660 }
5661
5662 struct lpfc_nodelist *
5663 lpfc_findnode_mapped(struct lpfc_vport *vport)
5664 {
5665 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5666 struct lpfc_nodelist *ndlp;
5667 uint32_t data1;
5668 unsigned long iflags;
5669
5670 spin_lock_irqsave(shost->host_lock, iflags);
5671
5672 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5673 if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
5674 ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
5675 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5676 ((uint32_t)ndlp->nlp_xri << 16) |
5677 ((uint32_t)ndlp->nlp_type << 8) |
5678 ((uint32_t)ndlp->nlp_rpi & 0xff));
5679 spin_unlock_irqrestore(shost->host_lock, iflags);
5680 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5681 "2025 FIND node DID "
5682 "Data: x%px x%x x%x x%x x%px\n",
5683 ndlp, ndlp->nlp_DID,
5684 ndlp->nlp_flag, data1,
5685 ndlp->active_rrqs_xri_bitmap);
5686 return ndlp;
5687 }
5688 }
5689 spin_unlock_irqrestore(shost->host_lock, iflags);
5690
5691
5692 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5693 "2026 FIND mapped did NOT FOUND.\n");
5694 return NULL;
5695 }
5696
5697 struct lpfc_nodelist *
5698 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
5699 {
5700 struct lpfc_nodelist *ndlp;
5701
5702 ndlp = lpfc_findnode_did(vport, did);
5703 if (!ndlp) {
5704 if (vport->phba->nvmet_support)
5705 return NULL;
5706 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
5707 lpfc_rscn_payload_check(vport, did) == 0)
5708 return NULL;
5709 ndlp = lpfc_nlp_init(vport, did);
5710 if (!ndlp)
5711 return NULL;
5712 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5713
5714 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5715 "6453 Setup New Node 2B_DISC x%x "
5716 "Data:x%x x%x x%x\n",
5717 ndlp->nlp_DID, ndlp->nlp_flag,
5718 ndlp->nlp_state, vport->fc_flag);
5719
5720 spin_lock_irq(&ndlp->lock);
5721 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5722 spin_unlock_irq(&ndlp->lock);
5723 return ndlp;
5724 }
5725
5726
5727
5728
5729
5730 if ((vport->fc_flag & FC_RSCN_MODE) &&
5731 !(vport->fc_flag & FC_NDISC_ACTIVE)) {
5732 if (lpfc_rscn_payload_check(vport, did)) {
5733
5734
5735
5736
5737 lpfc_cancel_retry_delay_tmo(vport, ndlp);
5738
5739 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5740 "6455 Setup RSCN Node 2B_DISC x%x "
5741 "Data:x%x x%x x%x\n",
5742 ndlp->nlp_DID, ndlp->nlp_flag,
5743 ndlp->nlp_state, vport->fc_flag);
5744
5745
5746
5747
5748
5749
5750 if (vport->phba->nvmet_support)
5751 return ndlp;
5752
5753
5754
5755
5756 if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
5757 !(ndlp->nlp_type &
5758 (NLP_FCP_TARGET | NLP_NVME_TARGET)))
5759 return NULL;
5760
5761 ndlp->nlp_prev_state = ndlp->nlp_state;
5762 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5763
5764 spin_lock_irq(&ndlp->lock);
5765 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5766 spin_unlock_irq(&ndlp->lock);
5767 } else {
5768 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5769 "6456 Skip Setup RSCN Node x%x "
5770 "Data:x%x x%x x%x\n",
5771 ndlp->nlp_DID, ndlp->nlp_flag,
5772 ndlp->nlp_state, vport->fc_flag);
5773 ndlp = NULL;
5774 }
5775 } else {
5776 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5777 "6457 Setup Active Node 2B_DISC x%x "
5778 "Data:x%x x%x x%x\n",
5779 ndlp->nlp_DID, ndlp->nlp_flag,
5780 ndlp->nlp_state, vport->fc_flag);
5781
5782
5783
5784
5785
5786 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
5787 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5788 (!vport->phba->nvmet_support &&
5789 ndlp->nlp_flag & NLP_RCV_PLOGI))
5790 return NULL;
5791
5792 if (vport->phba->nvmet_support)
5793 return ndlp;
5794
5795
5796
5797
5798 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5799
5800 spin_lock_irq(&ndlp->lock);
5801 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5802 spin_unlock_irq(&ndlp->lock);
5803 }
5804 return ndlp;
5805 }
5806
5807
5808 void
5809 lpfc_disc_list_loopmap(struct lpfc_vport *vport)
5810 {
5811 struct lpfc_hba *phba = vport->phba;
5812 int j;
5813 uint32_t alpa, index;
5814
5815 if (!lpfc_is_link_up(phba))
5816 return;
5817
5818 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5819 return;
5820
5821
5822 if (phba->alpa_map[0]) {
5823 for (j = 1; j <= phba->alpa_map[0]; j++) {
5824 alpa = phba->alpa_map[j];
5825 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
5826 continue;
5827 lpfc_setup_disc_node(vport, alpa);
5828 }
5829 } else {
5830
5831 for (j = 0; j < FC_MAXLOOP; j++) {
5832
5833
5834
5835 if (vport->cfg_scan_down)
5836 index = j;
5837 else
5838 index = FC_MAXLOOP - j - 1;
5839 alpa = lpfcAlpaArray[index];
5840 if ((vport->fc_myDID & 0xff) == alpa)
5841 continue;
5842 lpfc_setup_disc_node(vport, alpa);
5843 }
5844 }
5845 return;
5846 }
5847
5848
5849 void
5850 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
5851 {
5852 LPFC_MBOXQ_t *mbox;
5853 struct lpfc_sli *psli = &phba->sli;
5854 struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
5855 struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING];
5856 int rc;
5857
5858
5859
5860
5861
5862 if ((phba->link_state >= LPFC_CLEAR_LA) ||
5863 (vport->port_type != LPFC_PHYSICAL_PORT) ||
5864 (phba->sli_rev == LPFC_SLI_REV4))
5865 return;
5866
5867
5868 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
5869 phba->link_state = LPFC_CLEAR_LA;
5870 lpfc_clear_la(phba, mbox);
5871 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
5872 mbox->vport = vport;
5873 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5874 if (rc == MBX_NOT_FINISHED) {
5875 mempool_free(mbox, phba->mbox_mem_pool);
5876 lpfc_disc_flush_list(vport);
5877 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5878 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5879 phba->link_state = LPFC_HBA_ERROR;
5880 }
5881 }
5882 }
5883
5884
5885 void
5886 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5887 {
5888 LPFC_MBOXQ_t *regvpimbox;
5889
5890 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5891 if (regvpimbox) {
5892 lpfc_reg_vpi(vport, regvpimbox);
5893 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
5894 regvpimbox->vport = vport;
5895 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
5896 == MBX_NOT_FINISHED) {
5897 mempool_free(regvpimbox, phba->mbox_mem_pool);
5898 }
5899 }
5900 }
5901
5902
5903 void
5904 lpfc_disc_start(struct lpfc_vport *vport)
5905 {
5906 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5907 struct lpfc_hba *phba = vport->phba;
5908 uint32_t num_sent;
5909 uint32_t clear_la_pending;
5910
5911 if (!lpfc_is_link_up(phba)) {
5912 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5913 "3315 Link is not up %x\n",
5914 phba->link_state);
5915 return;
5916 }
5917
5918 if (phba->link_state == LPFC_CLEAR_LA)
5919 clear_la_pending = 1;
5920 else
5921 clear_la_pending = 0;
5922
5923 if (vport->port_state < LPFC_VPORT_READY)
5924 vport->port_state = LPFC_DISC_AUTH;
5925
5926 lpfc_set_disctmo(vport);
5927
5928 vport->fc_prevDID = vport->fc_myDID;
5929 vport->num_disc_nodes = 0;
5930
5931
5932 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5933 "0202 Start Discovery port state x%x "
5934 "flg x%x Data: x%x x%x x%x\n",
5935 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
5936 vport->fc_adisc_cnt, vport->fc_npr_cnt);
5937
5938
5939 num_sent = lpfc_els_disc_adisc(vport);
5940
5941 if (num_sent)
5942 return;
5943
5944
5945 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5946 !(vport->fc_flag & FC_PT2PT) &&
5947 !(vport->fc_flag & FC_RSCN_MODE) &&
5948 (phba->sli_rev < LPFC_SLI_REV4)) {
5949 lpfc_issue_clear_la(phba, vport);
5950 lpfc_issue_reg_vpi(phba, vport);
5951 return;
5952 }
5953
5954
5955
5956
5957
5958 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
5959
5960 lpfc_issue_clear_la(phba, vport);
5961
5962 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
5963 vport->num_disc_nodes = 0;
5964
5965 if (vport->fc_npr_cnt)
5966 lpfc_els_disc_plogi(vport);
5967
5968 if (!vport->num_disc_nodes) {
5969 spin_lock_irq(shost->host_lock);
5970 vport->fc_flag &= ~FC_NDISC_ACTIVE;
5971 spin_unlock_irq(shost->host_lock);
5972 lpfc_can_disctmo(vport);
5973 }
5974 }
5975 vport->port_state = LPFC_VPORT_READY;
5976 } else {
5977
5978 num_sent = lpfc_els_disc_plogi(vport);
5979
5980 if (num_sent)
5981 return;
5982
5983 if (vport->fc_flag & FC_RSCN_MODE) {
5984
5985
5986
5987 if ((vport->fc_rscn_id_cnt == 0) &&
5988 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
5989 spin_lock_irq(shost->host_lock);
5990 vport->fc_flag &= ~FC_RSCN_MODE;
5991 spin_unlock_irq(shost->host_lock);
5992 lpfc_can_disctmo(vport);
5993 } else
5994 lpfc_els_handle_rscn(vport);
5995 }
5996 }
5997 return;
5998 }
5999
6000
6001
6002
6003
6004 static void
6005 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
6006 {
6007 LIST_HEAD(completions);
6008 struct lpfc_iocbq *iocb, *next_iocb;
6009 struct lpfc_sli_ring *pring;
6010 u32 ulp_command;
6011
6012 pring = lpfc_phba_elsring(phba);
6013 if (unlikely(!pring))
6014 return;
6015
6016
6017
6018
6019 spin_lock_irq(&phba->hbalock);
6020 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
6021 if (iocb->ndlp != ndlp)
6022 continue;
6023
6024 ulp_command = get_job_cmnd(phba, iocb);
6025
6026 if (ulp_command == CMD_ELS_REQUEST64_CR ||
6027 ulp_command == CMD_XMIT_ELS_RSP64_CX) {
6028
6029 list_move_tail(&iocb->list, &completions);
6030 }
6031 }
6032
6033
6034 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
6035 if (iocb->ndlp != ndlp)
6036 continue;
6037
6038 ulp_command = get_job_cmnd(phba, iocb);
6039
6040 if (ulp_command == CMD_ELS_REQUEST64_CR ||
6041 ulp_command == CMD_XMIT_ELS_RSP64_CX) {
6042 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
6043 }
6044 }
6045 spin_unlock_irq(&phba->hbalock);
6046
6047
6048 lpfc_issue_hb_tmo(phba);
6049
6050
6051 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6052 IOERR_SLI_ABORTED);
6053 }
6054
6055 static void
6056 lpfc_disc_flush_list(struct lpfc_vport *vport)
6057 {
6058 struct lpfc_nodelist *ndlp, *next_ndlp;
6059 struct lpfc_hba *phba = vport->phba;
6060
6061 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
6062 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
6063 nlp_listp) {
6064 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
6065 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
6066 lpfc_free_tx(phba, ndlp);
6067 }
6068 }
6069 }
6070 }
6071
6072
6073
6074
6075
6076
6077
6078
6079
6080
6081
6082 static void
6083 lpfc_notify_xport_npr(struct lpfc_vport *vport)
6084 {
6085 struct lpfc_nodelist *ndlp, *next_ndlp;
6086
6087 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
6088 nlp_listp) {
6089 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
6090 }
6091 }
6092 void
6093 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
6094 {
6095 lpfc_els_flush_rscn(vport);
6096 lpfc_els_flush_cmd(vport);
6097 lpfc_disc_flush_list(vport);
6098 if (pci_channel_offline(vport->phba->pcidev))
6099 lpfc_notify_xport_npr(vport);
6100 }
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111
6112
6113
6114
6115
6116
6117 void
6118 lpfc_disc_timeout(struct timer_list *t)
6119 {
6120 struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
6121 struct lpfc_hba *phba = vport->phba;
6122 uint32_t tmo_posted;
6123 unsigned long flags = 0;
6124
6125 if (unlikely(!phba))
6126 return;
6127
6128 spin_lock_irqsave(&vport->work_port_lock, flags);
6129 tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
6130 if (!tmo_posted)
6131 vport->work_port_events |= WORKER_DISC_TMO;
6132 spin_unlock_irqrestore(&vport->work_port_lock, flags);
6133
6134 if (!tmo_posted)
6135 lpfc_worker_wake_up(phba);
6136 return;
6137 }
6138
6139 static void
6140 lpfc_disc_timeout_handler(struct lpfc_vport *vport)
6141 {
6142 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6143 struct lpfc_hba *phba = vport->phba;
6144 struct lpfc_sli *psli = &phba->sli;
6145 struct lpfc_nodelist *ndlp, *next_ndlp;
6146 LPFC_MBOXQ_t *initlinkmbox;
6147 int rc, clrlaerr = 0;
6148
6149 if (!(vport->fc_flag & FC_DISC_TMO))
6150 return;
6151
6152 spin_lock_irq(shost->host_lock);
6153 vport->fc_flag &= ~FC_DISC_TMO;
6154 spin_unlock_irq(shost->host_lock);
6155
6156 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6157 "disc timeout: state:x%x rtry:x%x flg:x%x",
6158 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
6159
6160 switch (vport->port_state) {
6161
6162 case LPFC_LOCAL_CFG_LINK:
6163
6164
6165
6166
6167 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
6168 "0221 FAN timeout\n");
6169
6170
6171 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
6172 nlp_listp) {
6173 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
6174 continue;
6175 if (ndlp->nlp_type & NLP_FABRIC) {
6176
6177 lpfc_drop_node(vport, ndlp);
6178
6179 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
6180
6181
6182
6183 lpfc_unreg_rpi(vport, ndlp);
6184 }
6185 }
6186 if (vport->port_state != LPFC_FLOGI) {
6187 if (phba->sli_rev <= LPFC_SLI_REV3)
6188 lpfc_initial_flogi(vport);
6189 else
6190 lpfc_issue_init_vfi(vport);
6191 return;
6192 }
6193 break;
6194
6195 case LPFC_FDISC:
6196 case LPFC_FLOGI:
6197
6198
6199 lpfc_printf_vlog(vport, KERN_ERR,
6200 LOG_TRACE_EVENT,
6201 "0222 Initial %s timeout\n",
6202 vport->vpi ? "FDISC" : "FLOGI");
6203
6204
6205
6206
6207
6208
6209 lpfc_disc_list_loopmap(vport);
6210
6211
6212 lpfc_disc_start(vport);
6213 break;
6214
6215 case LPFC_FABRIC_CFG_LINK:
6216
6217
6218 lpfc_printf_vlog(vport, KERN_ERR,
6219 LOG_TRACE_EVENT,
6220 "0223 Timeout while waiting for "
6221 "NameServer login\n");
6222
6223 ndlp = lpfc_findnode_did(vport, NameServer_DID);
6224 if (ndlp)
6225 lpfc_els_abort(phba, ndlp);
6226
6227
6228 goto restart_disc;
6229
6230 case LPFC_NS_QRY:
6231
6232 lpfc_printf_vlog(vport, KERN_ERR,
6233 LOG_TRACE_EVENT,
6234 "0224 NameServer Query timeout "
6235 "Data: x%x x%x\n",
6236 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
6237
6238 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
6239
6240 vport->fc_ns_retry++;
6241 vport->gidft_inp = 0;
6242 rc = lpfc_issue_gidft(vport);
6243 if (rc == 0)
6244 break;
6245 }
6246 vport->fc_ns_retry = 0;
6247
6248 restart_disc:
6249
6250
6251
6252
6253
6254 if (phba->sli_rev < LPFC_SLI_REV4) {
6255 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
6256 lpfc_issue_reg_vpi(phba, vport);
6257 else {
6258 lpfc_issue_clear_la(phba, vport);
6259 vport->port_state = LPFC_VPORT_READY;
6260 }
6261 }
6262
6263
6264 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6265 if (!initlinkmbox) {
6266 lpfc_printf_vlog(vport, KERN_ERR,
6267 LOG_TRACE_EVENT,
6268 "0206 Device Discovery "
6269 "completion error\n");
6270 phba->link_state = LPFC_HBA_ERROR;
6271 break;
6272 }
6273
6274 lpfc_linkdown(phba);
6275 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
6276 phba->cfg_link_speed);
6277 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
6278 initlinkmbox->vport = vport;
6279 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6280 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
6281 lpfc_set_loopback_flag(phba);
6282 if (rc == MBX_NOT_FINISHED)
6283 mempool_free(initlinkmbox, phba->mbox_mem_pool);
6284
6285 break;
6286
6287 case LPFC_DISC_AUTH:
6288
6289 lpfc_printf_vlog(vport, KERN_ERR,
6290 LOG_TRACE_EVENT,
6291 "0227 Node Authentication timeout\n");
6292 lpfc_disc_flush_list(vport);
6293
6294
6295
6296
6297
6298 if (phba->sli_rev < LPFC_SLI_REV4) {
6299 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
6300 lpfc_issue_reg_vpi(phba, vport);
6301 else {
6302 lpfc_issue_clear_la(phba, vport);
6303 vport->port_state = LPFC_VPORT_READY;
6304 }
6305 }
6306 break;
6307
6308 case LPFC_VPORT_READY:
6309 if (vport->fc_flag & FC_RSCN_MODE) {
6310 lpfc_printf_vlog(vport, KERN_ERR,
6311 LOG_TRACE_EVENT,
6312 "0231 RSCN timeout Data: x%x "
6313 "x%x x%x x%x\n",
6314 vport->fc_ns_retry, LPFC_MAX_NS_RETRY,
6315 vport->port_state, vport->gidft_inp);
6316
6317
6318 lpfc_els_flush_cmd(vport);
6319
6320 lpfc_els_flush_rscn(vport);
6321 lpfc_disc_flush_list(vport);
6322 }
6323 break;
6324
6325 default:
6326 lpfc_printf_vlog(vport, KERN_ERR,
6327 LOG_TRACE_EVENT,
6328 "0273 Unexpected discovery timeout, "
6329 "vport State x%x\n", vport->port_state);
6330 break;
6331 }
6332
6333 switch (phba->link_state) {
6334 case LPFC_CLEAR_LA:
6335
6336 lpfc_printf_vlog(vport, KERN_ERR,
6337 LOG_TRACE_EVENT,
6338 "0228 CLEAR LA timeout\n");
6339 clrlaerr = 1;
6340 break;
6341
6342 case LPFC_LINK_UP:
6343 lpfc_issue_clear_la(phba, vport);
6344 fallthrough;
6345 case LPFC_LINK_UNKNOWN:
6346 case LPFC_WARM_START:
6347 case LPFC_INIT_START:
6348 case LPFC_INIT_MBX_CMDS:
6349 case LPFC_LINK_DOWN:
6350 case LPFC_HBA_ERROR:
6351 lpfc_printf_vlog(vport, KERN_ERR,
6352 LOG_TRACE_EVENT,
6353 "0230 Unexpected timeout, hba link "
6354 "state x%x\n", phba->link_state);
6355 clrlaerr = 1;
6356 break;
6357
6358 case LPFC_HBA_READY:
6359 break;
6360 }
6361
6362 if (clrlaerr) {
6363 lpfc_disc_flush_list(vport);
6364 if (phba->sli_rev != LPFC_SLI_REV4) {
6365 psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
6366 ~LPFC_STOP_IOCB_EVENT;
6367 psli->sli3_ring[LPFC_FCP_RING].flag &=
6368 ~LPFC_STOP_IOCB_EVENT;
6369 }
6370 vport->port_state = LPFC_VPORT_READY;
6371 }
6372 return;
6373 }
6374
6375
6376
6377
6378
6379
6380
6381 void
6382 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6383 {
6384 MAILBOX_t *mb = &pmb->u.mb;
6385 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
6386 struct lpfc_vport *vport = pmb->vport;
6387
6388 pmb->ctx_ndlp = NULL;
6389
6390 if (phba->sli_rev < LPFC_SLI_REV4)
6391 ndlp->nlp_rpi = mb->un.varWords[0];
6392 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
6393 ndlp->nlp_type |= NLP_FABRIC;
6394 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
6395 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
6396 "0004 rpi:%x DID:%x flg:%x %d x%px\n",
6397 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
6398 kref_read(&ndlp->kref),
6399 ndlp);
6400
6401
6402
6403
6404
6405
6406 if (vport->port_type == LPFC_PHYSICAL_PORT) {
6407 phba->link_flag &= ~LS_CT_VEN_RPA;
6408 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
6409 } else {
6410 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
6411 }
6412
6413
6414
6415
6416
6417 lpfc_nlp_put(ndlp);
6418 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6419 return;
6420 }
6421
6422 static int
6423 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
6424 {
6425 uint16_t *rpi = param;
6426
6427 return ndlp->nlp_rpi == *rpi;
6428 }
6429
6430 static int
6431 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
6432 {
6433 return memcmp(&ndlp->nlp_portname, param,
6434 sizeof(ndlp->nlp_portname)) == 0;
6435 }
6436
6437 static struct lpfc_nodelist *
6438 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
6439 {
6440 struct lpfc_nodelist *ndlp;
6441
6442 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6443 if (filter(ndlp, param)) {
6444 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6445 "3185 FIND node filter %ps DID "
6446 "ndlp x%px did x%x flg x%x st x%x "
6447 "xri x%x type x%x rpi x%x\n",
6448 filter, ndlp, ndlp->nlp_DID,
6449 ndlp->nlp_flag, ndlp->nlp_state,
6450 ndlp->nlp_xri, ndlp->nlp_type,
6451 ndlp->nlp_rpi);
6452 return ndlp;
6453 }
6454 }
6455 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6456 "3186 FIND node filter %ps NOT FOUND.\n", filter);
6457 return NULL;
6458 }
6459
6460
6461
6462
6463
6464 struct lpfc_nodelist *
6465 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
6466 {
6467 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
6468 }
6469
6470
6471
6472
6473
6474 struct lpfc_nodelist *
6475 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
6476 {
6477 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6478 struct lpfc_nodelist *ndlp;
6479
6480 spin_lock_irq(shost->host_lock);
6481 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
6482 spin_unlock_irq(shost->host_lock);
6483 return ndlp;
6484 }
6485
6486
6487
6488
6489
6490
6491 struct lpfc_nodelist *
6492 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
6493 {
6494 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6495 struct lpfc_nodelist *ndlp;
6496 unsigned long flags;
6497
6498 spin_lock_irqsave(shost->host_lock, flags);
6499 ndlp = __lpfc_findnode_rpi(vport, rpi);
6500 spin_unlock_irqrestore(shost->host_lock, flags);
6501 return ndlp;
6502 }
6503
6504
6505
6506
6507
6508
6509
6510
6511
6512
6513
6514
6515
6516
6517 struct lpfc_vport *
6518 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6519 {
6520 struct lpfc_vport *vport;
6521 unsigned long flags;
6522 int i = 0;
6523
6524
6525 if (vpi > 0) {
6526
6527
6528
6529
6530 for (i = 0; i <= phba->max_vpi; i++) {
6531 if (vpi == phba->vpi_ids[i])
6532 break;
6533 }
6534
6535 if (i > phba->max_vpi) {
6536 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6537 "2936 Could not find Vport mapped "
6538 "to vpi %d\n", vpi);
6539 return NULL;
6540 }
6541 }
6542
6543 spin_lock_irqsave(&phba->port_list_lock, flags);
6544 list_for_each_entry(vport, &phba->port_list, listentry) {
6545 if (vport->vpi == i) {
6546 spin_unlock_irqrestore(&phba->port_list_lock, flags);
6547 return vport;
6548 }
6549 }
6550 spin_unlock_irqrestore(&phba->port_list_lock, flags);
6551 return NULL;
6552 }
6553
6554 struct lpfc_nodelist *
6555 lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
6556 {
6557 struct lpfc_nodelist *ndlp;
6558 int rpi = LPFC_RPI_ALLOC_ERROR;
6559
6560 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
6561 rpi = lpfc_sli4_alloc_rpi(vport->phba);
6562 if (rpi == LPFC_RPI_ALLOC_ERROR)
6563 return NULL;
6564 }
6565
6566 ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
6567 if (!ndlp) {
6568 if (vport->phba->sli_rev == LPFC_SLI_REV4)
6569 lpfc_sli4_free_rpi(vport->phba, rpi);
6570 return NULL;
6571 }
6572
6573 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
6574
6575 spin_lock_init(&ndlp->lock);
6576
6577 lpfc_initialize_node(vport, ndlp, did);
6578 INIT_LIST_HEAD(&ndlp->nlp_listp);
6579 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
6580 ndlp->nlp_rpi = rpi;
6581 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
6582 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
6583 "flg:x%x refcnt:%d\n",
6584 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
6585 ndlp->nlp_flag, kref_read(&ndlp->kref));
6586
6587 ndlp->active_rrqs_xri_bitmap =
6588 mempool_alloc(vport->phba->active_rrq_pool,
6589 GFP_KERNEL);
6590 if (ndlp->active_rrqs_xri_bitmap)
6591 memset(ndlp->active_rrqs_xri_bitmap, 0,
6592 ndlp->phba->cfg_rrq_xri_bitmap_sz);
6593 }
6594
6595
6596
6597 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
6598 "node init: did:x%x",
6599 ndlp->nlp_DID, 0, 0);
6600
6601 return ndlp;
6602 }
6603
6604
6605
6606
6607 static void
6608 lpfc_nlp_release(struct kref *kref)
6609 {
6610 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
6611 kref);
6612 struct lpfc_vport *vport = ndlp->vport;
6613
6614 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6615 "node release: did:x%x flg:x%x type:x%x",
6616 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6617
6618 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6619 "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n",
6620 __func__, ndlp, ndlp->nlp_DID,
6621 kref_read(&ndlp->kref), ndlp->nlp_rpi);
6622
6623
6624 lpfc_cancel_retry_delay_tmo(vport, ndlp);
6625 lpfc_cleanup_node(vport, ndlp);
6626
6627
6628
6629
6630
6631
6632
6633
6634 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
6635 if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR &&
6636 !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) {
6637 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
6638 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
6639 }
6640 }
6641
6642
6643
6644
6645 ndlp->vport = NULL;
6646 ndlp->nlp_state = NLP_STE_FREED_NODE;
6647 ndlp->nlp_flag = 0;
6648 ndlp->fc4_xpt_flags = 0;
6649
6650
6651 kfree(ndlp->lat_data);
6652 if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
6653 mempool_free(ndlp->active_rrqs_xri_bitmap,
6654 ndlp->phba->active_rrq_pool);
6655 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6656 }
6657
6658
6659
6660
6661
6662 struct lpfc_nodelist *
6663 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
6664 {
6665 unsigned long flags;
6666
6667 if (ndlp) {
6668 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6669 "node get: did:x%x flg:x%x refcnt:x%x",
6670 ndlp->nlp_DID, ndlp->nlp_flag,
6671 kref_read(&ndlp->kref));
6672
6673
6674
6675
6676
6677 spin_lock_irqsave(&ndlp->lock, flags);
6678 if (!kref_get_unless_zero(&ndlp->kref)) {
6679 spin_unlock_irqrestore(&ndlp->lock, flags);
6680 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6681 "0276 %s: ndlp:x%px refcnt:%d\n",
6682 __func__, (void *)ndlp, kref_read(&ndlp->kref));
6683 return NULL;
6684 }
6685 spin_unlock_irqrestore(&ndlp->lock, flags);
6686 } else {
6687 WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
6688 }
6689
6690 return ndlp;
6691 }
6692
6693
6694
6695
6696 int
6697 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6698 {
6699 if (ndlp) {
6700 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6701 "node put: did:x%x flg:x%x refcnt:x%x",
6702 ndlp->nlp_DID, ndlp->nlp_flag,
6703 kref_read(&ndlp->kref));
6704 } else {
6705 WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
6706 }
6707
6708 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
6709 }
6710
6711
6712
6713
6714
6715
6716 int
6717 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
6718 {
6719 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6720 "node not used: did:x%x flg:x%x refcnt:x%x",
6721 ndlp->nlp_DID, ndlp->nlp_flag,
6722 kref_read(&ndlp->kref));
6723
6724 if (kref_read(&ndlp->kref) == 1)
6725 if (lpfc_nlp_put(ndlp))
6726 return 1;
6727 return 0;
6728 }
6729
6730
6731
6732
6733
6734
6735
6736
6737
6738
6739
6740 static int
6741 lpfc_fcf_inuse(struct lpfc_hba *phba)
6742 {
6743 struct lpfc_vport **vports;
6744 int i, ret = 0;
6745 struct lpfc_nodelist *ndlp;
6746 struct Scsi_Host *shost;
6747
6748 vports = lpfc_create_vport_work_array(phba);
6749
6750
6751 if (!vports)
6752 return 1;
6753
6754 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6755 shost = lpfc_shost_from_vport(vports[i]);
6756 spin_lock_irq(shost->host_lock);
6757
6758
6759
6760
6761
6762
6763 if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
6764 spin_unlock_irq(shost->host_lock);
6765 ret = 1;
6766 goto out;
6767 }
6768 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
6769 if (ndlp->rport &&
6770 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
6771 ret = 1;
6772 spin_unlock_irq(shost->host_lock);
6773 goto out;
6774 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
6775 ret = 1;
6776 lpfc_printf_log(phba, KERN_INFO,
6777 LOG_NODE | LOG_DISCOVERY,
6778 "2624 RPI %x DID %x flag %x "
6779 "still logged in\n",
6780 ndlp->nlp_rpi, ndlp->nlp_DID,
6781 ndlp->nlp_flag);
6782 }
6783 }
6784 spin_unlock_irq(shost->host_lock);
6785 }
6786 out:
6787 lpfc_destroy_vport_work_array(phba, vports);
6788 return ret;
6789 }
6790
6791
6792
6793
6794
6795
6796
6797
6798 void
6799 lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6800 {
6801 struct lpfc_vport *vport = mboxq->vport;
6802 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6803
6804 if (mboxq->u.mb.mbxStatus) {
6805 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6806 "2555 UNREG_VFI mbxStatus error x%x "
6807 "HBA state x%x\n",
6808 mboxq->u.mb.mbxStatus, vport->port_state);
6809 }
6810 spin_lock_irq(shost->host_lock);
6811 phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
6812 spin_unlock_irq(shost->host_lock);
6813 mempool_free(mboxq, phba->mbox_mem_pool);
6814 return;
6815 }
6816
6817
6818
6819
6820
6821
6822
6823
6824 static void
6825 lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6826 {
6827 struct lpfc_vport *vport = mboxq->vport;
6828
6829 if (mboxq->u.mb.mbxStatus) {
6830 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6831 "2550 UNREG_FCFI mbxStatus error x%x "
6832 "HBA state x%x\n",
6833 mboxq->u.mb.mbxStatus, vport->port_state);
6834 }
6835 mempool_free(mboxq, phba->mbox_mem_pool);
6836 return;
6837 }
6838
6839
6840
6841
6842
6843
6844
6845
6846
6847 int
6848 lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
6849 {
6850 struct lpfc_vport **vports;
6851 struct lpfc_nodelist *ndlp;
6852 struct Scsi_Host *shost;
6853 int i = 0, rc;
6854
6855
6856 if (lpfc_fcf_inuse(phba))
6857 lpfc_unreg_hba_rpis(phba);
6858
6859
6860 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
6861
6862
6863 vports = lpfc_create_vport_work_array(phba);
6864 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
6865 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6866
6867 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6868 if (ndlp)
6869 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6870 lpfc_cleanup_pending_mbox(vports[i]);
6871 if (phba->sli_rev == LPFC_SLI_REV4)
6872 lpfc_sli4_unreg_all_rpis(vports[i]);
6873 lpfc_mbx_unreg_vpi(vports[i]);
6874 shost = lpfc_shost_from_vport(vports[i]);
6875 spin_lock_irq(shost->host_lock);
6876 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6877 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
6878 spin_unlock_irq(shost->host_lock);
6879 }
6880 lpfc_destroy_vport_work_array(phba, vports);
6881 if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
6882 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6883 if (ndlp)
6884 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
6885 lpfc_cleanup_pending_mbox(phba->pport);
6886 if (phba->sli_rev == LPFC_SLI_REV4)
6887 lpfc_sli4_unreg_all_rpis(phba->pport);
6888 lpfc_mbx_unreg_vpi(phba->pport);
6889 shost = lpfc_shost_from_vport(phba->pport);
6890 spin_lock_irq(shost->host_lock);
6891 phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6892 phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
6893 spin_unlock_irq(shost->host_lock);
6894 }
6895
6896
6897 lpfc_els_flush_all_cmd(phba);
6898
6899
6900 rc = lpfc_issue_unreg_vfi(phba->pport);
6901 return rc;
6902 }
6903
6904
6905
6906
6907
6908
6909
6910
6911
6912
6913
6914 int
6915 lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
6916 {
6917 LPFC_MBOXQ_t *mbox;
6918 int rc;
6919
6920 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6921 if (!mbox) {
6922 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6923 "2551 UNREG_FCFI mbox allocation failed"
6924 "HBA state x%x\n", phba->pport->port_state);
6925 return -ENOMEM;
6926 }
6927 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
6928 mbox->vport = phba->pport;
6929 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
6930 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6931
6932 if (rc == MBX_NOT_FINISHED) {
6933 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6934 "2552 Unregister FCFI command failed rc x%x "
6935 "HBA state x%x\n",
6936 rc, phba->pport->port_state);
6937 return -EINVAL;
6938 }
6939 return 0;
6940 }
6941
6942
6943
6944
6945
6946
6947
6948
6949 void
6950 lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
6951 {
6952 int rc;
6953
6954
6955 rc = lpfc_unregister_fcf_prep(phba);
6956 if (rc) {
6957 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6958 "2748 Failed to prepare for unregistering "
6959 "HBA's FCF record: rc=%d\n", rc);
6960 return;
6961 }
6962
6963
6964 rc = lpfc_sli4_unregister_fcf(phba);
6965 if (rc)
6966 return;
6967
6968 phba->fcf.fcf_flag = 0;
6969 phba->fcf.current_rec.flag = 0;
6970
6971
6972
6973
6974
6975 if ((phba->pport->load_flag & FC_UNLOADING) ||
6976 (phba->link_state < LPFC_LINK_UP))
6977 return;
6978
6979
6980 spin_lock_irq(&phba->hbalock);
6981 phba->fcf.fcf_flag |= FCF_INIT_DISC;
6982 spin_unlock_irq(&phba->hbalock);
6983
6984
6985 lpfc_sli4_clear_fcf_rr_bmask(phba);
6986
6987 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
6988
6989 if (rc) {
6990 spin_lock_irq(&phba->hbalock);
6991 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
6992 spin_unlock_irq(&phba->hbalock);
6993 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6994 "2553 lpfc_unregister_unused_fcf failed "
6995 "to read FCF record HBA state x%x\n",
6996 phba->pport->port_state);
6997 }
6998 }
6999
7000
7001
7002
7003
7004
7005
7006
7007 void
7008 lpfc_unregister_fcf(struct lpfc_hba *phba)
7009 {
7010 int rc;
7011
7012
7013 rc = lpfc_unregister_fcf_prep(phba);
7014 if (rc) {
7015 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7016 "2749 Failed to prepare for unregistering "
7017 "HBA's FCF record: rc=%d\n", rc);
7018 return;
7019 }
7020
7021
7022 rc = lpfc_sli4_unregister_fcf(phba);
7023 if (rc)
7024 return;
7025
7026 spin_lock_irq(&phba->hbalock);
7027 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
7028 spin_unlock_irq(&phba->hbalock);
7029 }
7030
7031
7032
7033
7034
7035
7036
7037
7038
7039 void
7040 lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
7041 {
7042
7043
7044
7045
7046
7047 spin_lock_irq(&phba->hbalock);
7048 if (!(phba->hba_flag & HBA_FCOE_MODE) ||
7049 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
7050 !(phba->hba_flag & HBA_FIP_SUPPORT) ||
7051 (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
7052 (phba->pport->port_state == LPFC_FLOGI)) {
7053 spin_unlock_irq(&phba->hbalock);
7054 return;
7055 }
7056 spin_unlock_irq(&phba->hbalock);
7057
7058 if (lpfc_fcf_inuse(phba))
7059 return;
7060
7061 lpfc_unregister_fcf_rescan(phba);
7062 }
7063
7064
7065
7066
7067
7068
7069
7070
7071
7072 static void
7073 lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
7074 uint8_t *buff)
7075 {
7076 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
7077 struct lpfc_fcf_conn_hdr *conn_hdr;
7078 struct lpfc_fcf_conn_rec *conn_rec;
7079 uint32_t record_count;
7080 int i;
7081
7082
7083 list_for_each_entry_safe(conn_entry, next_conn_entry,
7084 &phba->fcf_conn_rec_list, list) {
7085 list_del_init(&conn_entry->list);
7086 kfree(conn_entry);
7087 }
7088
7089 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
7090 record_count = conn_hdr->length * sizeof(uint32_t)/
7091 sizeof(struct lpfc_fcf_conn_rec);
7092
7093 conn_rec = (struct lpfc_fcf_conn_rec *)
7094 (buff + sizeof(struct lpfc_fcf_conn_hdr));
7095
7096 for (i = 0; i < record_count; i++) {
7097 if (!(conn_rec[i].flags & FCFCNCT_VALID))
7098 continue;
7099 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
7100 GFP_KERNEL);
7101 if (!conn_entry) {
7102 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7103 "2566 Failed to allocate connection"
7104 " table entry\n");
7105 return;
7106 }
7107
7108 memcpy(&conn_entry->conn_rec, &conn_rec[i],
7109 sizeof(struct lpfc_fcf_conn_rec));
7110 list_add_tail(&conn_entry->list,
7111 &phba->fcf_conn_rec_list);
7112 }
7113
7114 if (!list_empty(&phba->fcf_conn_rec_list)) {
7115 i = 0;
7116 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
7117 list) {
7118 conn_rec = &conn_entry->conn_rec;
7119 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7120 "3345 FCF connection list rec[%02d]: "
7121 "flags:x%04x, vtag:x%04x, "
7122 "fabric_name:x%02x:%02x:%02x:%02x:"
7123 "%02x:%02x:%02x:%02x, "
7124 "switch_name:x%02x:%02x:%02x:%02x:"
7125 "%02x:%02x:%02x:%02x\n", i++,
7126 conn_rec->flags, conn_rec->vlan_tag,
7127 conn_rec->fabric_name[0],
7128 conn_rec->fabric_name[1],
7129 conn_rec->fabric_name[2],
7130 conn_rec->fabric_name[3],
7131 conn_rec->fabric_name[4],
7132 conn_rec->fabric_name[5],
7133 conn_rec->fabric_name[6],
7134 conn_rec->fabric_name[7],
7135 conn_rec->switch_name[0],
7136 conn_rec->switch_name[1],
7137 conn_rec->switch_name[2],
7138 conn_rec->switch_name[3],
7139 conn_rec->switch_name[4],
7140 conn_rec->switch_name[5],
7141 conn_rec->switch_name[6],
7142 conn_rec->switch_name[7]);
7143 }
7144 }
7145 }
7146
7147
7148
7149
7150
7151
7152
7153
7154
7155 static void
7156 lpfc_read_fcoe_param(struct lpfc_hba *phba,
7157 uint8_t *buff)
7158 {
7159 struct lpfc_fip_param_hdr *fcoe_param_hdr;
7160 struct lpfc_fcoe_params *fcoe_param;
7161
7162 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
7163 buff;
7164 fcoe_param = (struct lpfc_fcoe_params *)
7165 (buff + sizeof(struct lpfc_fip_param_hdr));
7166
7167 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
7168 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
7169 return;
7170
7171 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
7172 phba->valid_vlan = 1;
7173 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
7174 0xFFF;
7175 }
7176
7177 phba->fc_map[0] = fcoe_param->fc_map[0];
7178 phba->fc_map[1] = fcoe_param->fc_map[1];
7179 phba->fc_map[2] = fcoe_param->fc_map[2];
7180 return;
7181 }
7182
7183
7184
7185
7186
7187
7188
7189
7190
7191
7192
7193 static uint8_t *
7194 lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
7195 {
7196 uint32_t offset = 0, rec_length;
7197
7198 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
7199 (size < sizeof(uint32_t)))
7200 return NULL;
7201
7202 rec_length = buff[offset + 1];
7203
7204
7205
7206
7207
7208 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
7209 <= size) {
7210 if (buff[offset] == rec_type)
7211 return &buff[offset];
7212
7213 if (buff[offset] == LPFC_REGION23_LAST_REC)
7214 return NULL;
7215
7216 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
7217 rec_length = buff[offset + 1];
7218 }
7219 return NULL;
7220 }
7221
7222
7223
7224
7225
7226
7227
7228
7229
7230
7231 void
7232 lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
7233 uint8_t *buff,
7234 uint32_t size)
7235 {
7236 uint32_t offset = 0;
7237 uint8_t *rec_ptr;
7238
7239
7240
7241
7242
7243 if (size < 2*sizeof(uint32_t))
7244 return;
7245
7246
7247 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
7248 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7249 "2567 Config region 23 has bad signature\n");
7250 return;
7251 }
7252
7253 offset += 4;
7254
7255
7256 if (buff[offset] != LPFC_REGION23_VERSION) {
7257 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7258 "2568 Config region 23 has bad version\n");
7259 return;
7260 }
7261 offset += 4;
7262
7263
7264 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
7265 size - offset, FCOE_PARAM_TYPE);
7266 if (rec_ptr)
7267 lpfc_read_fcoe_param(phba, rec_ptr);
7268
7269
7270 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
7271 size - offset, FCOE_CONN_TBL_TYPE);
7272 if (rec_ptr)
7273 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
7274
7275 }