/*
 * Mailbox command construction and management routines for the Emulex
 * LightPulse Fibre Channel (lpfc) host bus adapter driver.  These helpers
 * build SLI-3 MAILBOX_t and SLI-4 MQE mailbox commands and manage the DMA
 * buffers that some of those commands require.
 */
0023
0024 #include <linux/blkdev.h>
0025 #include <linux/pci.h>
0026 #include <linux/slab.h>
0027 #include <linux/interrupt.h>
0028
0029 #include <scsi/scsi_device.h>
0030 #include <scsi/scsi_transport_fc.h>
0031 #include <scsi/scsi.h>
0032 #include <scsi/fc/fc_fs.h>
0033
0034 #include "lpfc_hw4.h"
0035 #include "lpfc_hw.h"
0036 #include "lpfc_sli.h"
0037 #include "lpfc_sli4.h"
0038 #include "lpfc_nl.h"
0039 #include "lpfc_disc.h"
0040 #include "lpfc_scsi.h"
0041 #include "lpfc.h"
0042 #include "lpfc_logmsg.h"
0043 #include "lpfc_crtn.h"
0044 #include "lpfc_compat.h"
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
0058
0059
0060
0061
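/**
 * lpfc_mbox_rsrc_prep - Allocate a DMA buffer for a mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 *
 * Allocates an lpfc mbuf, zeroes LPFC_BPL_SIZE bytes of it, and attaches it
 * to @mbox->ctx_buf so that completion paths can find and release it.
 *
 * Return: 0 on success, -ENOMEM if either allocation fails.
 */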
0062 int
0063 lpfc_mbox_rsrc_prep(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
0064 {
0065 struct lpfc_dmabuf *mp;
0066
0067 mp = kmalloc(sizeof(*mp), GFP_KERNEL);
0068 if (!mp)
0069 return -ENOMEM;
0070
0071 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
0072 if (!mp->virt) {
0073 kfree(mp);
0074 return -ENOMEM;
0075 }
0076
0077 memset(mp->virt, 0, LPFC_BPL_SIZE);
0078
0079
0080 INIT_LIST_HEAD(&mp->list);
0081 mbox->ctx_buf = mp;
0082 return 0;
0083 }
0084
0085
0086
0087
0088
0089
0090
0091
0092
0093
0094
0095
0096
0097
0098
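/**
 * lpfc_mbox_rsrc_cleanup - Free the mailbox DMA buffer and the mailbox itself
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 * @locked: MBOX_THD_LOCKED if the caller already holds the lock required by
 *          __lpfc_mbuf_free, MBOX_THD_UNLOCKED otherwise.
 *
 * Releases the DMA buffer hung off @mbox->ctx_buf (if any), using the locked
 * or unlocked mbuf free routine as indicated, and returns the mailbox to the
 * mailbox memory pool.
 */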
0099 void
0100 lpfc_mbox_rsrc_cleanup(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
0101 enum lpfc_mbox_ctx locked)
0102 {
0103 struct lpfc_dmabuf *mp;
0104
0105 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
0106 mbox->ctx_buf = NULL;
0107
0108
0109 if (mp) {
0110 if (locked == MBOX_THD_LOCKED)
0111 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
0112 else
0113 lpfc_mbuf_free(phba, mp->virt, mp->phys);
0114 kfree(mp);
0115 }
0116
0117 mempool_free(mbox, phba->mbox_mem_pool);
0118 }
0119
0120
0121
0122
0123
0124
0125
0126
0127
0128
0129
0130
0131
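/**
 * lpfc_dump_static_vport - Prepare a DUMP_MEMORY mailbox for static vport info
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @offset: offset into the static vport region to start the dump from.
 *
 * Builds a MBX_DUMP_MEMORY command against the static vport configuration
 * region.  On pre-SLI-4 ports the response is returned embedded in the
 * mailbox; on SLI-4 ports a DMA buffer is allocated and its address placed
 * in the command.
 *
 * Return: 0 on success, 1 if the SLI-4 DMA buffer could not be allocated.
 */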
0132 int
0133 lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
0134 uint16_t offset)
0135 {
0136 MAILBOX_t *mb;
0137 struct lpfc_dmabuf *mp;
0138 int rc;
0139
0140 mb = &pmb->u.mb;
0141
0142
0143 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
0144 mb->mbxCommand = MBX_DUMP_MEMORY;
0145 mb->un.varDmp.type = DMP_NV_PARAMS;
0146 mb->un.varDmp.entry_index = offset;
0147 mb->un.varDmp.region_id = DMP_REGION_VPORT;
0148 mb->mbxOwner = OWN_HOST;
0149
0150
0151 if (phba->sli_rev != LPFC_SLI_REV4) {
0152 mb->un.varDmp.cv = 1;
0153 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
0154 return 0;
0155 }
0156
0157 rc = lpfc_mbox_rsrc_prep(phba, pmb);
0158 if (rc) {
0159 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
0160 "2605 %s: memory allocation failed\n",
0161 __func__);
0162 return 1;
0163 }
0164
0165 mp = pmb->ctx_buf;
0166 mb->un.varWords[3] = putPaddrLow(mp->phys);
0167 mb->un.varWords[4] = putPaddrHigh(mp->phys);
0168 mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
0169
0170 return 0;
0171 }
0172
0173
0174
0175
0176
0177
0178
0179
0180 void
0181 lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
0182 {
0183 MAILBOX_t *mb;
0184 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
0185 mb = &pmb->u.mb;
0186 mb->mbxCommand = MBX_DOWN_LINK;
0187 mb->mbxOwner = OWN_HOST;
0188 }
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202 void
0203 lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
0204 uint16_t region_id)
0205 {
0206 MAILBOX_t *mb;
0207 void *ctx;
0208
0209 mb = &pmb->u.mb;
0210 ctx = pmb->ctx_buf;
0211
0212
0213 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0214 mb->mbxCommand = MBX_DUMP_MEMORY;
0215 mb->un.varDmp.cv = 1;
0216 mb->un.varDmp.type = DMP_NV_PARAMS;
0217 mb->un.varDmp.entry_index = offset;
0218 mb->un.varDmp.region_id = region_id;
0219 mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
0220 mb->un.varDmp.co = 0;
0221 mb->un.varDmp.resp_offset = 0;
0222 pmb->ctx_buf = ctx;
0223 mb->mbxOwner = OWN_HOST;
0224 return;
0225 }
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235 void
0236 lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
0237 {
0238 MAILBOX_t *mb;
0239 void *ctx;
0240
0241 mb = &pmb->u.mb;
0242
0243 ctx = pmb->ctx_buf;
0244
0245
0246 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
0247 mb->mbxCommand = MBX_DUMP_MEMORY;
0248 mb->mbxOwner = OWN_HOST;
0249 mb->un.varDmp.cv = 1;
0250 mb->un.varDmp.type = DMP_NV_PARAMS;
0251 if (phba->sli_rev < LPFC_SLI_REV4)
0252 mb->un.varDmp.entry_index = 0;
0253 mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
0254 mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
0255 mb->un.varDmp.co = 0;
0256 mb->un.varDmp.resp_offset = 0;
0257 pmb->ctx_buf = ctx;
0258 return;
0259 }
0260
0261
0262
0263
0264
0265
0266
0267
0268
0269
0270
0271
0272 void
0273 lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
0274 {
0275 MAILBOX_t *mb;
0276
0277 mb = &pmb->u.mb;
0278 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0279 mb->mbxCommand = MBX_READ_NV;
0280 mb->mbxOwner = OWN_HOST;
0281 return;
0282 }
0283
0284
0285
0286
0287
0288
0289
0290
0291
0292
0293
0294
0295
0296
0297 void
0298 lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
0299 uint32_t ring)
0300 {
0301 MAILBOX_t *mb;
0302
0303 mb = &pmb->u.mb;
0304 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0305 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
0306 mb->un.varCfgAsyncEvent.ring = ring;
0307 mb->mbxOwner = OWN_HOST;
0308 return;
0309 }
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
0322
0323
0324 void
0325 lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
0326 {
0327 MAILBOX_t *mb;
0328
0329 mb = &pmb->u.mb;
0330 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0331 mb->mbxCommand = MBX_HEARTBEAT;
0332 mb->mbxOwner = OWN_HOST;
0333 return;
0334 }
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349
0350
0351
0352
0353
0354
0355
0356
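/**
 * lpfc_read_topology - Prepare a READ_TOPOLOGY mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @mp: DMA buffer that will receive the loop (ALPA) map.
 *
 * Builds a MBX_READ_TOPOLOGY command and points its BDE at @mp so the port
 * can return the current link topology and ALPA map.  The buffer is saved in
 * @pmb->ctx_buf so the completion handler can free it.
 *
 * Return: 0 always.
 */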
0357 int
0358 lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
0359 struct lpfc_dmabuf *mp)
0360 {
0361 MAILBOX_t *mb;
0362
0363 mb = &pmb->u.mb;
0364 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0365
0366 INIT_LIST_HEAD(&mp->list);
0367 mb->mbxCommand = MBX_READ_TOPOLOGY;
0368 mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
0369 mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
0370 mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
0371
0372
0373
0374
0375 pmb->ctx_buf = (uint8_t *)mp;
0376 mb->mbxOwner = OWN_HOST;
0377 return (0);
0378 }
0379
0380
0381
0382
0383
0384
0385
0386
0387
0388
0389
0390
0391
0392
0393
0394
0395 void
0396 lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
0397 {
0398 MAILBOX_t *mb;
0399
0400 mb = &pmb->u.mb;
0401 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0402
0403 mb->un.varClearLA.eventTag = phba->fc_eventTag;
0404 mb->mbxCommand = MBX_CLEAR_LA;
0405 mb->mbxOwner = OWN_HOST;
0406 return;
0407 }
0408
0409
0410
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422
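/**
 * lpfc_config_link - Prepare a CONFIG_LINK mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds a MBX_CONFIG_LINK command carrying the physical port's DID and the
 * current FC timeout values (E_D_TOV, R_A_TOV, and so on).  Coalesced
 * response and ACK0 support are only requested on pre-SLI-4 ports, and
 * BB credit recovery (BBSCN) is requested when both the adapter and the
 * configuration allow it.
 */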
0423 void
0424 lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
0425 {
0426 struct lpfc_vport *vport = phba->pport;
0427 MAILBOX_t *mb = &pmb->u.mb;
0428 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0429
0430
0431
0432
0433 if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) {
0434 mb->un.varCfgLnk.cr = 1;
0435 mb->un.varCfgLnk.ci = 1;
0436 mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
0437 mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
0438 }
0439
0440 mb->un.varCfgLnk.myId = vport->fc_myDID;
0441 mb->un.varCfgLnk.edtov = phba->fc_edtov;
0442 mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
0443 mb->un.varCfgLnk.ratov = phba->fc_ratov;
0444 mb->un.varCfgLnk.rttov = phba->fc_rttov;
0445 mb->un.varCfgLnk.altov = phba->fc_altov;
0446 mb->un.varCfgLnk.crtov = phba->fc_crtov;
0447 mb->un.varCfgLnk.cscn = 0;
0448 if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
0449 mb->un.varCfgLnk.cscn = 1;
0450 mb->un.varCfgLnk.bbscn = bf_get(lpfc_bbscn_def,
0451 &phba->sli4_hba.bbscn_params);
0452 }
0453
0454 if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
0455 mb->un.varCfgLnk.ack0_enable = 1;
0456
0457 mb->mbxCommand = MBX_CONFIG_LINK;
0458 mb->mbxOwner = OWN_HOST;
0459 return;
0460 }
0461
0462
0463
0464
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
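/**
 * lpfc_config_msi - Prepare a CONFIG_MSI mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds a MBX_CONFIG_MSI command that routes the supported host attention
 * conditions to MSI-X message 1.  Requires cfg_use_msi to select MSI-X mode
 * and an SLI-3 capable port.
 *
 * Return: 0 on success, -EINVAL if MSI-X is not configured or the SLI
 * revision is too old.
 */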
0475 int
0476 lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
0477 {
0478 MAILBOX_t *mb = &pmb->u.mb;
0479 uint32_t attentionConditions[2];
0480
0481
0482 if (phba->cfg_use_msi != 2) {
0483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
0484 "0475 Not configured for supporting MSI-X "
0485 "cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
0486 return -EINVAL;
0487 }
0488
0489 if (phba->sli_rev < 3) {
0490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
0491 "0476 HBA not supporting SLI-3 or later "
0492 "SLI Revision: 0x%x\n", phba->sli_rev);
0493 return -EINVAL;
0494 }
0495
0496
0497 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
0498
0499
0500
0501
0502
0503
0504 attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
0505 HA_LATT | HA_MBATT);
0506 attentionConditions[1] = 0;
0507
0508 mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
0509 mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];
0510
	/*
	 * Map host attention (HA) ring bits to MSI-X message numbers.  The
	 * messageNumberByHA[] array is byte indexed, so on little-endian
	 * hosts the ring position is XORed with 3 to land on the matching
	 * byte within each 32-bit word.
	 */
#ifdef __BIG_ENDIAN_BITFIELD
	/* Ring 0 */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
	/* Ring 1 */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
#else	/* __LITTLE_ENDIAN_BITFIELD */
	/* Ring 0 */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
	/* Ring 1 */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
#endif
	/* Multi-message attention configuration */
	mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
	mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];

	/* Leave hardware auto-clear of the attention bits disabled */
	mb->un.varCfgMSI.autoClearHA[0] = 0;
	mb->un.varCfgMSI.autoClearHA[1] = 0;
0532
0533
0534 mb->mbxCommand = MBX_CONFIG_MSI;
0535 mb->mbxOwner = OWN_HOST;
0536
0537 return 0;
0538 }
0539
0540
0541
0542
0543
0544
0545
0546
0547
0548
0549
0550
0551
0552
0553
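/**
 * lpfc_init_link - Prepare an INIT_LINK mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @topology: requested topology (loop, point-to-point, or failover variants).
 * @linkspeed: requested link speed (LPFC_USER_LINK_SPEED_*).
 *
 * Builds a MBX_INIT_LINK command to bring up the link with the requested
 * topology and speed.  Loop topology is converted to point-to-point on
 * G6/IF_TYPE_6 adapters without loop support, and a fixed link speed is only
 * requested when the firmware feature level advertises support for it.
 */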
0554 void
0555 lpfc_init_link(struct lpfc_hba * phba,
0556 LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
0557 {
0558 lpfc_vpd_t *vpd;
0559 MAILBOX_t *mb;
0560
0561 mb = &pmb->u.mb;
0562 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0563
0564 switch (topology) {
0565 case FLAGS_TOPOLOGY_MODE_LOOP_PT:
0566 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
0567 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
0568 break;
0569 case FLAGS_TOPOLOGY_MODE_PT_PT:
0570 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
0571 break;
0572 case FLAGS_TOPOLOGY_MODE_LOOP:
0573 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
0574 break;
0575 case FLAGS_TOPOLOGY_MODE_PT_LOOP:
0576 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
0577 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
0578 break;
0579 case FLAGS_LOCAL_LB:
0580 mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
0581 break;
0582 }
0583
0584
0585 if ((phba->sli4_hba.pc_sli4_params.sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
0586 phba->sli4_hba.pc_sli4_params.if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
0587 !(phba->sli4_hba.pc_sli4_params.pls) &&
0588 mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
0589 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
0590 phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
0591 }
0592
0593
0594 if (phba->sli_rev == LPFC_SLI_REV3 && !phba->cfg_fcp_wait_abts_rsp)
0595 mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
0596
	/*
	 * Only request a fixed link speed if the firmware feature level in
	 * the VPD indicates it is supported; otherwise fall back to
	 * auto-negotiation.
	 */
0600 vpd = &phba->vpd;
0601 if (vpd->rev.feaLevelHigh >= 0x02){
0602 switch(linkspeed){
0603 case LPFC_USER_LINK_SPEED_1G:
0604 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
0605 mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
0606 break;
0607 case LPFC_USER_LINK_SPEED_2G:
0608 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
0609 mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
0610 break;
0611 case LPFC_USER_LINK_SPEED_4G:
0612 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
0613 mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
0614 break;
0615 case LPFC_USER_LINK_SPEED_8G:
0616 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
0617 mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
0618 break;
0619 case LPFC_USER_LINK_SPEED_10G:
0620 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
0621 mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
0622 break;
0623 case LPFC_USER_LINK_SPEED_16G:
0624 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
0625 mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
0626 break;
0627 case LPFC_USER_LINK_SPEED_32G:
0628 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
0629 mb->un.varInitLnk.link_speed = LINK_SPEED_32G;
0630 break;
0631 case LPFC_USER_LINK_SPEED_64G:
0632 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
0633 mb->un.varInitLnk.link_speed = LINK_SPEED_64G;
0634 break;
0635 case LPFC_USER_LINK_SPEED_AUTO:
0636 default:
0637 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
0638 break;
0639 }
0640
0641 }
0642 else
0643 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
0644
0645 mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
0646 mb->mbxOwner = OWN_HOST;
0647 mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
0648 return;
0649 }
0650
0651
0652
0653
0654
0655
0656
0657
0658
0659
0660
0661
0662
0663
0664
0665
0666
0667
0668
0669
0670
0671
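/**
 * lpfc_read_sparam - Prepare a READ_SPARM64 mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @vpi: virtual port index whose service parameters are to be read.
 *
 * Allocates a DMA buffer for the service parameters and builds a
 * MBX_READ_SPARM64 command whose BDE points at it.  The buffer stays in
 * @pmb->ctx_buf for the completion handler.
 *
 * Return: 0 on success, 1 if the DMA buffer could not be allocated.
 */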
0672 int
0673 lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
0674 {
0675 struct lpfc_dmabuf *mp;
0676 MAILBOX_t *mb;
0677 int rc;
0678
0679 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0680
0681
0682 rc = lpfc_mbox_rsrc_prep(phba, pmb);
0683 if (rc) {
0684 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
0685 "0301 READ_SPARAM: no buffers\n");
0686 return 1;
0687 }
0688
0689 mp = pmb->ctx_buf;
0690 mb = &pmb->u.mb;
0691 mb->mbxOwner = OWN_HOST;
0692 mb->mbxCommand = MBX_READ_SPARM64;
0693 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
0694 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
0695 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
0696 if (phba->sli_rev >= LPFC_SLI_REV3)
0697 mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
0698
0699 return (0);
0700 }
0701
0702
0703
0704
0705
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715
0716
0717 void
0718 lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
0719 LPFC_MBOXQ_t * pmb)
0720 {
0721 MAILBOX_t *mb;
0722
0723 mb = &pmb->u.mb;
0724 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0725
0726 mb->un.varUnregDID.did = did;
0727 mb->un.varUnregDID.vpi = vpi;
0728 if ((vpi != 0xffff) &&
0729 (phba->sli_rev == LPFC_SLI_REV4))
0730 mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
0731
0732 mb->mbxCommand = MBX_UNREG_D_ID;
0733 mb->mbxOwner = OWN_HOST;
0734 return;
0735 }
0736
0737
0738
0739
0740
0741
0742
0743
0744
0745
0746
0747
0748
0749
0750 void
0751 lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
0752 {
0753 MAILBOX_t *mb;
0754
0755 mb = &pmb->u.mb;
0756 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0757
0758 mb->mbxCommand = MBX_READ_CONFIG;
0759 mb->mbxOwner = OWN_HOST;
0760 return;
0761 }
0762
0763
0764
0765
0766
0767
0768
0769
0770
0771
0772
0773
0774
0775 void
0776 lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
0777 {
0778 MAILBOX_t *mb;
0779
0780 mb = &pmb->u.mb;
0781 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0782
0783 mb->mbxCommand = MBX_READ_LNK_STAT;
0784 mb->mbxOwner = OWN_HOST;
0785 return;
0786 }
0787
0788
0789
0790
0791
0792
0793
0794
0795
0796
0797
0798
0799
0800
0801
0802
0803
0804
0805
0806
0807
0808
0809
0810
0811
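/**
 * lpfc_reg_rpi - Prepare a REG_LOGIN64 mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual port index the login is registered against.
 * @did: FC destination ID of the remote port.
 * @param: pointer to the remote port's service parameters.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @rpi: remote port index to register (used directly only on SLI-4 ports).
 *
 * Copies @param into a newly allocated DMA buffer and builds a
 * MBX_REG_LOGIN64 command that registers the remote port login with the
 * firmware.
 *
 * Return: 0 on success, 1 if the DMA buffer could not be allocated.
 */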
0812 int
0813 lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
0814 uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
0815 {
0816 MAILBOX_t *mb = &pmb->u.mb;
0817 uint8_t *sparam;
0818 struct lpfc_dmabuf *mp;
0819 int rc;
0820
0821 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0822
0823 mb->un.varRegLogin.rpi = 0;
0824 if (phba->sli_rev == LPFC_SLI_REV4)
0825 mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
0826 if (phba->sli_rev >= LPFC_SLI_REV3)
0827 mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
0828 mb->un.varRegLogin.did = did;
0829 mb->mbxOwner = OWN_HOST;
0830
0831
0832 rc = lpfc_mbox_rsrc_prep(phba, pmb);
0833 if (rc) {
0834 mb->mbxCommand = MBX_REG_LOGIN64;
0835
0836 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
0837 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
0838 "rpi x%x\n", vpi, did, rpi);
0839 return 1;
0840 }
0841
0842
0843 mp = pmb->ctx_buf;
0844 sparam = mp->virt;
0845 memcpy(sparam, param, sizeof (struct serv_parm));
0846
0847
0848 mb->mbxCommand = MBX_REG_LOGIN64;
0849 mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
0850 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
0851 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
0852
0853 return 0;
0854 }
0855
0856
0857
0858
0859
0860
0861
0862
0863
0864
0865
0866
0867
0868
0869
0870
0871
0872
0873 void
0874 lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
0875 LPFC_MBOXQ_t * pmb)
0876 {
0877 MAILBOX_t *mb;
0878
0879 mb = &pmb->u.mb;
0880 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0881
0882 mb->un.varUnregLogin.rpi = rpi;
0883 mb->un.varUnregLogin.rsvd1 = 0;
0884 if (phba->sli_rev >= LPFC_SLI_REV3)
0885 mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
0886
0887 mb->mbxCommand = MBX_UNREG_LOGIN;
0888 mb->mbxOwner = OWN_HOST;
0889
0890 return;
0891 }
0892
0893
0894
0895
0896
0897
0898
0899
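/**
 * lpfc_sli4_unreg_all_rpis - Unregister all RPIs belonging to a vport
 * @vport: pointer to the virtual port whose RPIs are to be unregistered.
 *
 * Allocates a mailbox and issues an UNREG_LOGIN that targets every RPI
 * registered on the vport's VPI rather than a single remote port.  The
 * mailbox is issued without waiting and freed if it cannot be posted.
 */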
0900 void
0901 lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
0902 {
0903 struct lpfc_hba *phba = vport->phba;
0904 LPFC_MBOXQ_t *mbox;
0905 int rc;
0906
0907 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
0908 if (mbox) {
		/*
		 * For SLI-4 ports the rpi field of UNREG_LOGIN is overloaded:
		 * pass the vpi value in the rpi slot and set the 0x4000
		 * indicator in rsvd1 so that every RPI registered on this
		 * vport's VPI is unregistered.
		 */
		lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
				 mbox);
		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
0919 mbox->vport = vport;
0920 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
0921 mbox->ctx_ndlp = NULL;
0922 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
0923 if (rc == MBX_NOT_FINISHED)
0924 mempool_free(mbox, phba->mbox_mem_pool);
0925 }
0926 }
0927
0928
0929
0930
0931
0932
0933
0934
0935
0936
0937
0938
0939
0940
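/**
 * lpfc_reg_vpi - Prepare a REG_VPI mailbox command
 * @vport: pointer to the virtual port being registered.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds a MBX_REG_VPI command carrying the vport's VPI, source ID, VFI and
 * WWPN.  On SLI-4 ports that do not need a fresh registration the command is
 * marked as an update (upd) instead.
 */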
0941 void
0942 lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
0943 {
0944 MAILBOX_t *mb = &pmb->u.mb;
0945 struct lpfc_hba *phba = vport->phba;
0946
0947 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0948
0949
0950
0951 if ((phba->sli_rev == LPFC_SLI_REV4) &&
0952 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
0953 mb->un.varRegVpi.upd = 1;
0954
0955 mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
0956 mb->un.varRegVpi.sid = vport->fc_myDID;
0957 if (phba->sli_rev == LPFC_SLI_REV4)
0958 mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
0959 else
0960 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
0961 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
0962 sizeof(struct lpfc_name));
0963 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
0964 mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);
0965
0966 mb->mbxCommand = MBX_REG_VPI;
0967 mb->mbxOwner = OWN_HOST;
	return;
}
0971
0972
0973
0974
0975
0976
0977
0978
0979
0980
0981
0982
0983
0984
0985
0986
0987
0988 void
0989 lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
0990 {
0991 MAILBOX_t *mb = &pmb->u.mb;
0992 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
0993
0994 if (phba->sli_rev == LPFC_SLI_REV3)
0995 mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
0996 else if (phba->sli_rev >= LPFC_SLI_REV4)
0997 mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
0998
0999 mb->mbxCommand = MBX_UNREG_VPI;
1000 mb->mbxOwner = OWN_HOST;
1001 return;
1002
1003 }
1004
1005
1006
1007
1008
1009
1010
1011
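/**
 * lpfc_config_pcb_setup - Fill in the Port Control Block ring descriptors
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks the SLI-3 rings and records, for each ring, the IOCB entry sizes and
 * counts plus the DMA addresses of its command and response IOCB areas
 * within slim2p.  Rings with no IOCB entries are left zeroed.
 */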
1012 static void
1013 lpfc_config_pcb_setup(struct lpfc_hba * phba)
1014 {
1015 struct lpfc_sli *psli = &phba->sli;
1016 struct lpfc_sli_ring *pring;
1017 PCB_t *pcbp = phba->pcb;
1018 dma_addr_t pdma_addr;
1019 uint32_t offset;
1020 uint32_t iocbCnt = 0;
1021 int i;
1022
1023 pcbp->maxRing = (psli->num_rings - 1);
1024
1025 for (i = 0; i < psli->num_rings; i++) {
1026 pring = &psli->sli3_ring[i];
1027
1028 pring->sli.sli3.sizeCiocb =
1029 phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
1030 SLI2_IOCB_CMD_SIZE;
1031 pring->sli.sli3.sizeRiocb =
1032 phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
1033 SLI2_IOCB_RSP_SIZE;
1034
1035
1036 if ((pring->sli.sli3.numCiocb == 0) ||
1037 (pring->sli.sli3.numRiocb == 0)) {
1038 pcbp->rdsc[i].cmdEntries = 0;
1039 pcbp->rdsc[i].rspEntries = 0;
1040 pcbp->rdsc[i].cmdAddrHigh = 0;
1041 pcbp->rdsc[i].rspAddrHigh = 0;
1042 pcbp->rdsc[i].cmdAddrLow = 0;
1043 pcbp->rdsc[i].rspAddrLow = 0;
1044 pring->sli.sli3.cmdringaddr = NULL;
1045 pring->sli.sli3.rspringaddr = NULL;
1046 continue;
1047 }
1048
1049 pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
1050 pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;
1051
1052 offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
1053 (uint8_t *) phba->slim2p.virt;
1054 pdma_addr = phba->slim2p.phys + offset;
1055 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
1056 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
1057 iocbCnt += pring->sli.sli3.numCiocb;
1058
1059
1060 pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];
1061
1062 pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
1063 offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
1064 (uint8_t *)phba->slim2p.virt;
1065 pdma_addr = phba->slim2p.phys + offset;
1066 pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
1067 pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
1068 iocbCnt += pring->sli.sli3.numRiocb;
1069 }
1070 }
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086 void
1087 lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1088 {
1089 MAILBOX_t *mb = &pmb->u.mb;
1090 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1091 mb->un.varRdRev.cv = 1;
1092 mb->un.varRdRev.v3req = 1;
1093 mb->mbxCommand = MBX_READ_REV;
1094 mb->mbxOwner = OWN_HOST;
1095 return;
1096 }
1097
1098 void
1099 lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1100 {
1101 MAILBOX_t *mb = &pmb->u.mb;
1102 struct lpfc_mqe *mqe;
1103
1104 switch (mb->mbxCommand) {
1105 case MBX_READ_REV:
1106 mqe = &pmb->u.mqe;
1107 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
1108 mqe->un.read_rev.fw_name, 16);
1109 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
1110 mqe->un.read_rev.ulp_fw_name, 16);
1111 break;
1112 default:
1113 break;
1114 }
1115 return;
1116 }
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128 static void
1129 lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
1130 struct lpfc_hbq_init *hbq_desc)
1131 {
1132 hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
1133 hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
1134 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
1135 }
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147 static void
1148 lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
1149 struct lpfc_hbq_init *hbq_desc)
1150 {
1151 hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
1152 hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
1153 hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
1154 hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
1155 memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
1156 sizeof(hbqmb->profiles.profile3.cmdmatch));
1157 }
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170 static void
1171 lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
1172 struct lpfc_hbq_init *hbq_desc)
1173 {
1174 hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
1175 hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
1176 hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
1177 hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
1178 memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
1179 sizeof(hbqmb->profiles.profile5.cmdmatch));
1180 }
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
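/**
 * lpfc_config_hbq - Prepare a CONFIG_HBQ mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @id: HBQ identifier being configured.
 * @hbq_desc: descriptor holding the HBQ entry count, profile and masks.
 * @hbq_entry_index: index of this HBQ's first entry within hbqslimp.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds a MBX_CONFIG_HBQ command from @hbq_desc, including the DMA address
 * of the HBQ entries, any profile-specific fields (profiles 2, 3 and 5) and
 * the receive buffer ring masks.
 */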
1196 void
1197 lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
1198 struct lpfc_hbq_init *hbq_desc,
1199 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
1200 {
1201 int i;
1202 MAILBOX_t *mb = &pmb->u.mb;
1203 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
1204
1205 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1206 hbqmb->hbqId = id;
1207 hbqmb->entry_count = hbq_desc->entry_count;
1208 hbqmb->recvNotify = hbq_desc->rn;
1209
1210 hbqmb->numMask = hbq_desc->mask_count;
1211
1212 hbqmb->profile = hbq_desc->profile;
1213
1214
1215 hbqmb->ringMask = hbq_desc->ring_mask;
1216
1217
1218 hbqmb->headerLen = hbq_desc->headerLen;
1219
1220 hbqmb->logEntry = hbq_desc->logEntry;
1221
1222
1223
1224 hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
1225 hbq_entry_index * sizeof(struct lpfc_hbq_entry);
1226 hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
1227
1228 mb->mbxCommand = MBX_CONFIG_HBQ;
1229 mb->mbxOwner = OWN_HOST;
1230
1231
1232
1233
1234 if (hbq_desc->profile == 2)
1235 lpfc_build_hbq_profile2(hbqmb, hbq_desc);
1236 else if (hbq_desc->profile == 3)
1237 lpfc_build_hbq_profile3(hbqmb, hbq_desc);
1238 else if (hbq_desc->profile == 5)
1239 lpfc_build_hbq_profile5(hbqmb, hbq_desc);
1240
1241
1242 if (!hbq_desc->mask_count)
1243 return;
1244
1245
1246 for (i = 0; i < hbq_desc->mask_count; i++) {
1247 hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
1248 hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask;
1249 hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
1250 hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask;
1251 }
1252
1253 return;
1254 }
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
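/**
 * lpfc_config_ring - Prepare a CONFIG_RING mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @ring: SLI-3 ring number being configured.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds a MBX_CONFIG_RING command for the given ring, either selecting the
 * ring's profile or programming its R_CTL/TYPE unsolicited frame masks.
 */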
1273 void
1274 lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1275 {
1276 int i;
1277 MAILBOX_t *mb = &pmb->u.mb;
1278 struct lpfc_sli *psli;
1279 struct lpfc_sli_ring *pring;
1280
1281 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1282
1283 mb->un.varCfgRing.ring = ring;
1284 mb->un.varCfgRing.maxOrigXchg = 0;
1285 mb->un.varCfgRing.maxRespXchg = 0;
1286 mb->un.varCfgRing.recvNotify = 1;
1287
1288 psli = &phba->sli;
1289 pring = &psli->sli3_ring[ring];
1290 mb->un.varCfgRing.numMask = pring->num_mask;
1291 mb->mbxCommand = MBX_CONFIG_RING;
1292 mb->mbxOwner = OWN_HOST;
1293
1294
1295 if (pring->prt[0].profile) {
1296 mb->un.varCfgRing.profile = pring->prt[0].profile;
1297 return;
1298 }
1299
1300
1301 for (i = 0; i < pring->num_mask; i++) {
1302 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
1303 if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
1304 mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
1305 else
1306 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
1307 mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
1308 mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
1309 }
1310
1311 return;
1312 }
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
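/**
 * lpfc_config_port - Prepare a CONFIG_PORT mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds the MBX_CONFIG_PORT command that switches the port into SLI-2 or
 * SLI-3 mode.  This includes setting up the Port Control Block (mailbox,
 * host group pointer and port group pointer addresses), requesting SLI-3
 * features such as HBQs, NPIV and block guard when available, and
 * byte-swapping the PCB image for the port.
 */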
1328 void
1329 lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1330 {
1331 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
1332 MAILBOX_t *mb = &pmb->u.mb;
1333 dma_addr_t pdma_addr;
1334 uint32_t bar_low, bar_high;
1335 size_t offset;
1336 struct lpfc_hgp hgp;
1337 int i;
1338 uint32_t pgp_offset;
1339
1340 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1341 mb->mbxCommand = MBX_CONFIG_PORT;
1342 mb->mbxOwner = OWN_HOST;
1343
1344 mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
1345
1346 offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
1347 pdma_addr = phba->slim2p.phys + offset;
1348 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
1349 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
1350
1351
1352 mb->un.varCfgPort.hps = 1;
1353
1354
1355
1356 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
1357 if (phba->cfg_enable_bg)
1358 mb->un.varCfgPort.cbg = 1;
1359 mb->un.varCfgPort.cerbm = 1;
1360 mb->un.varCfgPort.ccrp = 1;
1361 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1362 if (phba->max_vpi && phba->cfg_enable_npiv &&
1363 phba->vpd.sli3Feat.cmv) {
1364 mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
1365 mb->un.varCfgPort.cmv = 1;
1366 } else
1367 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
1368 } else
1369 phba->sli_rev = LPFC_SLI_REV2;
1370 mb->un.varCfgPort.sli_mode = phba->sli_rev;
1371
1372
1373 if (phba->sli_rev == LPFC_SLI_REV3)
1374 mb->un.varCfgPort.casabt = 1;
1375
1376
1377 phba->pcb->type = TYPE_NATIVE_SLI2;
1378 phba->pcb->feature = FEATURE_INITIAL_SLI2;
1379
1380
1381 phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
1382 offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
1383 pdma_addr = phba->slim2p.phys + offset;
1384 phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
1385 phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
1386
	/*
	 * Read BAR0/BAR1 so that SLIM-resident host group pointer addresses
	 * can be expressed as PCI bus addresses below.
	 */
1406 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
1407 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
1408
	/*
	 * Set up the host group pointers.  If host-memory group pointers are
	 * configured and the port is not running SLI-3, the ring get/put
	 * indices live in the host-resident slim2p mailbox area; otherwise
	 * they are kept in the adapter's memory-mapped SLIM.
	 */
1437 if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
1438 phba->host_gp = (struct lpfc_hgp __iomem *)
1439 &phba->mbox->us.s2.host[0];
1440 phba->hbq_put = NULL;
1441 offset = (uint8_t *)&phba->mbox->us.s2.host -
1442 (uint8_t *)phba->slim2p.virt;
1443 pdma_addr = phba->slim2p.phys + offset;
1444 phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
1445 phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
1446 } else {
1447
1448 mb->un.varCfgPort.hps = 1;
1449
1450 if (phba->sli_rev == 3) {
1451 phba->host_gp = &mb_slim->us.s3.host[0];
1452 phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
1453 } else {
1454 phba->host_gp = &mb_slim->us.s2.host[0];
1455 phba->hbq_put = NULL;
1456 }
1457
1458
1459 phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
1460 (void __iomem *)phba->host_gp -
1461 (void __iomem *)phba->MBslimaddr;
1462 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
1463 phba->pcb->hgpAddrHigh = bar_high;
1464 else
1465 phba->pcb->hgpAddrHigh = 0;
1466
1467 memset(&hgp, 0, sizeof(struct lpfc_hgp));
1468
1469 for (i = 0; i < phba->sli.num_rings; i++) {
1470 lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
1471 sizeof(*phba->host_gp));
1472 }
1473 }
1474
1475
1476 if (phba->sli_rev == 3)
1477 pgp_offset = offsetof(struct lpfc_sli2_slim,
1478 mbx.us.s3_pgp.port);
1479 else
1480 pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
1481 pdma_addr = phba->slim2p.phys + pgp_offset;
1482 phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
1483 phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
1484
1485
1486 lpfc_config_pcb_setup(phba);
1487
1488
1489 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
1490 uint32_t hbainit[5];
1491
1492 lpfc_hba_init(phba, hbainit);
1493
1494 memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
1495 }
1496
1497
1498 lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
1499 }
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516 void
1517 lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1518 {
1519 MAILBOX_t *mb = &pmb->u.mb;
1520
1521 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1522 mb->mbxCommand = MBX_KILL_BOARD;
1523 mb->mbxOwner = OWN_HOST;
1524 return;
1525 }
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537 void
1538 lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
1539 {
1540 struct lpfc_sli *psli;
1541
1542 psli = &phba->sli;
1543
1544 list_add_tail(&mbq->list, &psli->mboxq);
1545
1546 psli->mboxq_cnt++;
1547
1548 return;
1549 }
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565 LPFC_MBOXQ_t *
1566 lpfc_mbox_get(struct lpfc_hba * phba)
1567 {
1568 LPFC_MBOXQ_t *mbq = NULL;
1569 struct lpfc_sli *psli = &phba->sli;
1570
1571 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
1572 if (mbq)
1573 psli->mboxq_cnt--;
1574
1575 return mbq;
1576 }
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588 void
1589 __lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1590 {
1591 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1592 }
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604 void
1605 lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1606 {
1607 unsigned long iflag;
1608
1609
1610 spin_lock_irqsave(&phba->hbalock, iflag);
1611 __lpfc_mbox_cmpl_put(phba, mbq);
1612 spin_unlock_irqrestore(&phba->hbalock, iflag);
1613 return;
1614 }
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627 int
1628 lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1629 {
1630
1631
1632
1633 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1634 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1635 if (!mboxq->vport) {
1636 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1637 "1814 Mbox x%x failed, no vport\n",
1638 mboxq->u.mb.mbxCommand);
1639 dump_stack();
1640 return -ENODEV;
1641 }
1642 }
1643 return 0;
1644 }
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656 int
1657 lpfc_mbox_dev_check(struct lpfc_hba *phba)
1658 {
1659
1660 if (unlikely(pci_channel_offline(phba->pcidev)))
1661 return -ENODEV;
1662
1663
1664 if (phba->link_state == LPFC_HBA_ERROR)
1665 return -ENODEV;
1666
1667 return 0;
1668 }
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
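/**
 * lpfc_mbox_tmo_val - Select the timeout value for a mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: mailbox command whose timeout is being determined.
 *
 * Flash-related commands and a number of SLI4_CONFIG sub-commands are given
 * extended timeouts; everything else uses the default mailbox timeout.
 *
 * Return: the timeout value, in seconds, to use for @mboxq.
 */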
1681 int
1682 lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1683 {
1684 MAILBOX_t *mbox = &mboxq->u.mb;
1685 uint8_t subsys, opcode;
1686
1687 switch (mbox->mbxCommand) {
1688 case MBX_WRITE_NV:
1689 case MBX_DUMP_MEMORY:
1690 case MBX_UPDATE_CFG:
1691 case MBX_DOWN_LOAD:
1692 case MBX_DEL_LD_ENTRY:
1693 case MBX_WRITE_VPARMS:
1694 case MBX_LOAD_AREA:
1695 case MBX_WRITE_WWN:
1696 case MBX_LOAD_EXP_ROM:
1697 case MBX_ACCESS_VDATA:
1698 return LPFC_MBOX_TMO_FLASH_CMD;
1699 case MBX_SLI4_CONFIG:
1700 subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
1701 opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
1702 if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
1703 switch (opcode) {
1704 case LPFC_MBOX_OPCODE_READ_OBJECT:
1705 case LPFC_MBOX_OPCODE_WRITE_OBJECT:
1706 case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
1707 case LPFC_MBOX_OPCODE_DELETE_OBJECT:
1708 case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
1709 case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
1710 case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
1711 case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
1712 case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
1713 case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
1714 case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
1715 case LPFC_MBOX_OPCODE_RESET_LICENSES:
1716 case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
1717 case LPFC_MBOX_OPCODE_GET_VPD_DATA:
1718 case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
1719 return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
1720 }
1721 }
1722 if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
1723 switch (opcode) {
1724 case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
1725 return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
1726 }
1727 }
1728 return LPFC_MBOX_SLI4_CONFIG_TMO;
1729 }
1730 return LPFC_MBOX_TMO;
1731 }
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743 void
1744 lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1745 dma_addr_t phyaddr, uint32_t length)
1746 {
1747 struct lpfc_mbx_nembed_cmd *nembed_sge;
1748
1749 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1750 &mbox->u.mqe.un.nembed_cmd;
1751 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1752 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1753 nembed_sge->sge[sgentry].length = length;
1754 }
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765 void
1766 lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1767 struct lpfc_mbx_sge *sge)
1768 {
1769 struct lpfc_mbx_nembed_cmd *nembed_sge;
1770
1771 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1772 &mbox->u.mqe.un.nembed_cmd;
1773 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1774 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1775 sge->length = nembed_sge->sge[sgentry].length;
1776 }
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
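/**
 * lpfc_sli4_mbox_cmd_free - Free an SLI-4 mailbox command and its SGE pages
 * @phba: pointer to lpfc hba data structure.
 * @mbox: mailbox command to release.
 *
 * For embedded SLI4_CONFIG commands only the mailbox itself is returned to
 * the pool.  For non-embedded commands each externally allocated SGE page is
 * freed with dma_free_coherent() before the SGE array and the mailbox are
 * released.
 */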
1787 void
1788 lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1789 {
1790 struct lpfc_mbx_sli4_config *sli4_cfg;
1791 struct lpfc_mbx_sge sge;
1792 dma_addr_t phyaddr;
1793 uint32_t sgecount, sgentry;
1794
1795 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1796
1797
1798 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1799 mempool_free(mbox, phba->mbox_mem_pool);
1800 return;
1801 }
1802
1803
1804 sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1805
1806 if (unlikely(!mbox->sge_array)) {
1807 mempool_free(mbox, phba->mbox_mem_pool);
1808 return;
1809 }
1810
1811 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1812 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1813 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1814 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
1815 mbox->sge_array->addr[sgentry], phyaddr);
1816 }
1817
1818 kfree(mbox->sge_array);
1819
1820 mempool_free(mbox, phba->mbox_mem_pool);
1821 }
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
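/**
 * lpfc_sli4_config - Initialize an SLI4_CONFIG mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: mailbox command to initialize.
 * @subsystem: SLI-4 config subsystem the opcode belongs to.
 * @opcode: SLI-4 config opcode to issue.
 * @length: payload length, including the config header.
 * @emb: true for an embedded payload, false for a non-embedded (SGE) payload.
 *
 * For embedded commands the config header is filled in directly.  For
 * non-embedded commands up to LPFC_SLI4_MBX_SGE_MAX_PAGES DMA pages are
 * allocated, hung off @mbox->sge_array and described by SGEs in the mailbox;
 * the config header is placed in the first page.
 *
 * Return: the number of payload bytes the command can carry (the requested
 * length for embedded commands, the allocated length otherwise), or 0 if the
 * SGE array could not be allocated.
 */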
1838 int
1839 lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1840 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
1841 {
1842 struct lpfc_mbx_sli4_config *sli4_config;
1843 union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
1844 uint32_t alloc_len;
1845 uint32_t resid_len;
1846 uint32_t pagen, pcount;
1847 void *viraddr;
1848 dma_addr_t phyaddr;
1849
1850
1851 memset(mbox, 0, sizeof(*mbox));
1852 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
1853
1854
1855 sli4_config = &mbox->u.mqe.un.sli4_config;
1856
1857
1858 if (emb) {
1859
1860 bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
1861 sli4_config->header.cfg_mhdr.payload_length = length;
1862
1863 bf_set(lpfc_mbox_hdr_opcode,
1864 &sli4_config->header.cfg_shdr.request, opcode);
1865 bf_set(lpfc_mbox_hdr_subsystem,
1866 &sli4_config->header.cfg_shdr.request, subsystem);
1867 sli4_config->header.cfg_shdr.request.request_length =
1868 length - LPFC_MBX_CMD_HDR_LENGTH;
1869 return length;
1870 }
1871
1872
1873 pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
1874 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1875 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1876
1877 mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1878 GFP_KERNEL);
1879 if (!mbox->sge_array) {
1880 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1881 "2527 Failed to allocate non-embedded SGE "
1882 "array.\n");
1883 return 0;
1884 }
1885 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1886
1887
1888
1889
1890
1891 viraddr = dma_alloc_coherent(&phba->pcidev->dev,
1892 SLI4_PAGE_SIZE, &phyaddr,
1893 GFP_KERNEL);
1894
1895 if (!viraddr)
1896 break;
1897 mbox->sge_array->addr[pagen] = viraddr;
1898
1899 if (pagen == 0)
1900 cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
1901 resid_len = length - alloc_len;
1902 if (resid_len > SLI4_PAGE_SIZE) {
1903 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1904 SLI4_PAGE_SIZE);
1905 alloc_len += SLI4_PAGE_SIZE;
1906 } else {
1907 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1908 resid_len);
1909 alloc_len = length;
1910 }
1911 }
1912
1913
1914 sli4_config->header.cfg_mhdr.payload_length = alloc_len;
1915 bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
1916
1917
1918 if (pagen > 0) {
1919 bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
1920 bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
1921 cfg_shdr->request.request_length =
1922 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1923 }
1924
1925 if (cfg_shdr)
1926 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1927 sizeof(union lpfc_sli4_cfg_shdr));
1928 return alloc_len;
1929 }
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946 int
1947 lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1948 uint16_t exts_count, uint16_t rsrc_type, bool emb)
1949 {
1950 uint8_t opcode = 0;
1951 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
1952 void *virtaddr = NULL;
1953
1954
1955 if (emb == LPFC_SLI4_MBX_NEMBED) {
1956
1957 virtaddr = mbox->sge_array->addr[0];
1958 if (virtaddr == NULL)
1959 return 1;
1960 n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
1961 }
1962
1963
1964
1965
1966
1967 if (emb == LPFC_SLI4_MBX_EMBED)
1968 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1969 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1970 rsrc_type);
1971 else {
1972
1973 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1974 n_rsrc_extnt, rsrc_type);
1975 lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
1976 &n_rsrc_extnt->word4,
1977 sizeof(uint32_t));
1978 }
1979
1980
1981 opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
1982 switch (opcode) {
1983 case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
1984 if (emb == LPFC_SLI4_MBX_EMBED)
1985 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1986 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1987 exts_count);
1988 else
1989 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1990 n_rsrc_extnt, exts_count);
1991 break;
1992 case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
1993 case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
1994 case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
1995
1996 break;
1997 default:
1998 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1999 "2929 Resource Extent Opcode x%x is "
2000 "unsupported\n", opcode);
2001 return 1;
2002 }
2003
2004 return 0;
2005 }
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017 uint8_t
2018 lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2019 {
2020 struct lpfc_mbx_sli4_config *sli4_cfg;
2021 union lpfc_sli4_cfg_shdr *cfg_shdr;
2022
2023 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
2024 return LPFC_MBOX_SUBSYSTEM_NA;
2025 sli4_cfg = &mbox->u.mqe.un.sli4_config;
2026
2027
2028 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
2029 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
2030 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
2031 }
2032
2033
2034 if (unlikely(!mbox->sge_array))
2035 return LPFC_MBOX_SUBSYSTEM_NA;
2036 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
2037 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
2038 }
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050 uint8_t
2051 lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2052 {
2053 struct lpfc_mbx_sli4_config *sli4_cfg;
2054 union lpfc_sli4_cfg_shdr *cfg_shdr;
2055
2056 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
2057 return LPFC_MBOX_OPCODE_NA;
2058 sli4_cfg = &mbox->u.mqe.un.sli4_config;
2059
2060
2061 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
2062 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
2063 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
2064 }
2065
2066
2067 if (unlikely(!mbox->sge_array))
2068 return LPFC_MBOX_OPCODE_NA;
2069 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
2070 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
2071 }
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
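/**
 * lpfc_sli4_mbx_read_fcf_rec - Prepare a READ_FCF_TABLE mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: mailbox command to initialize.
 * @fcf_index: index of the FCF record to read.
 *
 * Sets up a non-embedded FCOE READ_FCF_TABLE request large enough to hold
 * one FCF record and stores @fcf_index in the request.
 *
 * Return: 0 on success, -ENOMEM if @mboxq is NULL or the non-embedded
 * payload could not be fully allocated.
 */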
2085 int
2086 lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
2087 struct lpfcMboxq *mboxq,
2088 uint16_t fcf_index)
2089 {
2090 void *virt_addr;
2091 uint8_t *bytep;
2092 struct lpfc_mbx_sge sge;
2093 uint32_t alloc_len, req_len;
2094 struct lpfc_mbx_read_fcf_tbl *read_fcf;
2095
2096 if (!mboxq)
2097 return -ENOMEM;
2098
2099 req_len = sizeof(struct fcf_record) +
2100 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
2101
2102
2103 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2104 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
2105 LPFC_SLI4_MBX_NEMBED);
2106
2107 if (alloc_len < req_len) {
2108 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2109 "0291 Allocated DMA memory size (x%x) is "
2110 "less than the requested DMA memory "
2111 "size (x%x)\n", alloc_len, req_len);
2112 return -ENOMEM;
2113 }
2114
2115
2116
2117
2118 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
2119 virt_addr = mboxq->sge_array->addr[0];
2120 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
2121
2122
2123 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
2124
2125 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
2126 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
2127
2128 return 0;
2129 }
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139 void
2140 lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
2141 {
2142
2143 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
2144 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
2145
2146
2147 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
2148 bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
2149
2150
2151 if (phba->cfg_enable_bg)
2152 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
2153
2154
2155 if (phba->max_vpi && phba->cfg_enable_npiv)
2156 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
2157
2158 if (phba->nvmet_support) {
2159 bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1);
2160
2161 bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
2162 bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
2163 }
2164
2165
2166 if (phba->cfg_vmid_app_header) {
2167 bf_set(lpfc_mbx_rq_ftr_rq_ashdr, &mboxq->u.mqe.un.req_ftrs, 1);
2168 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 1);
2169 }
2170 return;
2171 }
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184 void
2185 lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2186 {
2187 struct lpfc_mbx_init_vfi *init_vfi;
2188
2189 memset(mbox, 0, sizeof(*mbox));
2190 mbox->vport = vport;
2191 init_vfi = &mbox->u.mqe.un.init_vfi;
2192 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
2193 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
2194 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
2195 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
2196 bf_set(lpfc_init_vfi_vfi, init_vfi,
2197 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2198 bf_set(lpfc_init_vfi_vpi, init_vfi,
2199 vport->phba->vpi_ids[vport->vpi]);
2200 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2201 vport->phba->fcf.fcfi);
2202 }
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
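/**
 * lpfc_reg_vfi - Prepare a REG_VFI mailbox command
 * @mbox: mailbox command to initialize.
 * @vport: vport whose VFI is being registered.
 * @phys: DMA address of the vport's service parameters, or 0 to omit the BDE.
 *
 * Builds a MBX_REG_VFI command binding the vport's VFI to its FCFI and VPI,
 * including the port WWPN, timeout values and, when BB credit recovery is
 * enabled and advertised by the fabric, the BBSCN settings.  A VFI that is
 * already registered on an FC link is flagged as an update rather than a new
 * registration when the topology has not changed.
 */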
2215 void
2216 lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
2217 {
2218 struct lpfc_mbx_reg_vfi *reg_vfi;
2219 struct lpfc_hba *phba = vport->phba;
2220 uint8_t bbscn_fabric = 0, bbscn_max = 0, bbscn_def = 0;
2221
2222 memset(mbox, 0, sizeof(*mbox));
2223 reg_vfi = &mbox->u.mqe.un.reg_vfi;
2224 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
2225 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
2226 bf_set(lpfc_reg_vfi_vfi, reg_vfi,
2227 phba->sli4_hba.vfi_ids[vport->vfi]);
2228 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
2229 bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
2230 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
2231 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
2232 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
2233 reg_vfi->e_d_tov = phba->fc_edtov;
2234 reg_vfi->r_a_tov = phba->fc_ratov;
2235 if (phys) {
2236 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
2237 reg_vfi->bde.addrLow = putPaddrLow(phys);
2238 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
2239 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2240 }
2241 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
2242
2243
2244 if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
2245 (vport->fc_flag & FC_VFI_REGISTERED) &&
2246 (!phba->fc_topology_changed))
2247 bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
2248
2249 bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 0);
2250 bf_set(lpfc_reg_vfi_bbscn, reg_vfi, 0);
2251 bbscn_fabric = (phba->fc_fabparam.cmn.bbRcvSizeMsb >> 4) & 0xF;
2252
2253 if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
2254 bbscn_fabric != 0) {
2255 bbscn_max = bf_get(lpfc_bbscn_max,
2256 &phba->sli4_hba.bbscn_params);
2257 if (bbscn_fabric <= bbscn_max) {
2258 bbscn_def = bf_get(lpfc_bbscn_def,
2259 &phba->sli4_hba.bbscn_params);
2260
2261 if (bbscn_fabric > bbscn_def)
2262 bf_set(lpfc_reg_vfi_bbscn, reg_vfi,
2263 bbscn_fabric);
2264 else
2265 bf_set(lpfc_reg_vfi_bbscn, reg_vfi, bbscn_def);
2266
2267 bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 1);
2268 }
2269 }
2270 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
2271 "3134 Register VFI, mydid:x%x, fcfi:%d, "
2272 " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
2273 " port_state:x%x topology chg:%d bbscn_fabric :%d\n",
2274 vport->fc_myDID,
2275 phba->fcf.fcfi,
2276 phba->sli4_hba.vfi_ids[vport->vfi],
2277 phba->vpi_ids[vport->vpi],
2278 reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
2279 vport->port_state, phba->fc_topology_changed,
2280 bbscn_fabric);
2281 }
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295 void
2296 lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
2297 {
2298 memset(mbox, 0, sizeof(*mbox));
2299 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
2300 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
2301 phba->vpi_ids[vpi]);
2302 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
2303 phba->sli4_hba.vfi_ids[phba->pport->vfi]);
2304 }
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317 void
2318 lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2319 {
2320 memset(mbox, 0, sizeof(*mbox));
2321 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
2322 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
2323 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2324 }
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334 int
2335 lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2336 {
2337 struct lpfc_dmabuf *mp = NULL;
2338 MAILBOX_t *mb;
2339 int rc;
2340
2341 memset(mbox, 0, sizeof(*mbox));
2342 mb = &mbox->u.mb;
2343
2344 rc = lpfc_mbox_rsrc_prep(phba, mbox);
2345 if (rc) {
2346 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2347 "2569 %s: memory allocation failed\n",
2348 __func__);
2349 return 1;
2350 }
2351
2352 mb->mbxCommand = MBX_DUMP_MEMORY;
2353 mb->un.varDmp.type = DMP_NV_PARAMS;
2354 mb->un.varDmp.region_id = DMP_REGION_23;
2355 mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
2356 mp = mbox->ctx_buf;
2357 mb->un.varWords[3] = putPaddrLow(mp->phys);
2358 mb->un.varWords[4] = putPaddrHigh(mp->phys);
2359 return 0;
2360 }
2361
2362 static void
2363 lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2364 {
2365 MAILBOX_t *mb;
2366 int rc = FAILURE;
2367 struct lpfc_rdp_context *rdp_context =
2368 (struct lpfc_rdp_context *)(mboxq->ctx_ndlp);
2369
2370 mb = &mboxq->u.mb;
2371 if (mb->mbxStatus)
2372 goto mbx_failed;
2373
2374 memcpy(&rdp_context->link_stat, &mb->un.varRdLnk, sizeof(READ_LNK_VAR));
2375
2376 rc = SUCCESS;
2377
2378 mbx_failed:
2379 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
2380 rdp_context->cmpl(phba, rdp_context, rc);
2381 }
2382
2383 static void
2384 lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2385 {
2386 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
2387 struct lpfc_rdp_context *rdp_context =
2388 (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
2389
2390 if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
2391 goto error_mbox_free;
2392
2393 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
2394 DMP_SFF_PAGE_A2_SIZE);
2395
2396 lpfc_read_lnk_stat(phba, mbox);
2397 mbox->vport = rdp_context->ndlp->vport;
2398
2399
2400 mbox->ctx_buf = mp;
2401 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
2402 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
2403 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
2404 goto error_mbox_free;
2405
2406 return;
2407
2408 error_mbox_free:
2409 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
2410 rdp_context->cmpl(phba, rdp_context, FAILURE);
2411 }
2412
2413 void
2414 lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2415 {
2416 int rc;
2417 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
2418 struct lpfc_rdp_context *rdp_context =
2419 (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
2420
2421 if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
2422 goto error;
2423
2424 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0,
2425 DMP_SFF_PAGE_A0_SIZE);
2426
2427 memset(mbox, 0, sizeof(*mbox));
2428
2429 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE);
2430 INIT_LIST_HEAD(&mp->list);
2431
2432
2433 mbox->ctx_buf = mp;
2434 mbox->vport = rdp_context->ndlp->vport;
2435
2436 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
2437 bf_set(lpfc_mbx_memory_dump_type3_type,
2438 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
2439 bf_set(lpfc_mbx_memory_dump_type3_link,
2440 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
2441 bf_set(lpfc_mbx_memory_dump_type3_page_no,
2442 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2);
2443 bf_set(lpfc_mbx_memory_dump_type3_length,
2444 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
2445 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
2446 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
2447
2448 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
2449 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
2450 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2451 if (rc == MBX_NOT_FINISHED)
2452 goto error;
2453
2454 return;
2455
2456 error:
2457 lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
2458 rdp_context->cmpl(phba, rdp_context, FAILURE);
2459 }
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470 int
2471 lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2472 {
2473 int rc;
2474 struct lpfc_dmabuf *mp = NULL;
2475
2476 memset(mbox, 0, sizeof(*mbox));
2477
2478 rc = lpfc_mbox_rsrc_prep(phba, mbox);
2479 if (rc) {
2480 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2481 "3569 dump type 3 page 0xA0 allocation failed\n");
2482 return 1;
2483 }
2484
2485 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
2486 bf_set(lpfc_mbx_memory_dump_type3_type,
2487 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
2488 bf_set(lpfc_mbx_memory_dump_type3_link,
2489 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
2490 bf_set(lpfc_mbx_memory_dump_type3_page_no,
2491 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
2492 bf_set(lpfc_mbx_memory_dump_type3_length,
2493 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
2494
2495 mp = mbox->ctx_buf;
2496 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
2497 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
2498
2499 return 0;
2500 }
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
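/**
 * lpfc_reg_fcfi - Prepare a REG_FCFI mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: mailbox command to initialize.
 *
 * Builds a MBX_REG_FCFI command that registers the currently selected FCF
 * record and steers unsolicited frames to the appropriate receive queue:
 * the default header RQ for initiator-only configurations, or the first
 * NVMET MRQ (matching unsolicited FCP commands) when NVME target mode is
 * enabled with a single MRQ.
 */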
2515 void
2516 lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2517 {
2518 struct lpfc_mbx_reg_fcfi *reg_fcfi;
2519
2520 memset(mbox, 0, sizeof(*mbox));
2521 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
2522 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
2523 if (phba->nvmet_support == 0) {
2524 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
2525 phba->sli4_hba.hdr_rq->queue_id);
2526
2527 bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, 0);
2528 bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0);
2529 bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, 0);
2530 bf_set(lpfc_reg_fcfi_rctl_mask0, reg_fcfi, 0);
2531
2532 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
2533
2534
2535 bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
2536 (~phba->fcf.addr_mode) & 0x3);
2537 } else {
2538
2539 if (phba->cfg_nvmet_mrq != 1)
2540 return;
2541
2542 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
2543 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
2544
2545 bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, FC_TYPE_FCP);
2546 bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0xff);
2547 bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi,
2548 FC_RCTL_DD_UNSOL_CMD);
2549
2550 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi,
2551 phba->sli4_hba.hdr_rq->queue_id);
2552
2553 bf_set(lpfc_reg_fcfi_type_match1, reg_fcfi, 0);
2554 bf_set(lpfc_reg_fcfi_type_mask1, reg_fcfi, 0);
2555 bf_set(lpfc_reg_fcfi_rctl_match1, reg_fcfi, 0);
2556 bf_set(lpfc_reg_fcfi_rctl_mask1, reg_fcfi, 0);
2557 }
2558 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
2559 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
2560 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
2561 phba->fcf.current_rec.fcf_indx);
2562 if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
2563 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
2564 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
2565 phba->fcf.current_rec.vlan_id);
2566 }
2567 }
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583 void
2584 lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode)
2585 {
2586 struct lpfc_mbx_reg_fcfi_mrq *reg_fcfi;
2587
2588
2589 if (phba->cfg_nvmet_mrq <= 1)
2590 return;
2591
2592 memset(mbox, 0, sizeof(*mbox));
2593 reg_fcfi = &mbox->u.mqe.un.reg_fcfi_mrq;
2594 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI_MRQ);
2595 if (mode == 0) {
2596 bf_set(lpfc_reg_fcfi_mrq_info_index, reg_fcfi,
2597 phba->fcf.current_rec.fcf_indx);
2598 if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
2599 bf_set(lpfc_reg_fcfi_mrq_vv, reg_fcfi, 1);
2600 bf_set(lpfc_reg_fcfi_mrq_vlan_tag, reg_fcfi,
2601 phba->fcf.current_rec.vlan_id);
2602 }
2603 return;
2604 }
2605
2606 bf_set(lpfc_reg_fcfi_mrq_rq_id0, reg_fcfi,
2607 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
2608
2609 bf_set(lpfc_reg_fcfi_mrq_type_match0, reg_fcfi, FC_TYPE_FCP);
2610 bf_set(lpfc_reg_fcfi_mrq_type_mask0, reg_fcfi, 0xff);
2611 bf_set(lpfc_reg_fcfi_mrq_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD);
2612 bf_set(lpfc_reg_fcfi_mrq_rctl_mask0, reg_fcfi, 0xff);
2613 bf_set(lpfc_reg_fcfi_mrq_ptc0, reg_fcfi, 1);
2614 bf_set(lpfc_reg_fcfi_mrq_pt0, reg_fcfi, 1);
2615
2616 bf_set(lpfc_reg_fcfi_mrq_policy, reg_fcfi, 3);
2617 bf_set(lpfc_reg_fcfi_mrq_mode, reg_fcfi, 1);
2618 bf_set(lpfc_reg_fcfi_mrq_filter, reg_fcfi, 1);
2619 bf_set(lpfc_reg_fcfi_mrq_npairs, reg_fcfi, phba->cfg_nvmet_mrq);
2620
2621 bf_set(lpfc_reg_fcfi_mrq_rq_id1, reg_fcfi,
2622 phba->sli4_hba.hdr_rq->queue_id);
2623
2624 bf_set(lpfc_reg_fcfi_mrq_type_match1, reg_fcfi, 0);
2625 bf_set(lpfc_reg_fcfi_mrq_type_mask1, reg_fcfi, 0);
2626 bf_set(lpfc_reg_fcfi_mrq_rctl_match1, reg_fcfi, 0);
2627 bf_set(lpfc_reg_fcfi_mrq_rctl_mask1, reg_fcfi, 0);
2628
2629 bf_set(lpfc_reg_fcfi_mrq_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
2630 bf_set(lpfc_reg_fcfi_mrq_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
2631 }
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641 void
2642 lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
2643 {
2644 memset(mbox, 0, sizeof(*mbox));
2645 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
2646 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
2647 }
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657 void
2658 lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
2659 {
2660 struct lpfc_hba *phba = ndlp->phba;
2661 struct lpfc_mbx_resume_rpi *resume_rpi;
2662
2663 memset(mbox, 0, sizeof(*mbox));
2664 resume_rpi = &mbox->u.mqe.un.resume_rpi;
2665 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2666 bf_set(lpfc_resume_rpi_index, resume_rpi,
2667 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2668 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2669 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2670 }
2671