#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/aer.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_version.h"
#include "lpfc_compat.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_attr.h"

#define LPFC_DEF_DEVLOSS_TMO	30
#define LPFC_MIN_DEVLOSS_TMO	1
#define LPFC_MAX_DEVLOSS_TMO	255

#define LPFC_MAX_INFO_TMP_LEN	100
#define LPFC_INFO_MORE_STR	"\nCould be more info...\n"

#define LPFC_REG_WRITE_KEY_SIZE	4
#define LPFC_REG_WRITE_KEY	"EMLX"

const char *const trunk_errmsg[] = {
	"",
	"link negotiated speed does not match existing"
		" trunk - link was \"low\" speed",
	"link negotiated speed does not match"
		" existing trunk - link was \"middle\" speed",
	"link negotiated speed does not match existing"
		" trunk - link was \"high\" speed",
	"Attached to non-trunking port - F_Port",
	"Attached to non-trunking port - N_Port",
	"FLOGI response timeout",
	"non-FLOGI frame received",
	"Invalid FLOGI response",
	"Trunking initialization protocol",
	"Trunk peer device mismatch",
};
0085
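/**
 * lpfc_jedec_to_ascii - Convert a 32 bit JEDEC value to an ASCII string
 * @incr: integer to convert.
 * @hdw: ascii string holding the converted integer plus a string terminator.
 *
 * Description:
 * Each nibble of @incr is converted to '0'-'9' or 'a'-'f', most significant
 * nibble first, and the 8 character result is NUL terminated.
 **/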
0101 static void
0102 lpfc_jedec_to_ascii(int incr, char hdw[])
0103 {
0104 int i, j;
0105 for (i = 0; i < 8; i++) {
0106 j = (incr & 0xf);
0107 if (j <= 9)
0108 hdw[7 - i] = 0x30 + j;
0109 else
0110 hdw[7 - i] = 0x61 + j - 10;
0111 incr = (incr >> 4);
0112 }
0113 hdw[8] = 0;
0114 return;
0115 }
0116
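/**
 * lpfc_cmf_info_show - Return congestion management (CMF) info for the port
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the formatted congestion management info.
 *
 * Description:
 * Reports the congestion signal and FPIN registrations, the active and
 * configured CMF modes, and the receive/total byte counts accumulated
 * across all present CPUs.  Output is clamped to PAGE_SIZE.
 *
 * Returns: size of formatted string.
 **/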
0117 static ssize_t
0118 lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr,
0119 char *buf)
0120 {
0121 struct Scsi_Host *shost = class_to_shost(dev);
0122 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
0123 struct lpfc_hba *phba = vport->phba;
0124 struct lpfc_cgn_info *cp = NULL;
0125 struct lpfc_cgn_stat *cgs;
0126 int len = 0;
0127 int cpu;
0128 u64 rcv, total;
0129 char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
0130
0131 if (phba->cgn_i)
0132 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
0133
0134 scnprintf(tmp, sizeof(tmp),
0135 "Congestion Mgmt Info: E2Eattr %d Ver %d "
0136 "CMF %d cnt %d\n",
0137 phba->sli4_hba.pc_sli4_params.mi_ver,
0138 cp ? cp->cgn_info_version : 0,
0139 phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt);
0140
0141 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0142 goto buffer_done;
0143
0144 if (!phba->sli4_hba.pc_sli4_params.cmf)
0145 goto buffer_done;
0146
0147 switch (phba->cgn_init_reg_signal) {
0148 case EDC_CG_SIG_WARN_ONLY:
0149 scnprintf(tmp, sizeof(tmp),
0150 "Register: Init: Signal:WARN ");
0151 break;
0152 case EDC_CG_SIG_WARN_ALARM:
0153 scnprintf(tmp, sizeof(tmp),
0154 "Register: Init: Signal:WARN|ALARM ");
0155 break;
0156 default:
0157 scnprintf(tmp, sizeof(tmp),
0158 "Register: Init: Signal:NONE ");
0159 break;
0160 }
0161 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0162 goto buffer_done;
0163
0164 switch (phba->cgn_init_reg_fpin) {
0165 case LPFC_CGN_FPIN_WARN:
0166 scnprintf(tmp, sizeof(tmp),
0167 "FPIN:WARN\n");
0168 break;
0169 case LPFC_CGN_FPIN_ALARM:
0170 scnprintf(tmp, sizeof(tmp),
0171 "FPIN:ALARM\n");
0172 break;
0173 case LPFC_CGN_FPIN_BOTH:
0174 scnprintf(tmp, sizeof(tmp),
0175 "FPIN:WARN|ALARM\n");
0176 break;
0177 default:
0178 scnprintf(tmp, sizeof(tmp),
0179 "FPIN:NONE\n");
0180 break;
0181 }
0182 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0183 goto buffer_done;
0184
0185 switch (phba->cgn_reg_signal) {
0186 case EDC_CG_SIG_WARN_ONLY:
0187 scnprintf(tmp, sizeof(tmp),
0188 " Current: Signal:WARN ");
0189 break;
0190 case EDC_CG_SIG_WARN_ALARM:
0191 scnprintf(tmp, sizeof(tmp),
0192 " Current: Signal:WARN|ALARM ");
0193 break;
0194 default:
0195 scnprintf(tmp, sizeof(tmp),
0196 " Current: Signal:NONE ");
0197 break;
0198 }
0199 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0200 goto buffer_done;
0201
0202 switch (phba->cgn_reg_fpin) {
0203 case LPFC_CGN_FPIN_WARN:
0204 scnprintf(tmp, sizeof(tmp),
0205 "FPIN:WARN ACQEcnt:%d\n", phba->cgn_acqe_cnt);
0206 break;
0207 case LPFC_CGN_FPIN_ALARM:
0208 scnprintf(tmp, sizeof(tmp),
0209 "FPIN:ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
0210 break;
0211 case LPFC_CGN_FPIN_BOTH:
0212 scnprintf(tmp, sizeof(tmp),
0213 "FPIN:WARN|ALARM ACQEcnt:%d\n", phba->cgn_acqe_cnt);
0214 break;
0215 default:
0216 scnprintf(tmp, sizeof(tmp),
0217 "FPIN:NONE ACQEcnt:%d\n", phba->cgn_acqe_cnt);
0218 break;
0219 }
0220 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0221 goto buffer_done;
0222
0223 if (phba->cmf_active_mode != phba->cgn_p.cgn_param_mode) {
0224 switch (phba->cmf_active_mode) {
0225 case LPFC_CFG_OFF:
0226 scnprintf(tmp, sizeof(tmp), "Active: Mode:Off\n");
0227 break;
0228 case LPFC_CFG_MANAGED:
0229 scnprintf(tmp, sizeof(tmp), "Active: Mode:Managed\n");
0230 break;
0231 case LPFC_CFG_MONITOR:
0232 scnprintf(tmp, sizeof(tmp), "Active: Mode:Monitor\n");
0233 break;
0234 default:
0235 scnprintf(tmp, sizeof(tmp), "Active: Mode:Unknown\n");
0236 }
0237 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0238 goto buffer_done;
0239 }
0240
0241 switch (phba->cgn_p.cgn_param_mode) {
0242 case LPFC_CFG_OFF:
0243 scnprintf(tmp, sizeof(tmp), "Config: Mode:Off ");
0244 break;
0245 case LPFC_CFG_MANAGED:
0246 scnprintf(tmp, sizeof(tmp), "Config: Mode:Managed ");
0247 break;
0248 case LPFC_CFG_MONITOR:
0249 scnprintf(tmp, sizeof(tmp), "Config: Mode:Monitor ");
0250 break;
0251 default:
0252 scnprintf(tmp, sizeof(tmp), "Config: Mode:Unknown ");
0253 }
0254 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0255 goto buffer_done;
0256
0257 total = 0;
0258 rcv = 0;
0259 for_each_present_cpu(cpu) {
0260 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
0261 total += atomic64_read(&cgs->total_bytes);
0262 rcv += atomic64_read(&cgs->rcv_bytes);
0263 }
0264
0265 scnprintf(tmp, sizeof(tmp),
0266 "IObusy:%d Info:%d Bytes: Rcv:x%llx Total:x%llx\n",
0267 atomic_read(&phba->cmf_busy),
0268 phba->cmf_active_info, rcv, total);
0269 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0270 goto buffer_done;
0271
0272 scnprintf(tmp, sizeof(tmp),
0273 "Port_speed:%d Link_byte_cnt:%ld "
0274 "Max_byte_per_interval:%ld\n",
0275 lpfc_sli_port_speed_get(phba),
0276 (unsigned long)phba->cmf_link_byte_count,
0277 (unsigned long)phba->cmf_max_bytes_per_interval);
0278 strlcat(buf, tmp, PAGE_SIZE);
0279
0280 buffer_done:
0281 len = strnlen(buf, PAGE_SIZE);
0282
0283 if (unlikely(len >= (PAGE_SIZE - 1))) {
0284 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
0285 "6312 Catching potential buffer "
0286 "overflow > PAGE_SIZE = %lu bytes\n",
0287 PAGE_SIZE);
0288 strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
0289 LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1);
0290 }
0291 return len;
0292 }
0293
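/**
 * lpfc_drvr_version_show - Return the lpfc driver string that was loaded
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the module description text.
 *
 * Returns: size of formatted string.
 **/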
0302 static ssize_t
0303 lpfc_drvr_version_show(struct device *dev, struct device_attribute *attr,
0304 char *buf)
0305 {
0306 return scnprintf(buf, PAGE_SIZE, LPFC_MODULE_DESC "\n");
0307 }
0308
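/**
 * lpfc_enable_fip_show - Return the FIP mode of the HBA
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains "1" if FIP is supported, otherwise "0".
 *
 * Returns: size of formatted string.
 **/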
0317 static ssize_t
0318 lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
0319 char *buf)
0320 {
0321 struct Scsi_Host *shost = class_to_shost(dev);
0322 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0323 struct lpfc_hba *phba = vport->phba;
0324
0325 if (phba->hba_flag & HBA_FIP_SUPPORT)
0326 return scnprintf(buf, PAGE_SIZE, "1\n");
0327 else
0328 return scnprintf(buf, PAGE_SIZE, "0\n");
0329 }
0330
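/**
 * lpfc_nvme_info_show - Return the NVME Transport related info to the host
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains the NVME target or initiator state and statistics.
 *
 * Description:
 * For an NVMET target port the registration state and LS/FCP counters are
 * reported.  For an initiator port the local port, every discovered NVME
 * remote port on the vport, and the per-hardware-queue FCP statistics are
 * reported.  Output is clamped to PAGE_SIZE.
 *
 * Returns: size of formatted string.
 **/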
0331 static ssize_t
0332 lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
0333 char *buf)
0334 {
0335 struct Scsi_Host *shost = class_to_shost(dev);
0336 struct lpfc_vport *vport = shost_priv(shost);
0337 struct lpfc_hba *phba = vport->phba;
0338 struct lpfc_nvmet_tgtport *tgtp;
0339 struct nvme_fc_local_port *localport;
0340 struct lpfc_nvme_lport *lport;
0341 struct lpfc_nvme_rport *rport;
0342 struct lpfc_nodelist *ndlp;
0343 struct nvme_fc_remote_port *nrport;
0344 struct lpfc_fc4_ctrl_stat *cstat;
0345 uint64_t data1, data2, data3;
0346 uint64_t totin, totout, tot;
0347 char *statep;
0348 int i;
0349 int len = 0;
0350 char tmp[LPFC_MAX_INFO_TMP_LEN] = {0};
0351
0352 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
0353 len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
0354 return len;
0355 }
0356 if (phba->nvmet_support) {
0357 if (!phba->targetport) {
0358 len = scnprintf(buf, PAGE_SIZE,
0359 "NVME Target: x%llx is not allocated\n",
0360 wwn_to_u64(vport->fc_portname.u.wwn));
0361 return len;
0362 }
0363
0364 if (phba->targetport->port_id)
0365 statep = "REGISTERED";
0366 else
0367 statep = "INIT";
0368 scnprintf(tmp, sizeof(tmp),
0369 "NVME Target Enabled State %s\n",
0370 statep);
0371 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0372 goto buffer_done;
0373
0374 scnprintf(tmp, sizeof(tmp),
0375 "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
0376 "NVME Target: lpfc",
0377 phba->brd_no,
0378 wwn_to_u64(vport->fc_portname.u.wwn),
0379 wwn_to_u64(vport->fc_nodename.u.wwn),
0380 phba->targetport->port_id);
0381 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0382 goto buffer_done;
0383
0384 if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
0385 >= PAGE_SIZE)
0386 goto buffer_done;
0387
0388 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
0389 scnprintf(tmp, sizeof(tmp),
0390 "LS: Rcv %08x Drop %08x Abort %08x\n",
0391 atomic_read(&tgtp->rcv_ls_req_in),
0392 atomic_read(&tgtp->rcv_ls_req_drop),
0393 atomic_read(&tgtp->xmt_ls_abort));
0394 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0395 goto buffer_done;
0396
0397 if (atomic_read(&tgtp->rcv_ls_req_in) !=
0398 atomic_read(&tgtp->rcv_ls_req_out)) {
0399 scnprintf(tmp, sizeof(tmp),
0400 "Rcv LS: in %08x != out %08x\n",
0401 atomic_read(&tgtp->rcv_ls_req_in),
0402 atomic_read(&tgtp->rcv_ls_req_out));
0403 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0404 goto buffer_done;
0405 }
0406
0407 scnprintf(tmp, sizeof(tmp),
0408 "LS: Xmt %08x Drop %08x Cmpl %08x\n",
0409 atomic_read(&tgtp->xmt_ls_rsp),
0410 atomic_read(&tgtp->xmt_ls_drop),
0411 atomic_read(&tgtp->xmt_ls_rsp_cmpl));
0412 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0413 goto buffer_done;
0414
0415 scnprintf(tmp, sizeof(tmp),
0416 "LS: RSP Abort %08x xb %08x Err %08x\n",
0417 atomic_read(&tgtp->xmt_ls_rsp_aborted),
0418 atomic_read(&tgtp->xmt_ls_rsp_xb_set),
0419 atomic_read(&tgtp->xmt_ls_rsp_error));
0420 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0421 goto buffer_done;
0422
0423 scnprintf(tmp, sizeof(tmp),
0424 "FCP: Rcv %08x Defer %08x Release %08x "
0425 "Drop %08x\n",
0426 atomic_read(&tgtp->rcv_fcp_cmd_in),
0427 atomic_read(&tgtp->rcv_fcp_cmd_defer),
0428 atomic_read(&tgtp->xmt_fcp_release),
0429 atomic_read(&tgtp->rcv_fcp_cmd_drop));
0430 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0431 goto buffer_done;
0432
0433 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
0434 atomic_read(&tgtp->rcv_fcp_cmd_out)) {
0435 scnprintf(tmp, sizeof(tmp),
0436 "Rcv FCP: in %08x != out %08x\n",
0437 atomic_read(&tgtp->rcv_fcp_cmd_in),
0438 atomic_read(&tgtp->rcv_fcp_cmd_out));
0439 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0440 goto buffer_done;
0441 }
0442
0443 scnprintf(tmp, sizeof(tmp),
0444 "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
0445 "drop %08x\n",
0446 atomic_read(&tgtp->xmt_fcp_read),
0447 atomic_read(&tgtp->xmt_fcp_read_rsp),
0448 atomic_read(&tgtp->xmt_fcp_write),
0449 atomic_read(&tgtp->xmt_fcp_rsp),
0450 atomic_read(&tgtp->xmt_fcp_drop));
0451 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0452 goto buffer_done;
0453
0454 scnprintf(tmp, sizeof(tmp),
0455 "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
0456 atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
0457 atomic_read(&tgtp->xmt_fcp_rsp_error),
0458 atomic_read(&tgtp->xmt_fcp_rsp_drop));
0459 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0460 goto buffer_done;
0461
0462 scnprintf(tmp, sizeof(tmp),
0463 "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
0464 atomic_read(&tgtp->xmt_fcp_rsp_aborted),
0465 atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
0466 atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
0467 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0468 goto buffer_done;
0469
0470 scnprintf(tmp, sizeof(tmp),
0471 "ABORT: Xmt %08x Cmpl %08x\n",
0472 atomic_read(&tgtp->xmt_fcp_abort),
0473 atomic_read(&tgtp->xmt_fcp_abort_cmpl));
0474 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0475 goto buffer_done;
0476
0477 scnprintf(tmp, sizeof(tmp),
0478 "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n",
0479 atomic_read(&tgtp->xmt_abort_sol),
0480 atomic_read(&tgtp->xmt_abort_unsol),
0481 atomic_read(&tgtp->xmt_abort_rsp),
0482 atomic_read(&tgtp->xmt_abort_rsp_error));
0483 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0484 goto buffer_done;
0485
0486 scnprintf(tmp, sizeof(tmp),
0487 "DELAY: ctx %08x fod %08x wqfull %08x\n",
0488 atomic_read(&tgtp->defer_ctx),
0489 atomic_read(&tgtp->defer_fod),
0490 atomic_read(&tgtp->defer_wqfull));
0491 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0492 goto buffer_done;
0493
0494
0495 tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
0496 tot += atomic_read(&tgtp->xmt_fcp_release);
0497 tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
0498
0499 scnprintf(tmp, sizeof(tmp),
0500 "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
0501 "CTX Outstanding %08llx\n\n",
0502 phba->sli4_hba.nvmet_xri_cnt,
0503 phba->sli4_hba.nvmet_io_wait_cnt,
0504 phba->sli4_hba.nvmet_io_wait_total,
0505 tot);
0506 strlcat(buf, tmp, PAGE_SIZE);
0507 goto buffer_done;
0508 }
0509
0510 localport = vport->localport;
0511 if (!localport) {
0512 len = scnprintf(buf, PAGE_SIZE,
0513 "NVME Initiator x%llx is not allocated\n",
0514 wwn_to_u64(vport->fc_portname.u.wwn));
0515 return len;
0516 }
0517 lport = (struct lpfc_nvme_lport *)localport->private;
0518 if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
0519 goto buffer_done;
0520
0521 scnprintf(tmp, sizeof(tmp),
0522 "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
0523 phba->brd_no,
0524 phba->sli4_hba.max_cfg_param.max_xri,
0525 phba->sli4_hba.io_xri_max,
0526 lpfc_sli4_get_els_iocb_cnt(phba));
0527 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0528 goto buffer_done;
0529
0530
0531 if (localport->port_id)
0532 statep = "ONLINE";
0533 else
0534 statep = "UNKNOWN ";
0535
0536 scnprintf(tmp, sizeof(tmp),
0537 "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
0538 "NVME LPORT lpfc",
0539 phba->brd_no,
0540 wwn_to_u64(vport->fc_portname.u.wwn),
0541 wwn_to_u64(vport->fc_nodename.u.wwn),
0542 localport->port_id, statep);
0543 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0544 goto buffer_done;
0545
0546 spin_lock_irq(shost->host_lock);
0547
0548 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
0549 nrport = NULL;
0550 spin_lock(&ndlp->lock);
0551 rport = lpfc_ndlp_get_nrport(ndlp);
0552 if (rport)
0553 nrport = rport->remoteport;
0554 spin_unlock(&ndlp->lock);
0555 if (!nrport)
0556 continue;
0557
0558
0559 switch (nrport->port_state) {
0560 case FC_OBJSTATE_ONLINE:
0561 statep = "ONLINE";
0562 break;
0563 case FC_OBJSTATE_UNKNOWN:
0564 statep = "UNKNOWN ";
0565 break;
0566 default:
0567 statep = "UNSUPPORTED";
0568 break;
0569 }
0570
0571
0572 if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
0573 goto unlock_buf_done;
0574 if (phba->brd_no >= 10) {
0575 if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
0576 goto unlock_buf_done;
0577 }
0578
0579 scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
0580 nrport->port_name);
0581 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0582 goto unlock_buf_done;
0583
0584 scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
0585 nrport->node_name);
0586 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0587 goto unlock_buf_done;
0588
0589 scnprintf(tmp, sizeof(tmp), "DID x%06x ",
0590 nrport->port_id);
0591 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0592 goto unlock_buf_done;
0593
0594
0595 if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
0596 if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
0597 goto unlock_buf_done;
0598 }
0599 if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
0600 if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
0601 goto unlock_buf_done;
0602 }
0603 if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
0604 if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
0605 goto unlock_buf_done;
0606 }
0607 if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
0608 FC_PORT_ROLE_NVME_TARGET |
0609 FC_PORT_ROLE_NVME_DISCOVERY)) {
0610 scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
0611 nrport->port_role);
0612 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0613 goto unlock_buf_done;
0614 }
0615
0616 scnprintf(tmp, sizeof(tmp), "%s\n", statep);
0617 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0618 goto unlock_buf_done;
0619 }
0620 spin_unlock_irq(shost->host_lock);
0621
0622 if (!lport)
0623 goto buffer_done;
0624
0625 if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
0626 goto buffer_done;
0627
0628 scnprintf(tmp, sizeof(tmp),
0629 "LS: Xmt %010x Cmpl %010x Abort %08x\n",
0630 atomic_read(&lport->fc4NvmeLsRequests),
0631 atomic_read(&lport->fc4NvmeLsCmpls),
0632 atomic_read(&lport->xmt_ls_abort));
0633 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0634 goto buffer_done;
0635
0636 scnprintf(tmp, sizeof(tmp),
0637 "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
0638 atomic_read(&lport->xmt_ls_err),
0639 atomic_read(&lport->cmpl_ls_xb),
0640 atomic_read(&lport->cmpl_ls_err));
0641 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0642 goto buffer_done;
0643
0644 totin = 0;
0645 totout = 0;
0646 for (i = 0; i < phba->cfg_hdw_queue; i++) {
0647 cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
0648 tot = cstat->io_cmpls;
0649 totin += tot;
0650 data1 = cstat->input_requests;
0651 data2 = cstat->output_requests;
0652 data3 = cstat->control_requests;
0653 totout += (data1 + data2 + data3);
0654 }
0655 scnprintf(tmp, sizeof(tmp),
0656 "Total FCP Cmpl %016llx Issue %016llx "
0657 "OutIO %016llx\n",
0658 totin, totout, totout - totin);
0659 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0660 goto buffer_done;
0661
0662 scnprintf(tmp, sizeof(tmp),
0663 "\tabort %08x noxri %08x nondlp %08x qdepth %08x "
0664 "wqerr %08x err %08x\n",
0665 atomic_read(&lport->xmt_fcp_abort),
0666 atomic_read(&lport->xmt_fcp_noxri),
0667 atomic_read(&lport->xmt_fcp_bad_ndlp),
0668 atomic_read(&lport->xmt_fcp_qdepth),
0669 atomic_read(&lport->xmt_fcp_wqerr),
0670 atomic_read(&lport->xmt_fcp_err));
0671 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0672 goto buffer_done;
0673
0674 scnprintf(tmp, sizeof(tmp),
0675 "FCP CMPL: xb %08x Err %08x\n",
0676 atomic_read(&lport->cmpl_fcp_xb),
0677 atomic_read(&lport->cmpl_fcp_err));
0678 strlcat(buf, tmp, PAGE_SIZE);
0679
0680
0681 goto buffer_done;
0682
0683 unlock_buf_done:
0684 spin_unlock_irq(shost->host_lock);
0685
0686 buffer_done:
0687 len = strnlen(buf, PAGE_SIZE);
0688
0689 if (unlikely(len >= (PAGE_SIZE - 1))) {
0690 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
0691 "6314 Catching potential buffer "
0692 "overflow > PAGE_SIZE = %lu bytes\n",
0693 PAGE_SIZE);
0694 strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
0695 LPFC_INFO_MORE_STR,
0696 sizeof(LPFC_INFO_MORE_STR) + 1);
0697 }
0698
0699 return len;
0700 }
0701
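/**
 * lpfc_scsi_stat_show - Return SCSI per-hardware-queue statistics
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: on return contains per-HDWQ read/write/control counts and totals.
 *
 * Returns: size of formatted string, or 0 if FCP is disabled or the
 * adapter is not SLI-4.
 **/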
0702 static ssize_t
0703 lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr,
0704 char *buf)
0705 {
0706 struct Scsi_Host *shost = class_to_shost(dev);
0707 struct lpfc_vport *vport = shost_priv(shost);
0708 struct lpfc_hba *phba = vport->phba;
0709 int len;
0710 struct lpfc_fc4_ctrl_stat *cstat;
0711 u64 data1, data2, data3;
0712 u64 tot, totin, totout;
0713 int i;
0714 char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
0715
0716 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
0717 (phba->sli_rev != LPFC_SLI_REV4))
0718 return 0;
0719
0720 scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n");
0721
0722 totin = 0;
0723 totout = 0;
0724 for (i = 0; i < phba->cfg_hdw_queue; i++) {
0725 cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
0726 tot = cstat->io_cmpls;
0727 totin += tot;
0728 data1 = cstat->input_requests;
0729 data2 = cstat->output_requests;
0730 data3 = cstat->control_requests;
0731 totout += (data1 + data2 + data3);
0732
0733 scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
0734 "IO %016llx ", i, data1, data2, data3);
0735 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0736 goto buffer_done;
0737
0738 scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
0739 tot, ((data1 + data2 + data3) - tot));
0740 if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
0741 goto buffer_done;
0742 }
0743 scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
0744 "OutIO %016llx\n", totin, totout, totout - totin);
0745 strlcat(buf, tmp, PAGE_SIZE);
0746
0747 buffer_done:
0748 len = strnlen(buf, PAGE_SIZE);
0749
0750 return len;
0751 }
0752
0753 static ssize_t
0754 lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
0755 char *buf)
0756 {
0757 struct Scsi_Host *shost = class_to_shost(dev);
0758 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0759 struct lpfc_hba *phba = vport->phba;
0760
0761 if (phba->cfg_enable_bg) {
0762 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
0763 return scnprintf(buf, PAGE_SIZE,
0764 "BlockGuard Enabled\n");
0765 else
0766 return scnprintf(buf, PAGE_SIZE,
0767 "BlockGuard Not Supported\n");
0768 } else
0769 return scnprintf(buf, PAGE_SIZE,
0770 "BlockGuard Disabled\n");
0771 }
0772
0773 static ssize_t
0774 lpfc_bg_guard_err_show(struct device *dev, struct device_attribute *attr,
0775 char *buf)
0776 {
0777 struct Scsi_Host *shost = class_to_shost(dev);
0778 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0779 struct lpfc_hba *phba = vport->phba;
0780
0781 return scnprintf(buf, PAGE_SIZE, "%llu\n",
0782 (unsigned long long)phba->bg_guard_err_cnt);
0783 }
0784
0785 static ssize_t
0786 lpfc_bg_apptag_err_show(struct device *dev, struct device_attribute *attr,
0787 char *buf)
0788 {
0789 struct Scsi_Host *shost = class_to_shost(dev);
0790 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0791 struct lpfc_hba *phba = vport->phba;
0792
0793 return scnprintf(buf, PAGE_SIZE, "%llu\n",
0794 (unsigned long long)phba->bg_apptag_err_cnt);
0795 }
0796
0797 static ssize_t
0798 lpfc_bg_reftag_err_show(struct device *dev, struct device_attribute *attr,
0799 char *buf)
0800 {
0801 struct Scsi_Host *shost = class_to_shost(dev);
0802 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0803 struct lpfc_hba *phba = vport->phba;
0804
0805 return scnprintf(buf, PAGE_SIZE, "%llu\n",
0806 (unsigned long long)phba->bg_reftag_err_cnt);
0807 }
0808
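/**
 * lpfc_info_show - Return some pci info about the host in ascii
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the formatted text from lpfc_info().
 *
 * Returns: size of formatted string.
 **/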
0817 static ssize_t
0818 lpfc_info_show(struct device *dev, struct device_attribute *attr,
0819 char *buf)
0820 {
0821 struct Scsi_Host *host = class_to_shost(dev);
0822
0823 return scnprintf(buf, PAGE_SIZE, "%s\n", lpfc_info(host));
0824 }
0825
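/**
 * lpfc_serialnum_show - Return the hba serial number in ascii
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the formatted text serial number.
 *
 * Returns: size of formatted string.
 **/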
0834 static ssize_t
0835 lpfc_serialnum_show(struct device *dev, struct device_attribute *attr,
0836 char *buf)
0837 {
0838 struct Scsi_Host *shost = class_to_shost(dev);
0839 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0840 struct lpfc_hba *phba = vport->phba;
0841
0842 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->SerialNumber);
0843 }
0844
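/**
 * lpfc_temp_sensor_show - Return the temperature sensor level
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the formatted temperature sensor support level.
 *
 * Returns: size of formatted string.
 **/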
0857 static ssize_t
0858 lpfc_temp_sensor_show(struct device *dev, struct device_attribute *attr,
0859 char *buf)
0860 {
0861 struct Scsi_Host *shost = class_to_shost(dev);
0862 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0863 struct lpfc_hba *phba = vport->phba;
0864 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->temp_sensor_support);
0865 }
0866
0875 static ssize_t
0876 lpfc_modeldesc_show(struct device *dev, struct device_attribute *attr,
0877 char *buf)
0878 {
0879 struct Scsi_Host *shost = class_to_shost(dev);
0880 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0881 struct lpfc_hba *phba = vport->phba;
0882
0883 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelDesc);
0884 }
0885
0894 static ssize_t
0895 lpfc_modelname_show(struct device *dev, struct device_attribute *attr,
0896 char *buf)
0897 {
0898 struct Scsi_Host *shost = class_to_shost(dev);
0899 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0900 struct lpfc_hba *phba = vport->phba;
0901
0902 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ModelName);
0903 }
0904
0913 static ssize_t
0914 lpfc_programtype_show(struct device *dev, struct device_attribute *attr,
0915 char *buf)
0916 {
0917 struct Scsi_Host *shost = class_to_shost(dev);
0918 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0919 struct lpfc_hba *phba = vport->phba;
0920
0921 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->ProgramType);
0922 }
0923
0932 static ssize_t
0933 lpfc_vportnum_show(struct device *dev, struct device_attribute *attr,
0934 char *buf)
0935 {
0936 struct Scsi_Host *shost = class_to_shost(dev);
0937 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0938 struct lpfc_hba *phba = vport->phba;
0939
0940 return scnprintf(buf, PAGE_SIZE, "%s\n", phba->Port);
0941 }
0942
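/**
 * lpfc_fwrev_show - Return the firmware rev running in the hba
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the decoded firmware revision and SLI level.
 *
 * Returns: size of formatted string.
 **/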
0951 static ssize_t
0952 lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
0953 char *buf)
0954 {
0955 struct Scsi_Host *shost = class_to_shost(dev);
0956 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0957 struct lpfc_hba *phba = vport->phba;
0958 uint32_t if_type;
0959 uint8_t sli_family;
0960 char fwrev[FW_REV_STR_SIZE];
0961 int len;
0962
0963 lpfc_decode_firmware_rev(phba, fwrev, 1);
0964 if_type = phba->sli4_hba.pc_sli4_params.if_type;
0965 sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
0966
0967 if (phba->sli_rev < LPFC_SLI_REV4)
0968 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
0969 fwrev, phba->sli_rev);
0970 else
0971 len = scnprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
0972 fwrev, phba->sli_rev, if_type, sli_family);
0973
0974 return len;
0975 }
0976
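/**
 * lpfc_hdw_show - Return the jedec information about the hba
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the ASCII BIU JEDEC revision followed by the
 *       smRev and smFwRev values from the VPD.
 *
 * Returns: size of formatted string.
 **/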
0985 static ssize_t
0986 lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf)
0987 {
0988 char hdw[9];
0989 struct Scsi_Host *shost = class_to_shost(dev);
0990 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
0991 struct lpfc_hba *phba = vport->phba;
0992 lpfc_vpd_t *vp = &phba->vpd;
0993
0994 lpfc_jedec_to_ascii(vp->rev.biuRev, hdw);
0995 return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw,
0996 vp->rev.smRev, vp->rev.smFwRev);
0997 }
0998
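/**
 * lpfc_option_rom_version_show - Return the adapter ROM FCode version
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains the boot code version: OptionROMVersion on
 *       SLI-3, the decoded firmware revision on SLI-4.
 *
 * Returns: size of formatted string.
 **/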
1007 static ssize_t
1008 lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
1009 char *buf)
1010 {
1011 struct Scsi_Host *shost = class_to_shost(dev);
1012 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1013 struct lpfc_hba *phba = vport->phba;
1014 char fwrev[FW_REV_STR_SIZE];
1015
1016 if (phba->sli_rev < LPFC_SLI_REV4)
1017 return scnprintf(buf, PAGE_SIZE, "%s\n",
1018 phba->OptionROMVersion);
1019
1020 lpfc_decode_firmware_rev(phba, fwrev, 1);
1021 return scnprintf(buf, PAGE_SIZE, "%s\n", fwrev);
1022 }
1023
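/**
 * lpfc_link_state_show - Return the link state of the port
 * @dev: class converted to a Scsi_host structure.
 * @attr: device attribute, not used.
 * @buf: on return contains text describing the state of the link.
 *
 * Description:
 * Reports Link Up/Down, the discovery state of the vport, the topology
 * (loop, fabric or point-to-point) and, on SLI-4 if_type 6 adapters, the
 * state of each configured trunk port.
 *
 * Returns: size of formatted string.
 **/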
1035 static ssize_t
1036 lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
1037 char *buf)
1038 {
1039 struct Scsi_Host *shost = class_to_shost(dev);
1040 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1041 struct lpfc_hba *phba = vport->phba;
1042 int len = 0;
1043
1044 switch (phba->link_state) {
1045 case LPFC_LINK_UNKNOWN:
1046 case LPFC_WARM_START:
1047 case LPFC_INIT_START:
1048 case LPFC_INIT_MBX_CMDS:
1049 case LPFC_LINK_DOWN:
1050 case LPFC_HBA_ERROR:
1051 if (phba->hba_flag & LINK_DISABLED)
1052 len += scnprintf(buf + len, PAGE_SIZE-len,
1053 "Link Down - User disabled\n");
1054 else
1055 len += scnprintf(buf + len, PAGE_SIZE-len,
1056 "Link Down\n");
1057 break;
1058 case LPFC_LINK_UP:
1059 case LPFC_CLEAR_LA:
1060 case LPFC_HBA_READY:
1061 len += scnprintf(buf + len, PAGE_SIZE-len, "Link Up - ");
1062
1063 switch (vport->port_state) {
1064 case LPFC_LOCAL_CFG_LINK:
1065 len += scnprintf(buf + len, PAGE_SIZE-len,
1066 "Configuring Link\n");
1067 break;
1068 case LPFC_FDISC:
1069 case LPFC_FLOGI:
1070 case LPFC_FABRIC_CFG_LINK:
1071 case LPFC_NS_REG:
1072 case LPFC_NS_QRY:
1073 case LPFC_BUILD_DISC_LIST:
1074 case LPFC_DISC_AUTH:
1075 len += scnprintf(buf + len, PAGE_SIZE - len,
1076 "Discovery\n");
1077 break;
1078 case LPFC_VPORT_READY:
1079 len += scnprintf(buf + len, PAGE_SIZE - len,
1080 "Ready\n");
1081 break;
1082
1083 case LPFC_VPORT_FAILED:
1084 len += scnprintf(buf + len, PAGE_SIZE - len,
1085 "Failed\n");
1086 break;
1087
1088 case LPFC_VPORT_UNKNOWN:
1089 len += scnprintf(buf + len, PAGE_SIZE - len,
1090 "Unknown\n");
1091 break;
1092 }
1093 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
1094 if (vport->fc_flag & FC_PUBLIC_LOOP)
1095 len += scnprintf(buf + len, PAGE_SIZE-len,
1096 " Public Loop\n");
1097 else
1098 len += scnprintf(buf + len, PAGE_SIZE-len,
1099 " Private Loop\n");
1100 } else {
1101 if (vport->fc_flag & FC_FABRIC) {
1102 if (phba->sli_rev == LPFC_SLI_REV4 &&
1103 vport->port_type == LPFC_PHYSICAL_PORT &&
1104 phba->sli4_hba.fawwpn_flag &
1105 LPFC_FAWWPN_FABRIC)
1106 len += scnprintf(buf + len,
1107 PAGE_SIZE - len,
1108 " Fabric FA-PWWN\n");
1109 else
1110 len += scnprintf(buf + len,
1111 PAGE_SIZE - len,
1112 " Fabric\n");
1113 } else {
1114 len += scnprintf(buf + len, PAGE_SIZE-len,
1115 " Point-2-Point\n");
1116 }
1117 }
1118 }
1119
1120 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1121 ((bf_get(lpfc_sli_intf_if_type,
1122 &phba->sli4_hba.sli_intf) ==
1123 LPFC_SLI_INTF_IF_TYPE_6))) {
1124 struct lpfc_trunk_link link = phba->trunk_link;
1125
1126 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
1127 len += scnprintf(buf + len, PAGE_SIZE - len,
1128 "Trunk port 0: Link %s %s\n",
1129 (link.link0.state == LPFC_LINK_UP) ?
1130 "Up" : "Down. ",
1131 trunk_errmsg[link.link0.fault]);
1132
1133 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
1134 len += scnprintf(buf + len, PAGE_SIZE - len,
1135 "Trunk port 1: Link %s %s\n",
1136 (link.link1.state == LPFC_LINK_UP) ?
1137 "Up" : "Down. ",
1138 trunk_errmsg[link.link1.fault]);
1139
1140 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
1141 len += scnprintf(buf + len, PAGE_SIZE - len,
1142 "Trunk port 2: Link %s %s\n",
1143 (link.link2.state == LPFC_LINK_UP) ?
1144 "Up" : "Down. ",
1145 trunk_errmsg[link.link2.fault]);
1146
1147 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
1148 len += scnprintf(buf + len, PAGE_SIZE - len,
1149 "Trunk port 3: Link %s %s\n",
1150 (link.link3.state == LPFC_LINK_UP) ?
1151 "Up" : "Down. ",
1152 trunk_errmsg[link.link3.fault]);
1153
1154 }
1155
1156 return len;
1157 }
1167 static ssize_t
1168 lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
1169 char *buf)
1170 {
1171 struct Scsi_Host *shost = class_to_shost(dev);
1172 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1173 struct lpfc_hba *phba = vport->phba;
1174
1175 if (phba->sli_rev < LPFC_SLI_REV4)
1176 return scnprintf(buf, PAGE_SIZE, "fc\n");
1177
1178 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
1179 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
1180 return scnprintf(buf, PAGE_SIZE, "fcoe\n");
1181 if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
1182 return scnprintf(buf, PAGE_SIZE, "fc\n");
1183 }
1184 return scnprintf(buf, PAGE_SIZE, "unknown\n");
1185 }
1186
1196 static ssize_t
1197 lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr,
1198 char *buf)
1199 {
1200 struct Scsi_Host *shost = class_to_shost(dev);
1201 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
1202 struct lpfc_hba *phba = vport->phba;
1203
1204 return scnprintf(buf, PAGE_SIZE, "%d\n",
1205 phba->sli4_hba.pc_sli4_params.oas_supported);
1206 }
1207
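/**
 * lpfc_link_state_store - Transition the link_state on an HBA port
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: "up" to bring a down link up, "down" to take an up link down.
 * @count: size of the buffer.
 *
 * Returns: length of @buf on success, -EINVAL or the link init/down
 * status on failure.
 **/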
1220 static ssize_t
1221 lpfc_link_state_store(struct device *dev, struct device_attribute *attr,
1222 const char *buf, size_t count)
1223 {
1224 struct Scsi_Host *shost = class_to_shost(dev);
1225 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1226 struct lpfc_hba *phba = vport->phba;
1227
1228 int status = -EINVAL;
1229
1230 if ((strncmp(buf, "up", sizeof("up") - 1) == 0) &&
1231 (phba->link_state == LPFC_LINK_DOWN))
1232 status = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1233 else if ((strncmp(buf, "down", sizeof("down") - 1) == 0) &&
1234 (phba->link_state >= LPFC_LINK_UP))
1235 status = phba->lpfc_hba_down_link(phba, MBX_NOWAIT);
1236
1237 if (status == 0)
1238 return strlen(buf);
1239 else
1240 return status;
1241 }
1242
1255 static ssize_t
1256 lpfc_num_discovered_ports_show(struct device *dev,
1257 struct device_attribute *attr, char *buf)
1258 {
1259 struct Scsi_Host *shost = class_to_shost(dev);
1260 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1261
1262 return scnprintf(buf, PAGE_SIZE, "%d\n",
1263 vport->fc_map_cnt + vport->fc_unmap_cnt);
1264 }
1265
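/**
 * lpfc_issue_lip - Force a link reset on the port
 * @shost: Scsi_Host pointer.
 *
 * Description:
 * Brings the link down with MBX_DOWN_LINK and then re-initializes it with
 * the configured topology and link speed.  Not permitted while the port is
 * offline, the link is administratively disabled, or management I/O is
 * blocked.
 *
 * Returns: 0 on success, -EPERM, -ENOMEM or -EIO on failure.
 **/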
1280 static int
1281 lpfc_issue_lip(struct Scsi_Host *shost)
1282 {
1283 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1284 struct lpfc_hba *phba = vport->phba;
1285 LPFC_MBOXQ_t *pmboxq;
1286 int mbxstatus = MBXERR_ERROR;
1287
1288
1289
1290
1291
1292 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
1293 (phba->hba_flag & LINK_DISABLED) ||
1294 (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO))
1295 return -EPERM;
1296
	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1298
1299 if (!pmboxq)
1300 return -ENOMEM;
1301
1302 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1303 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1304 pmboxq->u.mb.mbxOwner = OWN_HOST;
1305
1306 if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
1307 vport->fc_flag &= ~FC_PT2PT_NO_NVME;
1308
1309 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
1310
1311 if ((mbxstatus == MBX_SUCCESS) &&
1312 (pmboxq->u.mb.mbxStatus == 0 ||
1313 pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
1314 memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
1315 lpfc_init_link(phba, pmboxq, phba->cfg_topology,
1316 phba->cfg_link_speed);
1317 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1318 phba->fc_ratov * 2);
1319 if ((mbxstatus == MBX_SUCCESS) &&
1320 (pmboxq->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
1321 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1322 "2859 SLI authentication is required "
1323 "for INIT_LINK but has not done yet\n");
1324 }
1325
1326 lpfc_set_loopback_flag(phba);
1327 if (mbxstatus != MBX_TIMEOUT)
1328 mempool_free(pmboxq, phba->mbox_mem_pool);
1329
1330 if (mbxstatus == MBXERR_ERROR)
1331 return -EIO;
1332
1333 return 0;
1334 }
1335
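/**
 * lpfc_emptyq_wait - Wait for a list to become empty
 * @phba: pointer to the HBA structure.
 * @q: list to wait on.
 * @lock: spinlock protecting @q.
 *
 * Description:
 * Polls @q every 20 ms, dropping @lock while sleeping.  Gives up after
 * 250 iterations (roughly 5 seconds) and logs message 0466.
 *
 * Returns: 1 when the list is empty, 0 on timeout.
 **/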
1336 int
1337 lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
1338 {
1339 int cnt = 0;
1340
1341 spin_lock_irq(lock);
1342 while (!list_empty(q)) {
1343 spin_unlock_irq(lock);
1344 msleep(20);
1345 if (cnt++ > 250) {
1346 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1347 "0466 Outstanding IO when "
1348 "bringing Adapter offline\n");
1349 return 0;
1350 }
1351 spin_lock_irq(lock);
1352 }
1353 spin_unlock_irq(lock);
1354 return 1;
1355 }
1356
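/**
 * lpfc_do_offline - Take the adapter offline via the worker thread
 * @phba: lpfc_hba pointer.
 * @type: LPFC_EVT_OFFLINE, LPFC_EVT_WARM_START or LPFC_EVT_KILL.
 *
 * Description:
 * Posts an OFFLINE_PREP event to the worker thread, waits for outstanding
 * I/O on every ring (SLI-3) or work queue (SLI-4) to complete, then posts
 * the requested offline event and waits for it to finish.
 *
 * Returns: 0 on success, -ENOMEM if an event cannot be posted, -EIO if the
 * worker thread reports a failure.
 **/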
1372 static int
1373 lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
1374 {
1375 struct completion online_compl;
1376 struct lpfc_queue *qp = NULL;
1377 struct lpfc_sli_ring *pring;
1378 struct lpfc_sli *psli;
1379 int status = 0;
1380 int i;
1381 int rc;
1382
1383 init_completion(&online_compl);
1384 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1385 LPFC_EVT_OFFLINE_PREP);
1386 if (rc == 0)
1387 return -ENOMEM;
1388
1389 wait_for_completion(&online_compl);
1390
1391 if (status != 0)
1392 return -EIO;
1393
1394 psli = &phba->sli;
1395
1396
1397
1398
1399
1400
1401 spin_lock_irq(&phba->hbalock);
1402 if (!(psli->sli_flag & LPFC_QUEUE_FREE_INIT)) {
1403 psli->sli_flag |= LPFC_QUEUE_FREE_WAIT;
1404 } else {
1405 spin_unlock_irq(&phba->hbalock);
1406 goto skip_wait;
1407 }
1408 spin_unlock_irq(&phba->hbalock);
1409
1410
1411
1412
1413 if (phba->sli_rev != LPFC_SLI_REV4) {
1414 for (i = 0; i < psli->num_rings; i++) {
1415 pring = &psli->sli3_ring[i];
1416 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1417 &phba->hbalock))
1418 goto out;
1419 }
1420 } else {
1421 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1422 pring = qp->pring;
1423 if (!pring)
1424 continue;
1425 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1426 &pring->ring_lock))
1427 goto out;
1428 }
1429 }
1430 out:
1431 spin_lock_irq(&phba->hbalock);
1432 psli->sli_flag &= ~LPFC_QUEUE_FREE_WAIT;
1433 spin_unlock_irq(&phba->hbalock);
1434
1435 skip_wait:
1436 init_completion(&online_compl);
1437 rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
1438 if (rc == 0)
1439 return -ENOMEM;
1440
1441 wait_for_completion(&online_compl);
1442
1443 if (status != 0)
1444 return -EIO;
1445
1446 return 0;
1447 }
1448
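/**
 * lpfc_reset_pci_bus - Reset the PCI bus the adapter is attached to
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * Only allowed when cfg_enable_hba_reset is 2, the adapter passes the
 * resettable check, and every lpfc function on the same PCI bus is already
 * offline.  Issues pci_reset_bus() on the adapter's bus.
 *
 * Returns: 0 on success, a negative errno otherwise.
 **/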
1469 static int
1470 lpfc_reset_pci_bus(struct lpfc_hba *phba)
1471 {
1472 struct pci_dev *pdev = phba->pcidev;
1473 struct Scsi_Host *shost = NULL;
1474 struct lpfc_hba *phba_other = NULL;
1475 struct pci_dev *ptr = NULL;
1476 int res;
1477
1478 if (phba->cfg_enable_hba_reset != 2)
1479 return -ENOTSUPP;
1480
1481 if (!pdev) {
1482 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8345 pdev NULL!\n");
1483 return -ENODEV;
1484 }
1485
1486 res = lpfc_check_pci_resettable(phba);
1487 if (res)
1488 return res;
1489
1490
1491 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
1492
1493 shost = pci_get_drvdata(ptr);
1494 if (shost) {
1495 phba_other =
1496 ((struct lpfc_vport *)shost->hostdata)->phba;
1497 if (!(phba_other->pport->fc_flag & FC_OFFLINE_MODE)) {
1498 lpfc_printf_log(phba_other, KERN_INFO, LOG_INIT,
1499 "8349 WWPN = 0x%02x%02x%02x%02x"
1500 "%02x%02x%02x%02x is not "
1501 "offline!\n",
1502 phba_other->wwpn[0],
1503 phba_other->wwpn[1],
1504 phba_other->wwpn[2],
1505 phba_other->wwpn[3],
1506 phba_other->wwpn[4],
1507 phba_other->wwpn[5],
1508 phba_other->wwpn[6],
1509 phba_other->wwpn[7]);
1510 return -EBUSY;
1511 }
1512 }
1513 }
1514
1515
1516 res = pci_reset_bus(pdev);
1517 if (res) {
1518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1519 "8350 PCI reset bus failed: %d\n", res);
1520 }
1521
1522 return res;
1523 }
1524
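/**
 * lpfc_selective_reset - Offline then online the port
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * If the port is not already offline it is taken offline with
 * LPFC_EVT_OFFLINE, then an LPFC_EVT_ONLINE event is posted to the worker
 * thread and the routine waits for it to complete.
 *
 * Returns: 0 on success, -EACCES if HBA reset is disabled, -ENOMEM or
 * -EIO on failure.
 **/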
1542 int
1543 lpfc_selective_reset(struct lpfc_hba *phba)
1544 {
1545 struct completion online_compl;
1546 int status = 0;
1547 int rc;
1548
1549 if (!phba->cfg_enable_hba_reset)
1550 return -EACCES;
1551
1552 if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
1553 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1554
1555 if (status != 0)
1556 return status;
1557 }
1558
1559 init_completion(&online_compl);
1560 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1561 LPFC_EVT_ONLINE);
1562 if (rc == 0)
1563 return -ENOMEM;
1564
1565 wait_for_completion(&online_compl);
1566
1567 if (status != 0)
1568 return -EIO;
1569
1570 return 0;
1571 }
1572
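/**
 * lpfc_issue_reset - Selectively reset an adapter via sysfs
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: containing the string "selective".
 * @count: size of the buffer.
 *
 * Returns: length of @buf on success, a negative errno otherwise.
 **/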
1594 static ssize_t
1595 lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
1596 const char *buf, size_t count)
1597 {
1598 struct Scsi_Host *shost = class_to_shost(dev);
1599 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1600 struct lpfc_hba *phba = vport->phba;
1601 int status = -EINVAL;
1602
1603 if (!phba->cfg_enable_hba_reset)
1604 return -EACCES;
1605
1606 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
1607 status = phba->lpfc_selective_reset(phba);
1608
1609 if (status == 0)
1610 return strlen(buf);
1611 else
1612 return status;
1613 }
1614
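/**
 * lpfc_sli4_pdev_status_reg_wait - Wait for the SLI port to become ready
 * @phba: lpfc_hba pointer.
 *
 * Description:
 * After a firmware or device reset request the SLIPORT status register is
 * polled every 10 ms until the error, reset-needed and ready bits are all
 * set or the maximum wait count is reached.
 *
 * Returns: 0 when the port is ready, -EPERM if the reset request was not
 * accepted, -EIO on register read failure or timeout.
 **/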
1631 int
1632 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
1633 {
1634 struct lpfc_register portstat_reg = {0};
1635 int i;
1636
1637 msleep(100);
1638 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1639 &portstat_reg.word0))
1640 return -EIO;
1641
1642
1643 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
1644 !bf_get(lpfc_sliport_status_err, &portstat_reg))
1645 return -EPERM;
1646
1647
1648 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
1649 msleep(10);
1650 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
1651 &portstat_reg.word0))
1652 continue;
1653 if (!bf_get(lpfc_sliport_status_err, &portstat_reg))
1654 continue;
1655 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg))
1656 continue;
1657 if (!bf_get(lpfc_sliport_status_rdy, &portstat_reg))
1658 continue;
1659 break;
1660 }
1661
1662 if (i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT)
1663 return 0;
1664 else
1665 return -EIO;
1666 }
1667
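/**
 * lpfc_sli4_pdev_reg_request - Request a physical device reset or dump
 * @phba: lpfc_hba pointer.
 * @opcode: LPFC_FW_DUMP, LPFC_FW_RESET or LPFC_DV_RESET.
 *
 * Description:
 * Only supported on SLI-4 ports with interface type 2 or later and when
 * HBA reset is enabled.  Except for a firmware dump, the port is taken
 * offline first.  The requested control bit is written to the PDEV_CTL
 * register, the routine waits for the port to report ready, and the port
 * is then brought back online with any previously configured SR-IOV
 * virtual functions restored.
 *
 * Returns: 0 on success, a negative errno on failure.
 **/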
1680 static ssize_t
1681 lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
1682 {
1683 struct completion online_compl;
1684 struct pci_dev *pdev = phba->pcidev;
1685 uint32_t before_fc_flag;
1686 uint32_t sriov_nr_virtfn;
1687 uint32_t reg_val;
1688 int status = 0, rc = 0;
1689 int job_posted = 1, sriov_err;
1690
1691 if (!phba->cfg_enable_hba_reset)
1692 return -EACCES;
1693
1694 if ((phba->sli_rev < LPFC_SLI_REV4) ||
1695 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
1696 LPFC_SLI_INTF_IF_TYPE_2))
1697 return -EPERM;
1698
1699
1700 before_fc_flag = phba->pport->fc_flag;
1701 sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
1702
1703 if (opcode == LPFC_FW_DUMP) {
1704 init_completion(&online_compl);
1705 phba->fw_dump_cmpl = &online_compl;
1706 } else {
1707
1708 if (phba->cfg_sriov_nr_virtfn) {
1709 pci_disable_sriov(pdev);
1710 phba->cfg_sriov_nr_virtfn = 0;
1711 }
1712
1713 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1714
1715 if (status != 0)
1716 return status;
1717
1718
1719 msleep(100);
1720 }
1721
1722 reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
1723 LPFC_CTL_PDEV_CTL_OFFSET);
1724
1725 if (opcode == LPFC_FW_DUMP)
1726 reg_val |= LPFC_FW_DUMP_REQUEST;
1727 else if (opcode == LPFC_FW_RESET)
1728 reg_val |= LPFC_CTL_PDEV_CTL_FRST;
1729 else if (opcode == LPFC_DV_RESET)
1730 reg_val |= LPFC_CTL_PDEV_CTL_DRST;
1731
1732 writel(reg_val, phba->sli4_hba.conf_regs_memmap_p +
1733 LPFC_CTL_PDEV_CTL_OFFSET);
1734
1735 readl(phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
1736
1737
1738 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1739
1740 if (rc == -EPERM) {
1741
1742 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1743 "3150 No privilege to perform the requested "
1744 "access: x%x\n", reg_val);
1745 } else if (rc == -EIO) {
1746
1747 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1748 "3153 Fail to perform the requested "
1749 "access: x%x\n", reg_val);
1750 if (phba->fw_dump_cmpl)
1751 phba->fw_dump_cmpl = NULL;
1752 return rc;
1753 }
1754
1755
1756 if (before_fc_flag & FC_OFFLINE_MODE) {
1757 if (phba->fw_dump_cmpl)
1758 phba->fw_dump_cmpl = NULL;
1759 goto out;
1760 }
1761
1762
1763
1764
1765
1766 if (opcode == LPFC_FW_DUMP) {
1767 wait_for_completion(phba->fw_dump_cmpl);
1768 } else {
1769 init_completion(&online_compl);
1770 job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
1771 LPFC_EVT_ONLINE);
1772 if (!job_posted)
1773 goto out;
1774
1775 wait_for_completion(&online_compl);
1776 }
1777 out:
1778
1779 if (sriov_nr_virtfn) {
1780
1781 if (opcode == LPFC_FW_DUMP) {
1782 pci_disable_sriov(pdev);
1783 phba->cfg_sriov_nr_virtfn = 0;
1784 }
1785
1786 sriov_err =
1787 lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
1788 if (!sriov_err)
1789 phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
1790 }
1791
1792
1793 if (!rc) {
1794 if (!job_posted)
1795 rc = -ENOMEM;
1796 else if (status)
1797 rc = -EIO;
1798 }
1799 return rc;
1800 }
1801
1810 static ssize_t
1811 lpfc_nport_evt_cnt_show(struct device *dev, struct device_attribute *attr,
1812 char *buf)
1813 {
1814 struct Scsi_Host *shost = class_to_shost(dev);
1815 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1816 struct lpfc_hba *phba = vport->phba;
1817
1818 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->nport_event_cnt);
1819 }
1820
1821 static int
1822 lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out)
1823 {
1824 LPFC_MBOXQ_t *mbox = NULL;
1825 unsigned long val = 0;
1826 char *pval = NULL;
1827 int rc = 0;
1828
1829 if (!strncmp("enable", buff_out,
1830 strlen("enable"))) {
1831 pval = buff_out + strlen("enable") + 1;
1832 rc = kstrtoul(pval, 0, &val);
1833 if (rc)
1834 return rc;
1835 } else if (!strncmp("disable", buff_out,
1836 strlen("disable"))) {
1837 val = 0;
1838 } else {
1839 return -EINVAL;
1840 }
1841
1842 switch (val) {
1843 case 0:
1844 val = 0x0;
1845 break;
1846 case 2:
1847 val = 0x1;
1848 break;
1849 case 4:
1850 val = 0x2;
1851 break;
1852 default:
1853 return -EINVAL;
1854 }
1855
1856 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1857 "0070 Set trunk mode with val %ld ", val);
1858
1859 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1860 if (!mbox)
1861 return -ENOMEM;
1862
1863 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1864 LPFC_MBOX_OPCODE_FCOE_FC_SET_TRUNK_MODE,
1865 12, LPFC_SLI4_MBX_EMBED);
1866
1867 bf_set(lpfc_mbx_set_trunk_mode,
1868 &mbox->u.mqe.un.set_trunk_mode,
1869 val);
1870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1871 if (rc)
1872 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1873 "0071 Set trunk mode failed with status: %d",
1874 rc);
1875 mempool_free(mbox, phba->mbox_mem_pool);
1876
1877 return 0;
1878 }
1879
1888 static ssize_t
1889 lpfc_board_mode_show(struct device *dev, struct device_attribute *attr,
1890 char *buf)
1891 {
1892 struct Scsi_Host *shost = class_to_shost(dev);
1893 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1894 struct lpfc_hba *phba = vport->phba;
	char *state;
1896
1897 if (phba->link_state == LPFC_HBA_ERROR)
1898 state = "error";
1899 else if (phba->link_state == LPFC_WARM_START)
1900 state = "warm start";
1901 else if (phba->link_state == LPFC_INIT_START)
1902 state = "offline";
1903 else
1904 state = "online";
1905
1906 return scnprintf(buf, PAGE_SIZE, "%s\n", state);
1907 }
1908
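/**
 * lpfc_board_mode_store - Put the hba in online, offline, warm or error state
 * @dev: class device that is converted into a Scsi_host.
 * @attr: device attribute, not used.
 * @buf: containing one of the strings "online", "offline", "warm", "error",
 *       "dump", "fw_reset", "dv_reset", "pci_bus_reset", "heartbeat" or
 *       "trunk ...".
 * @count: size of the buffer.
 *
 * Returns: length of @buf on success, a negative errno on failure.
 **/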
1922 static ssize_t
1923 lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
1924 const char *buf, size_t count)
1925 {
1926 struct Scsi_Host *shost = class_to_shost(dev);
1927 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1928 struct lpfc_hba *phba = vport->phba;
1929 struct completion online_compl;
1930 char *board_mode_str = NULL;
1931 int status = 0;
1932 int rc;
1933
1934 if (!phba->cfg_enable_hba_reset) {
1935 status = -EACCES;
1936 goto board_mode_out;
1937 }
1938
1939 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1940 "3050 lpfc_board_mode set to %s\n", buf);
1941
1942 init_completion(&online_compl);
1943
	if (strncmp(buf, "online", sizeof("online") - 1) == 0) {
1945 rc = lpfc_workq_post_event(phba, &status, &online_compl,
1946 LPFC_EVT_ONLINE);
1947 if (rc == 0) {
1948 status = -ENOMEM;
1949 goto board_mode_out;
1950 }
1951 wait_for_completion(&online_compl);
1952 if (status)
1953 status = -EIO;
1954 } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
1955 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
1956 else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
1957 if (phba->sli_rev == LPFC_SLI_REV4)
1958 status = -EINVAL;
1959 else
1960 status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
1961 else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
1962 if (phba->sli_rev == LPFC_SLI_REV4)
1963 status = -EINVAL;
1964 else
1965 status = lpfc_do_offline(phba, LPFC_EVT_KILL);
1966 else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
1967 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_DUMP);
1968 else if (strncmp(buf, "fw_reset", sizeof("fw_reset") - 1) == 0)
1969 status = lpfc_sli4_pdev_reg_request(phba, LPFC_FW_RESET);
1970 else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
1971 status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
1972 else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
1973 == 0)
1974 status = lpfc_reset_pci_bus(phba);
1975 else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0)
1976 lpfc_issue_hb_tmo(phba);
1977 else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
1978 status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
1979 else
1980 status = -EINVAL;
1981
1982 board_mode_out:
1983 if (!status)
1984 return strlen(buf);
1985 else {
1986 board_mode_str = strchr(buf, '\n');
1987 if (board_mode_str)
1988 *board_mode_str = '\0';
1989 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
1990 "3097 Failed \"%s\", status(%d), "
1991 "fc_flag(x%x)\n",
1992 buf, status, phba->pport->fc_flag);
1993 return status;
1994 }
1995 }
1996
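/**
 * lpfc_get_hba_info - Return various bits of information about the adapter
 * @phba: pointer to the adapter structure.
 * @mxri: max xri count.
 * @axri: available xri count.
 * @mrpi: max rpi count.
 * @arpi: available rpi count.
 * @mvpi: max vpi count.
 * @avpi: available vpi count.
 *
 * Description:
 * Issues a READ_CONFIG mailbox command and, for each non-NULL pointer,
 * returns the corresponding resource count from the SLI-4 or SLI-3
 * response.
 *
 * Returns: 1 on success, 0 if the mailbox command fails or the port is not
 * in a state that allows it.
 **/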
2015 static int
2016 lpfc_get_hba_info(struct lpfc_hba *phba,
2017 uint32_t *mxri, uint32_t *axri,
2018 uint32_t *mrpi, uint32_t *arpi,
2019 uint32_t *mvpi, uint32_t *avpi)
2020 {
2021 struct lpfc_mbx_read_config *rd_config;
2022 LPFC_MBOXQ_t *pmboxq;
2023 MAILBOX_t *pmb;
2024 int rc = 0;
2025 uint32_t max_vpi;
2026
2027
2028
2029
2030
2031 if (phba->link_state < LPFC_LINK_DOWN ||
2032 !phba->mbox_mem_pool ||
2033 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
2034 return 0;
2035
2036 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
2037 return 0;
2038
2039 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2040 if (!pmboxq)
2041 return 0;
2042 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
2043
2044 pmb = &pmboxq->u.mb;
2045 pmb->mbxCommand = MBX_READ_CONFIG;
2046 pmb->mbxOwner = OWN_HOST;
2047 pmboxq->ctx_buf = NULL;
2048
2049 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
2050 rc = MBX_NOT_FINISHED;
2051 else
2052 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
2053
2054 if (rc != MBX_SUCCESS) {
2055 if (rc != MBX_TIMEOUT)
2056 mempool_free(pmboxq, phba->mbox_mem_pool);
2057 return 0;
2058 }
2059
2060 if (phba->sli_rev == LPFC_SLI_REV4) {
2061 rd_config = &pmboxq->u.mqe.un.rd_config;
2062 if (mrpi)
2063 *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
2064 if (arpi)
2065 *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
2066 phba->sli4_hba.max_cfg_param.rpi_used;
2067 if (mxri)
2068 *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
2069 if (axri)
2070 *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
2071 phba->sli4_hba.max_cfg_param.xri_used;
2072
2073
2074
2075
2076 max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
2077 (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
2078
2079
2080 if (max_vpi > LPFC_MAX_VPI)
2081 max_vpi = LPFC_MAX_VPI;
2082 if (mvpi)
2083 *mvpi = max_vpi;
2084 if (avpi)
2085 *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
2086 } else {
2087 if (mrpi)
2088 *mrpi = pmb->un.varRdConfig.max_rpi;
2089 if (arpi)
2090 *arpi = pmb->un.varRdConfig.avail_rpi;
2091 if (mxri)
2092 *mxri = pmb->un.varRdConfig.max_xri;
2093 if (axri)
2094 *axri = pmb->un.varRdConfig.avail_xri;
2095 if (mvpi)
2096 *mvpi = pmb->un.varRdConfig.max_vpi;
2097 if (avpi) {
2098
2099 if (phba->link_state == LPFC_HBA_READY)
2100 *avpi = pmb->un.varRdConfig.avail_vpi;
2101 else
2102 *avpi = pmb->un.varRdConfig.max_vpi;
2103 }
2104 }
2105
2106 mempool_free(pmboxq, phba->mbox_mem_pool);
2107 return 1;
2108 }
2109
2124 static ssize_t
2125 lpfc_max_rpi_show(struct device *dev, struct device_attribute *attr,
2126 char *buf)
2127 {
2128 struct Scsi_Host *shost = class_to_shost(dev);
2129 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2130 struct lpfc_hba *phba = vport->phba;
2131 uint32_t cnt;
2132
2133 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, NULL, NULL, NULL))
2134 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2135 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2136 }
2137
2152 static ssize_t
2153 lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
2154 char *buf)
2155 {
2156 struct Scsi_Host *shost = class_to_shost(dev);
2157 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2158 struct lpfc_hba *phba = vport->phba;
2159 uint32_t cnt, acnt;
2160
2161 if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
2162 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2163 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2164 }
2165
2180 static ssize_t
2181 lpfc_max_xri_show(struct device *dev, struct device_attribute *attr,
2182 char *buf)
2183 {
2184 struct Scsi_Host *shost = class_to_shost(dev);
2185 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2186 struct lpfc_hba *phba = vport->phba;
2187 uint32_t cnt;
2188
2189 if (lpfc_get_hba_info(phba, &cnt, NULL, NULL, NULL, NULL, NULL))
2190 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2191 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2192 }
2193
2208 static ssize_t
2209 lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
2210 char *buf)
2211 {
2212 struct Scsi_Host *shost = class_to_shost(dev);
2213 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2214 struct lpfc_hba *phba = vport->phba;
2215 uint32_t cnt, acnt;
2216
2217 if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
2218 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2219 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2220 }
2221
2236 static ssize_t
2237 lpfc_max_vpi_show(struct device *dev, struct device_attribute *attr,
2238 char *buf)
2239 {
2240 struct Scsi_Host *shost = class_to_shost(dev);
2241 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2242 struct lpfc_hba *phba = vport->phba;
2243 uint32_t cnt;
2244
2245 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, NULL))
2246 return scnprintf(buf, PAGE_SIZE, "%d\n", cnt);
2247 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2248 }
2249
2264 static ssize_t
2265 lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
2266 char *buf)
2267 {
2268 struct Scsi_Host *shost = class_to_shost(dev);
2269 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2270 struct lpfc_hba *phba = vport->phba;
2271 uint32_t cnt, acnt;
2272
2273 if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
2274 return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
2275 return scnprintf(buf, PAGE_SIZE, "Unknown\n");
2276 }
2277
2291 static ssize_t
2292 lpfc_npiv_info_show(struct device *dev, struct device_attribute *attr,
2293 char *buf)
2294 {
2295 struct Scsi_Host *shost = class_to_shost(dev);
2296 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2297 struct lpfc_hba *phba = vport->phba;
2298
2299 if (!(phba->max_vpi))
2300 return scnprintf(buf, PAGE_SIZE, "NPIV Not Supported\n");
2301 if (vport->port_type == LPFC_PHYSICAL_PORT)
2302 return scnprintf(buf, PAGE_SIZE, "NPIV Physical\n");
2303 return scnprintf(buf, PAGE_SIZE, "NPIV Virtual (VPI %d)\n", vport->vpi);
2304 }
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317 static ssize_t
2318 lpfc_poll_show(struct device *dev, struct device_attribute *attr,
2319 char *buf)
2320 {
2321 struct Scsi_Host *shost = class_to_shost(dev);
2322 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2323 struct lpfc_hba *phba = vport->phba;
2324
2325 return scnprintf(buf, PAGE_SIZE, "%#x\n", phba->cfg_poll);
2326 }
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
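/**
 * lpfc_poll_store - Change the FCP ring polling mode
 * @dev: class device that is converted into a Scsi_Host.
 * @attr: device attribute, not used.
 * @buf: one of the values described for the lpfc_poll module parameter:
 *       0 - no polling, 1 - poll with interrupts enabled,
 *       3 - poll and disable the FCP ring interrupt.
 * @count: unused variable.
 *
 * Only bits 0x3 are accepted and the value is forced to 0 on SLI-4 HBAs.
 * Under hbalock the routine enables or disables the FCP ring interrupt
 * in the HC register and starts or stops the polling timer so the
 * hardware state matches the new mode.
 *
 * Returns: length of the buffer on success, -EINVAL otherwise.
 **/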
2342 static ssize_t
2343 lpfc_poll_store(struct device *dev, struct device_attribute *attr,
2344 const char *buf, size_t count)
2345 {
2346 struct Scsi_Host *shost = class_to_shost(dev);
2347 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2348 struct lpfc_hba *phba = vport->phba;
2349 uint32_t creg_val;
2350 uint32_t old_val;
2351 int val = 0;
2352
2353 if (!isdigit(buf[0]))
2354 return -EINVAL;
2355
2356 if (sscanf(buf, "%i", &val) != 1)
2357 return -EINVAL;
2358
2359 if ((val & 0x3) != val)
2360 return -EINVAL;
2361
2362 if (phba->sli_rev == LPFC_SLI_REV4)
2363 val = 0;
2364
2365 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
2366 "3051 lpfc_poll changed from %d to %d\n",
2367 phba->cfg_poll, val);
2368
2369 spin_lock_irq(&phba->hbalock);
2370
2371 old_val = phba->cfg_poll;
2372
2373 if (val & ENABLE_FCP_RING_POLLING) {
2374 if ((val & DISABLE_FCP_RING_INT) &&
2375 !(old_val & DISABLE_FCP_RING_INT)) {
2376 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2377 spin_unlock_irq(&phba->hbalock);
2378 return -EINVAL;
2379 }
2380 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
2381 writel(creg_val, phba->HCregaddr);
2382 readl(phba->HCregaddr);
2383
2384 lpfc_poll_start_timer(phba);
2385 }
2386 } else if (val != 0x0) {
2387 spin_unlock_irq(&phba->hbalock);
2388 return -EINVAL;
2389 }
2390
2391 if (!(val & DISABLE_FCP_RING_INT) &&
2392 (old_val & DISABLE_FCP_RING_INT)) {
2394 spin_unlock_irq(&phba->hbalock);
2395 del_timer(&phba->fcp_poll_timer);
2396 spin_lock_irq(&phba->hbalock);
2397 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
2398 spin_unlock_irq(&phba->hbalock);
2399 return -EINVAL;
2400 }
2401 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
2402 writel(creg_val, phba->HCregaddr);
2403 readl(phba->HCregaddr);
2404 }
2405
2406 phba->cfg_poll = val;
2407
2408 spin_unlock_irq(&phba->hbalock);
2409
2410 return strlen(buf);
2411 }
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425 static ssize_t
2426 lpfc_sriov_hw_max_virtfn_show(struct device *dev,
2427 struct device_attribute *attr,
2428 char *buf)
2429 {
2430 struct Scsi_Host *shost = class_to_shost(dev);
2431 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2432 struct lpfc_hba *phba = vport->phba;
2433 uint16_t max_nr_virtfn;
2434
2435 max_nr_virtfn = lpfc_sli_sriov_nr_virtfn_get(phba);
2436 return scnprintf(buf, PAGE_SIZE, "%d\n", max_nr_virtfn);
2437 }
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453 static ssize_t
2454 lpfc_enable_bbcr_set(struct lpfc_hba *phba, uint val)
2455 {
2456 if (lpfc_rangecheck(val, 0, 1) && phba->sli_rev == LPFC_SLI_REV4) {
2457 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2458 "3068 lpfc_enable_bbcr changed from %d to "
2459 "%d\n", phba->cfg_enable_bbcr, val);
2460 phba->cfg_enable_bbcr = val;
2461 return 0;
2462 }
2463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2464 "0451 lpfc_enable_bbcr cannot set to %d, range is 0, "
2465 "1\n", val);
2466 return -EINVAL;
2467 }
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483 #define lpfc_param_show(attr) \
2484 static ssize_t \
2485 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2486 char *buf) \
2487 { \
2488 struct Scsi_Host *shost = class_to_shost(dev);\
2489 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2490 struct lpfc_hba *phba = vport->phba;\
2491 return scnprintf(buf, PAGE_SIZE, "%d\n",\
2492 phba->cfg_##attr);\
2493 }
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509 #define lpfc_param_hex_show(attr) \
2510 static ssize_t \
2511 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2512 char *buf) \
2513 { \
2514 struct Scsi_Host *shost = class_to_shost(dev);\
2515 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2516 struct lpfc_hba *phba = vport->phba;\
2519 return scnprintf(buf, PAGE_SIZE, "%#x\n",\
2520 phba->cfg_##attr);\
2521 }
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542 #define lpfc_param_init(attr, default, minval, maxval) \
2543 static int \
2544 lpfc_##attr##_init(struct lpfc_hba *phba, uint val) \
2545 { \
2546 if (lpfc_rangecheck(val, minval, maxval)) {\
2547 phba->cfg_##attr = val;\
2548 return 0;\
2549 }\
2550 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2551 "0449 lpfc_"#attr" attribute cannot be set to %d, "\
2552 "allowed range is ["#minval", "#maxval"]\n", val); \
2553 phba->cfg_##attr = default;\
2554 return -EINVAL;\
2555 }
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577 #define lpfc_param_set(attr, default, minval, maxval) \
2578 static int \
2579 lpfc_##attr##_set(struct lpfc_hba *phba, uint val) \
2580 { \
2581 if (lpfc_rangecheck(val, minval, maxval)) {\
2582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2583 "3052 lpfc_" #attr " changed from %d to %d\n", \
2584 phba->cfg_##attr, val); \
2585 phba->cfg_##attr = val;\
2586 return 0;\
2587 }\
2588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, \
2589 "0450 lpfc_"#attr" attribute cannot be set to %d, "\
2590 "allowed range is ["#minval", "#maxval"]\n", val); \
2591 return -EINVAL;\
2592 }
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615 #define lpfc_param_store(attr) \
2616 static ssize_t \
2617 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2618 const char *buf, size_t count) \
2619 { \
2620 struct Scsi_Host *shost = class_to_shost(dev);\
2621 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2622 struct lpfc_hba *phba = vport->phba;\
2623 uint val = 0;\
2624 if (!isdigit(buf[0]))\
2625 return -EINVAL;\
2626 if (sscanf(buf, "%i", &val) != 1)\
2627 return -EINVAL;\
2628 if (lpfc_##attr##_set(phba, val) == 0) \
2629 return strlen(buf);\
2630 else \
2631 return -EINVAL;\
2632 }
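/*
 * Taken together, the lpfc_param_init/set/show/hex_show/store macros
 * generate the boilerplate for one HBA-wide cfg_* attribute.  As an
 * illustration (the attribute name "foo" is hypothetical),
 * lpfc_param_show(foo) expands to an lpfc_foo_show() handler that prints
 * phba->cfg_foo in decimal, and lpfc_param_store(foo) expands to an
 * lpfc_foo_store() handler that parses the user buffer with sscanf("%i")
 * and hands the value to lpfc_foo_set().  The LPFC_ATTR_R()/LPFC_ATTR_RW()
 * style wrappers used later in this file combine these pieces with the
 * matching module_param() and DEVICE_ATTR() declarations.
 */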
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648 #define lpfc_vport_param_show(attr) \
2649 static ssize_t \
2650 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2651 char *buf) \
2652 { \
2653 struct Scsi_Host *shost = class_to_shost(dev);\
2654 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2655 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
2656 }
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673 #define lpfc_vport_param_hex_show(attr) \
2674 static ssize_t \
2675 lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
2676 char *buf) \
2677 { \
2678 struct Scsi_Host *shost = class_to_shost(dev);\
2679 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2680 return scnprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
2681 }
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701 #define lpfc_vport_param_init(attr, default, minval, maxval) \
2702 static int \
2703 lpfc_##attr##_init(struct lpfc_vport *vport, uint val) \
2704 { \
2705 if (lpfc_rangecheck(val, minval, maxval)) {\
2706 vport->cfg_##attr = val;\
2707 return 0;\
2708 }\
2709 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2710 "0423 lpfc_"#attr" attribute cannot be set to %d, "\
2711 "allowed range is ["#minval", "#maxval"]\n", val); \
2712 vport->cfg_##attr = default;\
2713 return -EINVAL;\
2714 }
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733 #define lpfc_vport_param_set(attr, default, minval, maxval) \
2734 static int \
2735 lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \
2736 { \
2737 if (lpfc_rangecheck(val, minval, maxval)) {\
2738 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2739 "3053 lpfc_" #attr \
2740 " changed from %d (x%x) to %d (x%x)\n", \
2741 vport->cfg_##attr, vport->cfg_##attr, \
2742 val, val); \
2743 vport->cfg_##attr = val;\
2744 return 0;\
2745 }\
2746 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \
2747 "0424 lpfc_"#attr" attribute cannot be set to %d, "\
2748 "allowed range is ["#minval", "#maxval"]\n", val); \
2749 return -EINVAL;\
2750 }
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769 #define lpfc_vport_param_store(attr) \
2770 static ssize_t \
2771 lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
2772 const char *buf, size_t count) \
2773 { \
2774 struct Scsi_Host *shost = class_to_shost(dev);\
2775 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
2776 uint val = 0;\
2777 if (!isdigit(buf[0]))\
2778 return -EINVAL;\
2779 if (sscanf(buf, "%i", &val) != 1)\
2780 return -EINVAL;\
2781 if (lpfc_##attr##_set(vport, val) == 0) \
2782 return strlen(buf);\
2783 else \
2784 return -EINVAL;\
2785 }
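/*
 * The lpfc_vport_param_* macros above mirror the HBA-wide versions but
 * operate on vport->cfg_* fields and log through lpfc_printf_vlog().
 * They are combined by the LPFC_VPORT_ATTR_R()/LPFC_VPORT_ATTR_HEX_RW()
 * style wrappers used further down in this file.
 */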
2786
2787
2788 static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
2789 static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL);
2790 static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
2791 static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
2792 static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
2793 static DEVICE_ATTR(bg_reftag_err, S_IRUGO, lpfc_bg_reftag_err_show, NULL);
2794 static DEVICE_ATTR(info, S_IRUGO, lpfc_info_show, NULL);
2795 static DEVICE_ATTR(serialnum, S_IRUGO, lpfc_serialnum_show, NULL);
2796 static DEVICE_ATTR(modeldesc, S_IRUGO, lpfc_modeldesc_show, NULL);
2797 static DEVICE_ATTR(modelname, S_IRUGO, lpfc_modelname_show, NULL);
2798 static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
2799 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
2800 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
2801 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
2802 static DEVICE_ATTR(link_state, S_IRUGO | S_IWUSR, lpfc_link_state_show,
2803 lpfc_link_state_store);
2804 static DEVICE_ATTR(option_rom_version, S_IRUGO,
2805 lpfc_option_rom_version_show, NULL);
2806 static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
2807 lpfc_num_discovered_ports_show, NULL);
2808 static DEVICE_ATTR(nport_evt_cnt, S_IRUGO, lpfc_nport_evt_cnt_show, NULL);
2809 static DEVICE_ATTR_RO(lpfc_drvr_version);
2810 static DEVICE_ATTR_RO(lpfc_enable_fip);
2811 static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR,
2812 lpfc_board_mode_show, lpfc_board_mode_store);
2813 static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset);
2814 static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL);
2815 static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL);
2816 static DEVICE_ATTR(max_rpi, S_IRUGO, lpfc_max_rpi_show, NULL);
2817 static DEVICE_ATTR(used_rpi, S_IRUGO, lpfc_used_rpi_show, NULL);
2818 static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
2819 static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
2820 static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
2821 static DEVICE_ATTR_RO(lpfc_temp_sensor);
2822 static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
2823 static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
2824 static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
2825 NULL);
2826 static DEVICE_ATTR(cmf_info, 0444, lpfc_cmf_info_show, NULL);
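/*
 * The attributes declared above surface through the SCSI host sysfs
 * directory, for example (the host number is system dependent):
 *
 *   cat /sys/class/scsi_host/host6/used_rpi
 *   cat /sys/class/scsi_host/host6/npiv_info
 *
 * Most entries are read-only (0444/S_IRUGO); the writable ones pair a
 * *_store handler with S_IWUSR.
 */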
2827
2828 #define WWN_SZ 8
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839 static ssize_t
2840 lpfc_wwn_set(const char *buf, size_t cnt, char wwn[])
2841 {
2842 unsigned int i, j;
2843
2844
2845 if (buf[cnt-1] == '\n')
2846 cnt--;
2847
2848 if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) ||
2849 ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x'))))
2850 return -EINVAL;
2851
2852 memset(wwn, 0, WWN_SZ);
2853
2854
2855 for (i = 0, j = 0; i < 16; i++) {
2856 if ((*buf >= 'a') && (*buf <= 'f'))
2857 j = ((j << 4) | ((*buf++ - 'a') + 10));
2858 else if ((*buf >= 'A') && (*buf <= 'F'))
2859 j = ((j << 4) | ((*buf++ - 'A') + 10));
2860 else if ((*buf >= '0') && (*buf <= '9'))
2861 j = ((j << 4) | (*buf++ - '0'));
2862 else
2863 return -EINVAL;
2864 if (i % 2) {
2865 wwn[i/2] = j & 0xff;
2866 j = 0;
2867 }
2868 }
2869 return 0;
2870 }
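/*
 * lpfc_wwn_set() accepts a WWN as exactly 16 hex digits, optionally
 * prefixed with "x" or "0x" and optionally followed by a newline, e.g.
 * "10000000c9abcdef", "x10000000c9abcdef" or "0x10000000c9abcdef"
 * (the WWPN value here is only illustrative).  Anything else is
 * rejected with -EINVAL.
 */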
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883 static ssize_t
2884 lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr,
2885 char *buf)
2886 {
2887 struct Scsi_Host *shost = class_to_shost(dev);
2888 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2889
2890 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
2891 wwn_to_u64(phba->cfg_oas_tgt_wwpn));
2892 }
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907 static ssize_t
2908 lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr,
2909 const char *buf, size_t count)
2910 {
2911 struct Scsi_Host *shost = class_to_shost(dev);
2912 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2913 unsigned int cnt = count;
2914 uint8_t wwpn[WWN_SZ];
2915 int rc;
2916
2917 if (!phba->cfg_fof)
2918 return -EPERM;
2919
2920
2921 if (buf[cnt-1] == '\n')
2922 cnt--;
2923
2924 rc = lpfc_wwn_set(buf, cnt, wwpn);
2925 if (rc)
2926 return rc;
2927
2928 memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2929 memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t)));
2930 if (wwn_to_u64(wwpn) == 0)
2931 phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
2932 else
2933 phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;
2934 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
2935 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
2936 return count;
2937 }
2938 static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR,
2939 lpfc_oas_tgt_show, lpfc_oas_tgt_store);
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951 static ssize_t
2952 lpfc_oas_priority_show(struct device *dev, struct device_attribute *attr,
2953 char *buf)
2954 {
2955 struct Scsi_Host *shost = class_to_shost(dev);
2956 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2957
2958 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_priority);
2959 }
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974 static ssize_t
2975 lpfc_oas_priority_store(struct device *dev, struct device_attribute *attr,
2976 const char *buf, size_t count)
2977 {
2978 struct Scsi_Host *shost = class_to_shost(dev);
2979 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
2980 unsigned int cnt = count;
2981 unsigned long val;
2982 int ret;
2983
2984 if (!phba->cfg_fof)
2985 return -EPERM;
2986
2987
2988 if (buf[cnt-1] == '\n')
2989 cnt--;
2990
2991 ret = kstrtoul(buf, 0, &val);
2992 if (ret || (val > 0x7f))
2993 return -EINVAL;
2994
2995 if (val)
2996 phba->cfg_oas_priority = (uint8_t)val;
2997 else
2998 phba->cfg_oas_priority = phba->cfg_XLanePriority;
2999 return count;
3000 }
3001 static DEVICE_ATTR(lpfc_xlane_priority, S_IRUGO | S_IWUSR,
3002 lpfc_oas_priority_show, lpfc_oas_priority_store);
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014 static ssize_t
3015 lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr,
3016 char *buf)
3017 {
3018 struct Scsi_Host *shost = class_to_shost(dev);
3019 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3020
3021 return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
3022 wwn_to_u64(phba->cfg_oas_vpt_wwpn));
3023 }
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038 static ssize_t
3039 lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr,
3040 const char *buf, size_t count)
3041 {
3042 struct Scsi_Host *shost = class_to_shost(dev);
3043 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3044 unsigned int cnt = count;
3045 uint8_t wwpn[WWN_SZ];
3046 int rc;
3047
3048 if (!phba->cfg_fof)
3049 return -EPERM;
3050
3051
3052 if (buf[cnt-1] == '\n')
3053 cnt--;
3054
3055 rc = lpfc_wwn_set(buf, cnt, wwpn);
3056 if (rc)
3057 return rc;
3058
3059 memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3060 memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t)));
3061 if (wwn_to_u64(wwpn) == 0)
3062 phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT;
3063 else
3064 phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT;
3065 phba->cfg_oas_flags &= ~OAS_LUN_VALID;
3066 if (phba->cfg_oas_priority == 0)
3067 phba->cfg_oas_priority = phba->cfg_XLanePriority;
3068 phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;
3069 return count;
3070 }
3071 static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR,
3072 lpfc_oas_vpt_show, lpfc_oas_vpt_store);
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085 static ssize_t
3086 lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr,
3087 char *buf)
3088 {
3089 struct Scsi_Host *shost = class_to_shost(dev);
3090 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3091
3092 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state);
3093 }
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109 static ssize_t
3110 lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr,
3111 const char *buf, size_t count)
3112 {
3113 struct Scsi_Host *shost = class_to_shost(dev);
3114 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3115 int val = 0;
3116
3117 if (!phba->cfg_fof)
3118 return -EPERM;
3119
3120 if (!isdigit(buf[0]))
3121 return -EINVAL;
3122
3123 if (sscanf(buf, "%i", &val) != 1)
3124 return -EINVAL;
3125
3126 if ((val != 0) && (val != 1))
3127 return -EINVAL;
3128
3129 phba->cfg_oas_lun_state = val;
3130 return strlen(buf);
3131 }
3132 static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR,
3133 lpfc_oas_lun_state_show, lpfc_oas_lun_state_store);
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146 static ssize_t
3147 lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr,
3148 char *buf)
3149 {
3150 struct Scsi_Host *shost = class_to_shost(dev);
3151 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3152
3153 if (!(phba->cfg_oas_flags & OAS_LUN_VALID))
3154 return -EFAULT;
3155
3156 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status);
3157 }
3158 static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO,
3159 lpfc_oas_lun_status_show, NULL);
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177 static ssize_t
3178 lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3179 uint8_t tgt_wwpn[], uint64_t lun,
3180 uint32_t oas_state, uint8_t pri)
3181 {
3182
3183 int rc = 0;
3184
3185 if (!phba->cfg_fof)
3186 return -EPERM;
3187
3188 if (oas_state) {
3189 if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3190 (struct lpfc_name *)tgt_wwpn,
3191 lun, pri))
3192 rc = -ENOMEM;
3193 } else {
3194 lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn,
3195 (struct lpfc_name *)tgt_wwpn, lun, pri);
3196 }
3197 return rc;
3198
3199 }
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218 static uint64_t
3219 lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3220 uint8_t tgt_wwpn[], uint32_t *lun_status,
3221 uint32_t *lun_pri)
3222 {
3223 uint64_t found_lun;
3224
3225 if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn)
3226 return NOT_OAS_ENABLED_LUN;
3227 if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *)
3228 phba->sli4_hba.oas_next_vpt_wwpn,
3229 (struct lpfc_name *)
3230 phba->sli4_hba.oas_next_tgt_wwpn,
3231 &phba->sli4_hba.oas_next_lun,
3232 (struct lpfc_name *)vpt_wwpn,
3233 (struct lpfc_name *)tgt_wwpn,
3234 &found_lun, lun_status, lun_pri))
3235 return found_lun;
3236 else
3237 return NOT_OAS_ENABLED_LUN;
3238 }
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257 static ssize_t
3258 lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[],
3259 uint8_t tgt_wwpn[], uint64_t lun,
3260 uint32_t oas_state, uint8_t pri)
3261 {
3262
3263 int rc;
3264
3265 rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun,
3266 oas_state, pri);
3267 return rc;
3268 }
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284 static ssize_t
3285 lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr,
3286 char *buf)
3287 {
3288 struct Scsi_Host *shost = class_to_shost(dev);
3289 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3290
3291 uint64_t oas_lun;
3292 int len = 0;
3293
3294 if (!phba->cfg_fof)
3295 return -EPERM;
3296
3297 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3298 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT))
3299 return -EFAULT;
3300
3301 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3302 if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET))
3303 return -EFAULT;
3304
3305 oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn,
3306 phba->cfg_oas_tgt_wwpn,
3307 &phba->cfg_oas_lun_status,
3308 &phba->cfg_oas_priority);
3309 if (oas_lun != NOT_OAS_ENABLED_LUN)
3310 phba->cfg_oas_flags |= OAS_LUN_VALID;
3311
3312 len += scnprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun);
3313
3314 return len;
3315 }
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333 static ssize_t
3334 lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
3335 const char *buf, size_t count)
3336 {
3337 struct Scsi_Host *shost = class_to_shost(dev);
3338 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3339 uint64_t scsi_lun;
3340 uint32_t pri;
3341 ssize_t rc;
3342
3343 if (!phba->cfg_fof)
3344 return -EPERM;
3345
3346 if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0)
3347 return -EFAULT;
3348
3349 if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
3350 return -EFAULT;
3351
3352 if (!isdigit(buf[0]))
3353 return -EINVAL;
3354
3355 if (sscanf(buf, "0x%llx", &scsi_lun) != 1)
3356 return -EINVAL;
3357
3358 pri = phba->cfg_oas_priority;
3359 if (pri == 0)
3360 pri = phba->cfg_XLanePriority;
3361
3362 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3363 "3372 Try to set vport 0x%llx target 0x%llx lun:0x%llx "
3364 "priority 0x%x with oas state %d\n",
3365 wwn_to_u64(phba->cfg_oas_vpt_wwpn),
3366 wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun,
3367 pri, phba->cfg_oas_lun_state);
3368
3369 rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn,
3370 phba->cfg_oas_tgt_wwpn, scsi_lun,
3371 phba->cfg_oas_lun_state, pri);
3372 if (rc)
3373 return rc;
3374
3375 return count;
3376 }
3377 static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
3378 lpfc_oas_lun_show, lpfc_oas_lun_store);
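/*
 * Typical OAS (Express Lane) configuration sequence, run from the host's
 * sysfs directory (/sys/class/scsi_host/host<N>) and assuming the HBA
 * reports cfg_fof support; the WWPNs below are placeholders:
 *
 *   echo 0x10000000c9abcdef > lpfc_xlane_vpt     # vport WWPN
 *   echo 0x20000000c9abcdef > lpfc_xlane_tgt     # target WWPN
 *   echo 1 > lpfc_xlane_lun_state                # state for the next write
 *   echo 0x0 > lpfc_xlane_lun                    # LUN to enable/disable
 *
 * Reading lpfc_xlane_lun afterwards walks the OAS enabled LUNs for the
 * selected vport/target pair.
 */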
3379
3380 int lpfc_enable_nvmet_cnt;
3381 unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
3382 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3383 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3384 module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
3385 MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target");
3386
3387 static int lpfc_poll = 0;
3388 module_param(lpfc_poll, int, S_IRUGO);
3389 MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
3390 " 0 - none,"
3391 " 1 - poll with interrupts enabled"
3392 " 3 - poll and disable FCP ring interrupts");
3393
3394 static DEVICE_ATTR_RW(lpfc_poll);
3395
3396 int lpfc_no_hba_reset_cnt;
3397 unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
3398 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3399 module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
3400 MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
3401
3402 LPFC_ATTR(sli_mode, 3, 3, 3,
3403 "SLI mode selector: 3 - select SLI-3");
3404
3405 LPFC_ATTR_R(enable_npiv, 1, 0, 1,
3406 "Enable NPIV functionality");
3407
3408 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
3409 "FCF Fast failover=1 Priority failover=2");
3410
3411
3412
3413
3414
3415
3416
3417
3418 LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion");
3419
3420
3421
3422
3423
3424
3425
3426 LPFC_ATTR_R(enable_rrq, 2, 0, 2,
3427 "Enable RRQ functionality");
3428
3429
3430
3431
3432
3433
3434
3435
3436 LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
3437 LPFC_DELAY_INIT_LINK_INDEFINITELY,
3438 "Suppress Link Up at initialization");
3439
3440 static ssize_t
3441 lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf)
3442 {
3443 struct Scsi_Host *shost = class_to_shost(dev);
3444 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3445
3446 return scnprintf(buf, PAGE_SIZE, "%d\n",
3447 phba->sli4_hba.pc_sli4_params.pls);
3448 }
3449 static DEVICE_ATTR(pls, 0444,
3450 lpfc_pls_show, NULL);
3451
3452 static ssize_t
3453 lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf)
3454 {
3455 struct Scsi_Host *shost = class_to_shost(dev);
3456 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3457
3458 return scnprintf(buf, PAGE_SIZE, "%d\n",
3459 (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
3460 }
3461 static DEVICE_ATTR(pt, 0444,
3462 lpfc_pt_show, NULL);
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472 static ssize_t
3473 lpfc_iocb_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3474 {
3475 struct Scsi_Host *shost = class_to_shost(dev);
3476 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3477
3478 return scnprintf(buf, PAGE_SIZE, "%d\n", phba->iocb_max);
3479 }
3480
3481 static DEVICE_ATTR(iocb_hw, S_IRUGO,
3482 lpfc_iocb_hw_show, NULL);
3483 static ssize_t
3484 lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
3485 {
3486 struct Scsi_Host *shost = class_to_shost(dev);
3487 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3488 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3489
3490 return scnprintf(buf, PAGE_SIZE, "%d\n",
3491 pring ? pring->txq_max : 0);
3492 }
3493
3494 static DEVICE_ATTR(txq_hw, S_IRUGO,
3495 lpfc_txq_hw_show, NULL);
3496 static ssize_t
3497 lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
3498 char *buf)
3499 {
3500 struct Scsi_Host *shost = class_to_shost(dev);
3501 struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
3502 struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
3503
3504 return scnprintf(buf, PAGE_SIZE, "%d\n",
3505 pring ? pring->txcmplq_max : 0);
3506 }
3507
3508 static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
3509 lpfc_txcmplq_hw_show, NULL);
3510
3511
3512
3513
3514
3515 static int lpfc_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3516 static int lpfc_devloss_tmo = LPFC_DEF_DEVLOSS_TMO;
3517 module_param(lpfc_nodev_tmo, int, 0);
3518 MODULE_PARM_DESC(lpfc_nodev_tmo,
3519 "Seconds driver will hold I/O waiting "
3520 "for a device to come back");
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530 static ssize_t
3531 lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr,
3532 char *buf)
3533 {
3534 struct Scsi_Host *shost = class_to_shost(dev);
3535 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3536
3537 return scnprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo);
3538 }
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555 static int
3556 lpfc_nodev_tmo_init(struct lpfc_vport *vport, int val)
3557 {
3558 if (vport->cfg_devloss_tmo != LPFC_DEF_DEVLOSS_TMO) {
3559 vport->cfg_nodev_tmo = vport->cfg_devloss_tmo;
3560 if (val != LPFC_DEF_DEVLOSS_TMO)
3561 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3562 "0407 Ignoring lpfc_nodev_tmo module "
3563 "parameter because lpfc_devloss_tmo "
3564 "is set.\n");
3565 return 0;
3566 }
3567
3568 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3569 vport->cfg_nodev_tmo = val;
3570 vport->cfg_devloss_tmo = val;
3571 return 0;
3572 }
3573 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3574 "0400 lpfc_nodev_tmo attribute cannot be set to"
3575 " %d, allowed range is [%d, %d]\n",
3576 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3577 vport->cfg_nodev_tmo = LPFC_DEF_DEVLOSS_TMO;
3578 return -EINVAL;
3579 }
3580
3581
3582
3583
3584
3585
3586
3587
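/**
 * lpfc_update_rport_devloss_tmo - Update dev loss tmo value of all rports
 * @vport: lpfc vport of interest.
 *
 * Walks the vport's fc_nodes list and propagates cfg_devloss_tmo to each
 * remote port's dev_loss_tmo and, when NVMe over FC is configured, to the
 * matching nvme_fc remote port via nvme_fc_set_remoteport_devloss().
 **/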
3588 static void
3589 lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3590 {
3591 struct Scsi_Host *shost;
3592 struct lpfc_nodelist *ndlp;
3593 #if (IS_ENABLED(CONFIG_NVME_FC))
3594 struct lpfc_nvme_rport *rport;
3595 struct nvme_fc_remote_port *remoteport = NULL;
3596 #endif
3597
3598 shost = lpfc_shost_from_vport(vport);
3599 spin_lock_irq(shost->host_lock);
3600 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
3601 if (ndlp->rport)
3602 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3603 #if (IS_ENABLED(CONFIG_NVME_FC))
3604 spin_lock(&ndlp->lock);
3605 rport = lpfc_ndlp_get_nrport(ndlp);
3606 if (rport)
3607 remoteport = rport->remoteport;
3608 spin_unlock(&ndlp->lock);
3609 if (rport && remoteport)
3610 nvme_fc_set_remoteport_devloss(remoteport,
3611 vport->cfg_devloss_tmo);
3612 #endif
3613 }
3614 spin_unlock_irq(shost->host_lock);
3615 }
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
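/**
 * lpfc_nodev_tmo_set - Set the vport nodev tmo and devloss tmo values
 * @vport: lpfc vport of interest.
 * @val: requested timeout in seconds.
 *
 * The request is ignored (and 0 returned) once lpfc_devloss_tmo has been
 * explicitly configured, because devloss_tmo takes precedence.  Otherwise
 * a value within [LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO] updates both
 * timeouts, the fc_host dev_loss_tmo and every remote port on the vport.
 *
 * Returns: 0 on success, -EINVAL if the value is out of range.
 **/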
3632 static int
3633 lpfc_nodev_tmo_set(struct lpfc_vport *vport, int val)
3634 {
3635 if (vport->dev_loss_tmo_changed ||
3636 (lpfc_devloss_tmo != LPFC_DEF_DEVLOSS_TMO)) {
3637 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3638 "0401 Ignoring change to lpfc_nodev_tmo "
3639 "because lpfc_devloss_tmo is set.\n");
3640 return 0;
3641 }
3642 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3643 vport->cfg_nodev_tmo = val;
3644 vport->cfg_devloss_tmo = val;
3645
3646
3647
3648
3649 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3650 lpfc_update_rport_devloss_tmo(vport);
3651 return 0;
3652 }
3653 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3654 "0403 lpfc_nodev_tmo attribute cannot be set to "
3655 "%d, allowed range is [%d, %d]\n",
3656 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3657 return -EINVAL;
3658 }
3659
3660 lpfc_vport_param_store(nodev_tmo)
3661
3662 static DEVICE_ATTR_RW(lpfc_nodev_tmo);
3663
3664
3665
3666
3667
3668
3669 module_param(lpfc_devloss_tmo, int, S_IRUGO);
3670 MODULE_PARM_DESC(lpfc_devloss_tmo,
3671 "Seconds driver will hold I/O waiting "
3672 "for a device to come back");
3673 lpfc_vport_param_init(devloss_tmo, LPFC_DEF_DEVLOSS_TMO,
3674 LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO)
3675 lpfc_vport_param_show(devloss_tmo)
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691 static int
3692 lpfc_devloss_tmo_set(struct lpfc_vport *vport, int val)
3693 {
3694 if (val >= LPFC_MIN_DEVLOSS_TMO && val <= LPFC_MAX_DEVLOSS_TMO) {
3695 vport->cfg_nodev_tmo = val;
3696 vport->cfg_devloss_tmo = val;
3697 vport->dev_loss_tmo_changed = 1;
3698 fc_host_dev_loss_tmo(lpfc_shost_from_vport(vport)) = val;
3699 lpfc_update_rport_devloss_tmo(vport);
3700 return 0;
3701 }
3702
3703 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3704 "0404 lpfc_devloss_tmo attribute cannot be set to "
3705 "%d, allowed range is [%d, %d]\n",
3706 val, LPFC_MIN_DEVLOSS_TMO, LPFC_MAX_DEVLOSS_TMO);
3707 return -EINVAL;
3708 }
3709
3710 lpfc_vport_param_store(devloss_tmo)
3711 static DEVICE_ATTR_RW(lpfc_devloss_tmo);
3712
3713
3714
3715
3716
3717
3718
3719 LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
3720 "Enable suppress rsp feature is firmware supports it");
3721
3722
3723
3724
3725
3726
3727
3728
3729 LPFC_ATTR_R(nvmet_mrq,
3730 LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_AUTO, LPFC_NVMET_MRQ_MAX,
3731 "Specify number of RQ pairs for processing NVMET cmds");
3732
3733
3734
3735
3736
3737 LPFC_ATTR_R(nvmet_mrq_post,
3738 LPFC_NVMET_RQE_DEF_POST, LPFC_NVMET_RQE_MIN_POST,
3739 LPFC_NVMET_RQE_DEF_COUNT,
3740 "Specify number of RQ buffers to initially post");
3741
3742
3743
3744
3745
3746
3747
3748 LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
3749 LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
3750 "Enable FC4 Protocol support - FCP / NVME");
3751
3752
3753
3754
3755
3756
3757
3758 LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
3759 "Verbose logging bit-mask");
3760
3761
3762
3763
3764
3765 LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
3766 "Deregister nameserver objects before LOGO");
3767
3768
3769
3770
3771
3772 LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
3773 "Max number of FCP commands we can queue to a specific LUN");
3774
3775
3776
3777
3778
3779 static uint lpfc_tgt_queue_depth = LPFC_MAX_TGT_QDEPTH;
3780 module_param(lpfc_tgt_queue_depth, uint, 0444);
3781 MODULE_PARM_DESC(lpfc_tgt_queue_depth, "Set max Target queue depth");
3782 lpfc_vport_param_show(tgt_queue_depth);
3783 lpfc_vport_param_init(tgt_queue_depth, LPFC_MAX_TGT_QDEPTH,
3784 LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH);
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797 static int
3798 lpfc_tgt_queue_depth_set(struct lpfc_vport *vport, uint val)
3799 {
3800 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3801 struct lpfc_nodelist *ndlp;
3802
3803 if (!lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
3804 return -EINVAL;
3805
3806 if (val == vport->cfg_tgt_queue_depth)
3807 return 0;
3808
3809 spin_lock_irq(shost->host_lock);
3810 vport->cfg_tgt_queue_depth = val;
3811
3812
3813 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
3814 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
3815
3816 spin_unlock_irq(shost->host_lock);
3817 return 0;
3818 }
3819
3820 lpfc_vport_param_store(tgt_queue_depth);
3821 static DEVICE_ATTR_RW(lpfc_tgt_queue_depth);
3822
3823
3824
3825
3826
3827
3828
3829
3830 LPFC_ATTR_R(hba_queue_depth, 8192, 32, 8192,
3831 "Max number of FCP commands we can queue to a lpfc HBA");
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842 LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
3843 "Allow peer ports on the same physical port to login to each "
3844 "other.");
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857 static int lpfc_restrict_login = 1;
3858 module_param(lpfc_restrict_login, int, S_IRUGO);
3859 MODULE_PARM_DESC(lpfc_restrict_login,
3860 "Restrict virtual ports login to remote initiators.");
3861 lpfc_vport_param_show(restrict_login);
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878 static int
3879 lpfc_restrict_login_init(struct lpfc_vport *vport, int val)
3880 {
3881 if (val < 0 || val > 1) {
3882 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3883 "0422 lpfc_restrict_login attribute cannot "
3884 "be set to %d, allowed range is [0, 1]\n",
3885 val);
3886 vport->cfg_restrict_login = 1;
3887 return -EINVAL;
3888 }
3889 if (vport->port_type == LPFC_PHYSICAL_PORT) {
3890 vport->cfg_restrict_login = 0;
3891 return 0;
3892 }
3893 vport->cfg_restrict_login = val;
3894 return 0;
3895 }
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913 static int
3914 lpfc_restrict_login_set(struct lpfc_vport *vport, int val)
3915 {
3916 if (val < 0 || val > 1) {
3917 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3918 "0425 lpfc_restrict_login attribute cannot "
3919 "be set to %d, allowed range is [0, 1]\n",
3920 val);
3921 vport->cfg_restrict_login = 1;
3922 return -EINVAL;
3923 }
3924 if (vport->port_type == LPFC_PHYSICAL_PORT && val != 0) {
3925 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3926 "0468 lpfc_restrict_login must be 0 for "
3927 "Physical ports.\n");
3928 vport->cfg_restrict_login = 0;
3929 return 0;
3930 }
3931 vport->cfg_restrict_login = val;
3932 return 0;
3933 }
3934 lpfc_vport_param_store(restrict_login);
3935 static DEVICE_ATTR_RW(lpfc_restrict_login);
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954 LPFC_VPORT_ATTR_R(scan_down, 1, 0, 1,
3955 "Start scanning for devices from highest ALPA to lowest");
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968 LPFC_ATTR(topology, 0, 0, 6,
3969 "Select Fibre Channel topology");
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
3988
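/**
 * lpfc_topology_store - Set the adapter's Fibre Channel topology
 * @dev: class device that is converted into a Scsi_Host.
 * @attr: device attribute, not used.
 * @buf: topology value 0-6, optionally prefixed with "nolip " to change
 *       the setting without bringing the link down.
 * @count: unused variable.
 *
 * Loop mode (4) is rejected when the link speed is configured for 16G,
 * when persistent topology is in force, or when the ASIC family or
 * interface type cannot run in loop mode.  Unless "nolip" was requested
 * a LIP is issued so the new topology takes effect; if the LIP fails the
 * previous value is restored.
 *
 * Returns: length of the buffer on success, -EINVAL otherwise.
 **/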
3989 static ssize_t
3990 lpfc_topology_store(struct device *dev, struct device_attribute *attr,
3991 const char *buf, size_t count)
3992 {
3993 struct Scsi_Host *shost = class_to_shost(dev);
3994 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3995 struct lpfc_hba *phba = vport->phba;
3996 int val = 0;
3997 int nolip = 0;
3998 const char *val_buf = buf;
3999 int err;
4000 uint32_t prev_val;
4001 u8 sli_family, if_type;
4002
4003 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4004 nolip = 1;
4005 val_buf = &buf[strlen("nolip ")];
4006 }
4007
4008 if (!isdigit(val_buf[0]))
4009 return -EINVAL;
4010 if (sscanf(val_buf, "%i", &val) != 1)
4011 return -EINVAL;
4012
4013 if (val >= 0 && val <= 6) {
4014 prev_val = phba->cfg_topology;
4015 if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
4016 val == 4) {
4017 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4018 "3113 Loop mode not supported at speed %d\n",
4019 val);
4020 return -EINVAL;
4021 }
4022
4023
4024
4025
4026
4027 sli_family = bf_get(lpfc_sli_intf_sli_family,
4028 &phba->sli4_hba.sli_intf);
4029 if_type = bf_get(lpfc_sli_intf_if_type,
4030 &phba->sli4_hba.sli_intf);
4031 if ((phba->hba_flag & HBA_PERSISTENT_TOPO ||
4032 (!phba->sli4_hba.pc_sli4_params.pls &&
4033 (sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
4034 if_type == LPFC_SLI_INTF_IF_TYPE_6))) &&
4035 val == 4) {
4036 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4037 "3114 Loop mode not supported\n");
4038 return -EINVAL;
4039 }
4040 phba->cfg_topology = val;
4041 if (nolip)
4042 return strlen(buf);
4043
4044 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4045 "3054 lpfc_topology changed from %d to %d\n",
4046 prev_val, val);
4047 if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4)
4048 phba->fc_topology_changed = 1;
4049 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4050 if (err) {
4051 phba->cfg_topology = prev_val;
4052 return -EINVAL;
4053 } else
4054 return strlen(buf);
4055 }
4056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4057 "%d:0467 lpfc_topology attribute cannot be set to %d, "
4058 "allowed range is [0, 6]\n",
4059 phba->brd_no, val);
4060 return -EINVAL;
4061 }
4062
4063 lpfc_param_show(topology)
4064 static DEVICE_ATTR_RW(lpfc_topology);
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077 static ssize_t
4078 lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
4079 char *buf)
4080 {
4081 struct Scsi_Host *shost = class_to_shost(dev);
4082 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4083 if (vport->vport_flag & STATIC_VPORT)
4084 return scnprintf(buf, PAGE_SIZE, "1\n");
4085
4086 return scnprintf(buf, PAGE_SIZE, "0\n");
4089 }
4090
4091
4092
4093
4094 static DEVICE_ATTR_RO(lpfc_static_vport);
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116 static ssize_t
4117 lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
4118 const char *buf, size_t count)
4119 {
4120 struct Scsi_Host *shost = class_to_shost(dev);
4121 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4122 struct lpfc_hba *phba = vport->phba;
4123 #define LPFC_MAX_DATA_CTRL_LEN 1024
4124 static char bucket_data[LPFC_MAX_DATA_CTRL_LEN];
4125 unsigned long i;
4126 char *str_ptr, *token;
4127 struct lpfc_vport **vports;
4128 struct Scsi_Host *v_shost;
4129 char *bucket_type_str, *base_str, *step_str;
4130 unsigned long base, step, bucket_type;
4131
4132 if (!strncmp(buf, "setbucket", strlen("setbucket"))) {
4133 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
4134 return -EINVAL;
4135
4136 strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
4137 str_ptr = &bucket_data[0];
4138
4139 token = strsep(&str_ptr, "\t ");
4140 if (!token)
4141 return -EINVAL;
4142
4143 bucket_type_str = strsep(&str_ptr, "\t ");
4144 if (!bucket_type_str)
4145 return -EINVAL;
4146
4147 if (!strncmp(bucket_type_str, "linear", strlen("linear")))
4148 bucket_type = LPFC_LINEAR_BUCKET;
4149 else if (!strncmp(bucket_type_str, "power2", strlen("power2")))
4150 bucket_type = LPFC_POWER2_BUCKET;
4151 else
4152 return -EINVAL;
4153
4154 base_str = strsep(&str_ptr, "\t ");
4155 if (!base_str)
4156 return -EINVAL;
4157 base = simple_strtoul(base_str, NULL, 0);
4158
4159 step_str = strsep(&str_ptr, "\t ");
4160 if (!step_str)
4161 return -EINVAL;
4162 step = simple_strtoul(step_str, NULL, 0);
4163 if (!step)
4164 return -EINVAL;
4165
4166
4167 vports = lpfc_create_vport_work_array(phba);
4168 if (vports == NULL)
4169 return -ENOMEM;
4170
4171 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4172 v_shost = lpfc_shost_from_vport(vports[i]);
4173 spin_lock_irq(v_shost->host_lock);
4174
4175 vports[i]->stat_data_blocked = 1;
4176 if (vports[i]->stat_data_enabled)
4177 lpfc_vport_reset_stat_data(vports[i]);
4178 spin_unlock_irq(v_shost->host_lock);
4179 }
4180
4181
4182 phba->bucket_type = bucket_type;
4183 phba->bucket_base = base;
4184 phba->bucket_step = step;
4185
4186 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4187 v_shost = lpfc_shost_from_vport(vports[i]);
4188
4189
4190 spin_lock_irq(v_shost->host_lock);
4191 vports[i]->stat_data_blocked = 0;
4192 spin_unlock_irq(v_shost->host_lock);
4193 }
4194 lpfc_destroy_vport_work_array(phba, vports);
4195 return strlen(buf);
4196 }
4197
4198 if (!strncmp(buf, "destroybucket", strlen("destroybucket"))) {
4199 vports = lpfc_create_vport_work_array(phba);
4200 if (vports == NULL)
4201 return -ENOMEM;
4202
4203 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4204 v_shost = lpfc_shost_from_vport(vports[i]);
4205 spin_lock_irq(v_shost->host_lock);
4206 vports[i]->stat_data_blocked = 1;
4207 lpfc_free_bucket(vports[i]);
4208 vports[i]->stat_data_enabled = 0;
4209 vports[i]->stat_data_blocked = 0;
4210 spin_unlock_irq(v_shost->host_lock);
4211 }
4212 lpfc_destroy_vport_work_array(phba, vports);
4213 phba->bucket_type = LPFC_NO_BUCKET;
4214 phba->bucket_base = 0;
4215 phba->bucket_step = 0;
4216 return strlen(buf);
4217 }
4218
4219 if (!strncmp(buf, "start", strlen("start"))) {
4220
4221 if (phba->bucket_type == LPFC_NO_BUCKET)
4222 return -EINVAL;
4223 spin_lock_irq(shost->host_lock);
4224 if (vport->stat_data_enabled) {
4225 spin_unlock_irq(shost->host_lock);
4226 return strlen(buf);
4227 }
4228 lpfc_alloc_bucket(vport);
4229 vport->stat_data_enabled = 1;
4230 spin_unlock_irq(shost->host_lock);
4231 return strlen(buf);
4232 }
4233
4234 if (!strncmp(buf, "stop", strlen("stop"))) {
4235 spin_lock_irq(shost->host_lock);
4236 if (vport->stat_data_enabled == 0) {
4237 spin_unlock_irq(shost->host_lock);
4238 return strlen(buf);
4239 }
4240 lpfc_free_bucket(vport);
4241 vport->stat_data_enabled = 0;
4242 spin_unlock_irq(shost->host_lock);
4243 return strlen(buf);
4244 }
4245
4246 if (!strncmp(buf, "reset", strlen("reset"))) {
4247 if ((phba->bucket_type == LPFC_NO_BUCKET)
4248 || !vport->stat_data_enabled)
4249 return strlen(buf);
4250 spin_lock_irq(shost->host_lock);
4251 vport->stat_data_blocked = 1;
4252 lpfc_vport_reset_stat_data(vport);
4253 vport->stat_data_blocked = 0;
4254 spin_unlock_irq(shost->host_lock);
4255 return strlen(buf);
4256 }
4257 return -EINVAL;
4258 }
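/*
 * lpfc_stat_data_ctrl accepts the following commands (the numeric
 * arguments below are examples only):
 *
 *   setbucket <linear|power2> <base> <step>   e.g. "setbucket linear 100 50"
 *   destroybucket   - free the buckets and forget the bucket geometry
 *   start           - allocate buckets and begin collecting latency data
 *   stop            - stop collection and free the buckets
 *   reset           - clear the collected counters
 *
 * Anything else is rejected with -EINVAL.
 */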
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271 static ssize_t
4272 lpfc_stat_data_ctrl_show(struct device *dev, struct device_attribute *attr,
4273 char *buf)
4274 {
4275 struct Scsi_Host *shost = class_to_shost(dev);
4276 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4277 struct lpfc_hba *phba = vport->phba;
4278 int index = 0;
4279 int i;
4280 char *bucket_type;
4281 unsigned long bucket_value;
4282
4283 switch (phba->bucket_type) {
4284 case LPFC_LINEAR_BUCKET:
4285 bucket_type = "linear";
4286 break;
4287 case LPFC_POWER2_BUCKET:
4288 bucket_type = "power2";
4289 break;
4290 default:
4291 bucket_type = "No Bucket";
4292 break;
4293 }
4294
4295 sprintf(&buf[index], "Statistical Data enabled :%d, "
4296 "blocked :%d, Bucket type :%s, Bucket base :%d,"
4297 " Bucket step :%d\nLatency Ranges :",
4298 vport->stat_data_enabled, vport->stat_data_blocked,
4299 bucket_type, phba->bucket_base, phba->bucket_step);
4300 index = strlen(buf);
4301 if (phba->bucket_type != LPFC_NO_BUCKET) {
4302 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4303 if (phba->bucket_type == LPFC_LINEAR_BUCKET)
4304 bucket_value = phba->bucket_base +
4305 phba->bucket_step * i;
4306 else
4307 bucket_value = phba->bucket_base +
4308 (1 << i) * phba->bucket_step;
4309
4310 if (index + 10 > PAGE_SIZE)
4311 break;
4312 sprintf(&buf[index], "%08lu ", bucket_value);
4313 index = strlen(buf);
4314 }
4315 }
4316 sprintf(&buf[index], "\n");
4317 return strlen(buf);
4318 }
4319
4320
4321
4322
4323 static DEVICE_ATTR_RW(lpfc_stat_data_ctrl);
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333 #define STAT_DATA_SIZE_PER_TARGET(NUM_BUCKETS) ((NUM_BUCKETS) * 11 + 18)
4334 #define MAX_STAT_DATA_SIZE_PER_TARGET \
4335 STAT_DATA_SIZE_PER_TARGET(LPFC_MAX_BUCKET_COUNT)
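/*
 * Each target record produced by the drvr_stat_data read handler below
 * consists of a 16 hex digit WWPN plus ':' (17 bytes), one "%010u," field
 * per bucket (11 bytes each) and a trailing newline, hence the
 * NUM_BUCKETS * 11 + 18 bytes reserved per target above.
 */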
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351 static ssize_t
4352 sysfs_drvr_stat_data_read(struct file *filp, struct kobject *kobj,
4353 struct bin_attribute *bin_attr,
4354 char *buf, loff_t off, size_t count)
4355 {
4356 struct device *dev = container_of(kobj, struct device,
4357 kobj);
4358 struct Scsi_Host *shost = class_to_shost(dev);
4359 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4360 struct lpfc_hba *phba = vport->phba;
4361 int i = 0, index = 0;
4362 unsigned long nport_index;
4363 struct lpfc_nodelist *ndlp = NULL;
4364 nport_index = (unsigned long)off /
4365 MAX_STAT_DATA_SIZE_PER_TARGET;
4366
4367 if (!vport->stat_data_enabled || vport->stat_data_blocked
4368 || (phba->bucket_type == LPFC_NO_BUCKET))
4369 return 0;
4370
4371 spin_lock_irq(shost->host_lock);
4372 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4373 if (!ndlp->lat_data)
4374 continue;
4375
4376 if (nport_index > 0) {
4377 nport_index--;
4378 continue;
4379 }
4380
4381 if ((index + MAX_STAT_DATA_SIZE_PER_TARGET)
4382 > count)
4383 break;
4384
4385 if (!ndlp->lat_data)
4386 continue;
4387
4388
4389 sprintf(&buf[index], "%02x%02x%02x%02x%02x%02x%02x%02x:",
4390 ndlp->nlp_portname.u.wwn[0],
4391 ndlp->nlp_portname.u.wwn[1],
4392 ndlp->nlp_portname.u.wwn[2],
4393 ndlp->nlp_portname.u.wwn[3],
4394 ndlp->nlp_portname.u.wwn[4],
4395 ndlp->nlp_portname.u.wwn[5],
4396 ndlp->nlp_portname.u.wwn[6],
4397 ndlp->nlp_portname.u.wwn[7]);
4398
4399 index = strlen(buf);
4400
4401 for (i = 0; i < LPFC_MAX_BUCKET_COUNT; i++) {
4402 sprintf(&buf[index], "%010u,",
4403 ndlp->lat_data[i].cmd_count);
4404 index = strlen(buf);
4405 }
4406 sprintf(&buf[index], "\n");
4407 index = strlen(buf);
4408 }
4409 spin_unlock_irq(shost->host_lock);
4410 return index;
4411 }
4412
4413 static struct bin_attribute sysfs_drvr_stat_data_attr = {
4414 .attr = {
4415 .name = "lpfc_drvr_stat_data",
4416 .mode = S_IRUSR,
4417 },
4418 .size = LPFC_MAX_TARGET * MAX_STAT_DATA_SIZE_PER_TARGET,
4419 .read = sysfs_drvr_stat_data_read,
4420 .write = NULL,
4421 };
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447 static ssize_t
4448 lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
4449 const char *buf, size_t count)
4450 {
4451 struct Scsi_Host *shost = class_to_shost(dev);
4452 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4453 struct lpfc_hba *phba = vport->phba;
4454 int val = LPFC_USER_LINK_SPEED_AUTO;
4455 int nolip = 0;
4456 const char *val_buf = buf;
4457 int err;
4458 uint32_t prev_val, if_type;
4459
4460 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
4461 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2 &&
4462 phba->hba_flag & HBA_FORCED_LINK_SPEED)
4463 return -EPERM;
4464
4465 if (!strncmp(buf, "nolip ", strlen("nolip "))) {
4466 nolip = 1;
4467 val_buf = &buf[strlen("nolip ")];
4468 }
4469
4470 if (!isdigit(val_buf[0]))
4471 return -EINVAL;
4472 if (sscanf(val_buf, "%i", &val) != 1)
4473 return -EINVAL;
4474
4475 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
4476 "3055 lpfc_link_speed changed from %d to %d %s\n",
4477 phba->cfg_link_speed, val, nolip ? "(nolip)" : "(lip)");
4478
4479 if (((val == LPFC_USER_LINK_SPEED_1G) && !(phba->lmt & LMT_1Gb)) ||
4480 ((val == LPFC_USER_LINK_SPEED_2G) && !(phba->lmt & LMT_2Gb)) ||
4481 ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
4482 ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
4483 ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
4484 ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
4485 ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb)) ||
4486 ((val == LPFC_USER_LINK_SPEED_64G) && !(phba->lmt & LMT_64Gb))) {
4487 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4488 "2879 lpfc_link_speed attribute cannot be set "
4489 "to %d. Speed is not supported by this port.\n",
4490 val);
4491 return -EINVAL;
4492 }
4493 if (val >= LPFC_USER_LINK_SPEED_16G &&
4494 phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4496 "3112 lpfc_link_speed attribute cannot be set "
4497 "to %d. Speed is not supported in loop mode.\n",
4498 val);
4499 return -EINVAL;
4500 }
4501
4502 switch (val) {
4503 case LPFC_USER_LINK_SPEED_AUTO:
4504 case LPFC_USER_LINK_SPEED_1G:
4505 case LPFC_USER_LINK_SPEED_2G:
4506 case LPFC_USER_LINK_SPEED_4G:
4507 case LPFC_USER_LINK_SPEED_8G:
4508 case LPFC_USER_LINK_SPEED_16G:
4509 case LPFC_USER_LINK_SPEED_32G:
4510 case LPFC_USER_LINK_SPEED_64G:
4511 prev_val = phba->cfg_link_speed;
4512 phba->cfg_link_speed = val;
4513 if (nolip)
4514 return strlen(buf);
4515
4516 err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport));
4517 if (err) {
4518 phba->cfg_link_speed = prev_val;
4519 return -EINVAL;
4520 }
4521 return strlen(buf);
4522 default:
4523 break;
4524 }
4525
4526 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4527 "0469 lpfc_link_speed attribute cannot be set to %d, "
4528 "allowed values are [%s]\n",
4529 val, LPFC_LINK_SPEED_STRING);
4530 return -EINVAL;
4531
4532 }
4533
4534 static int lpfc_link_speed = 0;
4535 module_param(lpfc_link_speed, int, S_IRUGO);
4536 MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
4537 lpfc_param_show(link_speed)
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555 static int
4556 lpfc_link_speed_init(struct lpfc_hba *phba, int val)
4557 {
4558 if (val >= LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
4559 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4560 "3111 lpfc_link_speed of %d cannot "
4561 "support loop mode, setting topology to default.\n",
4562 val);
4563 phba->cfg_topology = 0;
4564 }
4565
4566 switch (val) {
4567 case LPFC_USER_LINK_SPEED_AUTO:
4568 case LPFC_USER_LINK_SPEED_1G:
4569 case LPFC_USER_LINK_SPEED_2G:
4570 case LPFC_USER_LINK_SPEED_4G:
4571 case LPFC_USER_LINK_SPEED_8G:
4572 case LPFC_USER_LINK_SPEED_16G:
4573 case LPFC_USER_LINK_SPEED_32G:
4574 case LPFC_USER_LINK_SPEED_64G:
4575 phba->cfg_link_speed = val;
4576 return 0;
4577 default:
4578 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4579 "0405 lpfc_link_speed attribute cannot "
4580 "be set to %d, allowed values are "
4581 "["LPFC_LINK_SPEED_STRING"]\n", val);
4582 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
4583 return -EINVAL;
4584 }
4585 }
4586
4587 static DEVICE_ATTR_RW(lpfc_link_speed);
4588
4589
4590
4591
4592
4593
4594
4595 LPFC_ATTR(aer_support, 1, 0, 1,
4596 "Enable PCIe device AER support");
4597 lpfc_param_show(aer_support)
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
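/**
 * lpfc_aer_support_store - Enable or disable PCIe device AER support
 * @dev: class device that is converted into a Scsi_Host.
 * @attr: device attribute, not used.
 * @buf: "0" to disable AER, "1" to enable it.
 * @count: unused variable.
 *
 * Calls pci_enable_pcie_error_reporting() or
 * pci_disable_pcie_error_reporting() only when the HBA_AER_ENABLED flag
 * actually needs to change, and keeps cfg_aer_support in step with the
 * result.
 *
 * Returns: length of the buffer on success, -EINVAL for a bad value,
 * -EPERM if the PCI core rejects the request.
 **/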
4626 static ssize_t
4627 lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
4628 const char *buf, size_t count)
4629 {
4630 struct Scsi_Host *shost = class_to_shost(dev);
4631 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4632 struct lpfc_hba *phba = vport->phba;
4633 int val = 0, rc = -EINVAL;
4634
4635 if (!isdigit(buf[0]))
4636 return -EINVAL;
4637 if (sscanf(buf, "%i", &val) != 1)
4638 return -EINVAL;
4639
4640 switch (val) {
4641 case 0:
4642 if (phba->hba_flag & HBA_AER_ENABLED) {
4643 rc = pci_disable_pcie_error_reporting(phba->pcidev);
4644 if (!rc) {
4645 spin_lock_irq(&phba->hbalock);
4646 phba->hba_flag &= ~HBA_AER_ENABLED;
4647 spin_unlock_irq(&phba->hbalock);
4648 phba->cfg_aer_support = 0;
4649 rc = strlen(buf);
4650 } else
4651 rc = -EPERM;
4652 } else {
4653 phba->cfg_aer_support = 0;
4654 rc = strlen(buf);
4655 }
4656 break;
4657 case 1:
4658 if (!(phba->hba_flag & HBA_AER_ENABLED)) {
4659 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4660 if (!rc) {
4661 spin_lock_irq(&phba->hbalock);
4662 phba->hba_flag |= HBA_AER_ENABLED;
4663 spin_unlock_irq(&phba->hbalock);
4664 phba->cfg_aer_support = 1;
4665 rc = strlen(buf);
4666 } else
4667 rc = -EPERM;
4668 } else {
4669 phba->cfg_aer_support = 1;
4670 rc = strlen(buf);
4671 }
4672 break;
4673 default:
4674 rc = -EINVAL;
4675 break;
4676 }
4677 return rc;
4678 }
4679
4680 static DEVICE_ATTR_RW(lpfc_aer_support);
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701 static ssize_t
4702 lpfc_aer_cleanup_state(struct device *dev, struct device_attribute *attr,
4703 const char *buf, size_t count)
4704 {
4705 struct Scsi_Host *shost = class_to_shost(dev);
4706 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4707 struct lpfc_hba *phba = vport->phba;
4708 int val, rc = -1;
4709
4710 if (!isdigit(buf[0]))
4711 return -EINVAL;
4712 if (sscanf(buf, "%i", &val) != 1)
4713 return -EINVAL;
4714 if (val != 1)
4715 return -EINVAL;
4716
4717 if (phba->hba_flag & HBA_AER_ENABLED)
4718 rc = pci_aer_clear_nonfatal_status(phba->pcidev);
4719
4720 if (rc == 0)
4721 return strlen(buf);
4722 else
4723 return -EPERM;
4724 }
4725
4726 static DEVICE_ATTR(lpfc_aer_state_cleanup, S_IWUSR, NULL,
4727 lpfc_aer_cleanup_state);
4728
4729
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744
4745
4746
4747
4748
4749
4750
4751
4752
4753
4754
4755
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765
4766
4767
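/**
 * lpfc_sriov_nr_virtfn_store - Enable SR-IOV virtual functions
 * @dev: class device that is converted into a Scsi_Host.
 * @attr: device attribute, not used.
 * @buf: number of virtual functions to enable, or 0 to disable SR-IOV.
 * @count: unused variable.
 *
 * Writing 0 disables SR-IOV on the PCI function.  A non-zero value is
 * rejected with -EEXIST if virtual functions are already enabled and with
 * -EINVAL if it exceeds LPFC_MAX_VFN_PER_PFN; otherwise the VFs are
 * brought up through lpfc_sli_probe_sriov_nr_virtfn().
 *
 * Returns: length of the buffer on success, a negative errno otherwise.
 **/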
4768 static ssize_t
4769 lpfc_sriov_nr_virtfn_store(struct device *dev, struct device_attribute *attr,
4770 const char *buf, size_t count)
4771 {
4772 struct Scsi_Host *shost = class_to_shost(dev);
4773 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4774 struct lpfc_hba *phba = vport->phba;
4775 struct pci_dev *pdev = phba->pcidev;
4776 int val = 0, rc = -EINVAL;
4777
4778
4779 if (!isdigit(buf[0]))
4780 return -EINVAL;
4781 if (sscanf(buf, "%i", &val) != 1)
4782 return -EINVAL;
4783 if (val < 0)
4784 return -EINVAL;
4785
4786
4787 if (val == 0) {
4788 if (phba->cfg_sriov_nr_virtfn > 0) {
4789 pci_disable_sriov(pdev);
4790 phba->cfg_sriov_nr_virtfn = 0;
4791 }
4792 return strlen(buf);
4793 }
4794
4795
4796 if (phba->cfg_sriov_nr_virtfn > 0) {
4797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4798 "3018 There are %d virtual functions "
4799 "enabled on physical function.\n",
4800 phba->cfg_sriov_nr_virtfn);
4801 return -EEXIST;
4802 }
4803
4804 if (val <= LPFC_MAX_VFN_PER_PFN)
4805 phba->cfg_sriov_nr_virtfn = val;
4806 else {
4807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4808 "3019 Enabling %d virtual functions is not "
4809 "allowed.\n", val);
4810 return -EINVAL;
4811 }
4812
4813 rc = lpfc_sli_probe_sriov_nr_virtfn(phba, phba->cfg_sriov_nr_virtfn);
4814 if (rc) {
4815 phba->cfg_sriov_nr_virtfn = 0;
4816 rc = -EPERM;
4817 } else
4818 rc = strlen(buf);
4819
4820 return rc;
4821 }
4822
4823 LPFC_ATTR(sriov_nr_virtfn, LPFC_DEF_VFN_PER_PFN, 0, LPFC_MAX_VFN_PER_PFN,
4824 "Enable PCIe device SR-IOV virtual fn");
4825
4826 lpfc_param_show(sriov_nr_virtfn)
4827 static DEVICE_ATTR_RW(lpfc_sriov_nr_virtfn);
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844 static ssize_t
4845 lpfc_request_firmware_upgrade_store(struct device *dev,
4846 struct device_attribute *attr,
4847 const char *buf, size_t count)
4848 {
4849 struct Scsi_Host *shost = class_to_shost(dev);
4850 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4851 struct lpfc_hba *phba = vport->phba;
4852 int val = 0, rc;
4853
4854
4855 if (!isdigit(buf[0]))
4856 return -EINVAL;
4857 if (sscanf(buf, "%i", &val) != 1)
4858 return -EINVAL;
4859 if (val != 1)
4860 return -EINVAL;
4861
4862 rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
4863 if (rc)
4864 rc = -EPERM;
4865 else
4866 rc = strlen(buf);
4867 return rc;
4868 }
4869
4870 static int lpfc_req_fw_upgrade;
4871 module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
4872 MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
4873 lpfc_param_show(request_firmware_upgrade)
4874
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886
/* Accept only 0 or 1 for the lpfc_req_fw_upgrade module parameter. */
4887 static int
4888 lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
4889 {
4890 if (val >= 0 && val <= 1) {
4891 phba->cfg_request_firmware_upgrade = val;
4892 return 0;
4893 }
4894 return -EINVAL;
4895 }
4896 static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
4897 lpfc_request_firmware_upgrade_show,
4898 lpfc_request_firmware_upgrade_store);
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
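/*
 * lpfc_force_rscn_store - Force an RSCN to registered remote ports
 *
 * Any write attempts to issue an RSCN ELS from this vport; -EIO is
 * returned if the RSCN could not be issued.
 */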
4917 static ssize_t
4918 lpfc_force_rscn_store(struct device *dev, struct device_attribute *attr,
4919 const char *buf, size_t count)
4920 {
4921 struct Scsi_Host *shost = class_to_shost(dev);
4922 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4923 int i;
4924
4925 i = lpfc_issue_els_rscn(vport, 0);
4926 if (i)
4927 return -EIO;
4928 return strlen(buf);
4929 }
4930
4931
4932
4933
4934
4935
4936
4937 static int lpfc_force_rscn;
4938 module_param(lpfc_force_rscn, int, 0644);
4939 MODULE_PARM_DESC(lpfc_force_rscn,
4940 "Force an RSCN to be sent to all remote NPorts");
4941 lpfc_param_show(force_rscn)
4942
4943
4944
4945
4946
4947
4948
4949
4950
4951 static int
4952 lpfc_force_rscn_init(struct lpfc_hba *phba, int val)
4953 {
4954 return 0;
4955 }
4956 static DEVICE_ATTR_RW(lpfc_force_rscn);
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
4972
4973
4974
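/*
 * lpfc_fcp_imax_store - Set the interrupt coalescing ceiling
 *
 * Accepts 0 (re-enable automatic EQ delay management) or a value between
 * LPFC_MIN_IMAX and LPFC_MAX_IMAX interrupts per second, then reprograms
 * the EQ delay for every interrupt channel.  SLI-4 only.
 */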
4975 static ssize_t
4976 lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
4977 const char *buf, size_t count)
4978 {
4979 struct Scsi_Host *shost = class_to_shost(dev);
4980 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4981 struct lpfc_hba *phba = vport->phba;
4982 struct lpfc_eq_intr_info *eqi;
4983 uint32_t usdelay;
4984 int val = 0, i;
4985
4986
4987 if (phba->sli_rev != LPFC_SLI_REV4)
4988 return -EINVAL;
4989
4990
4991 if (!isdigit(buf[0]))
4992 return -EINVAL;
4993 if (sscanf(buf, "%i", &val) != 1)
4994 return -EINVAL;
4995
4996
4997
4998
4999
5000
5001 if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
5002 return -EINVAL;
5003
5004 phba->cfg_auto_imax = (val) ? 0 : 1;
5005 if (phba->cfg_fcp_imax && !val) {
5006 queue_delayed_work(phba->wq, &phba->eq_delay_work,
5007 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
5008
5009 for_each_present_cpu(i) {
5010 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
5011 eqi->icnt = 0;
5012 }
5013 }
5014
5015 phba->cfg_fcp_imax = (uint32_t)val;
5016
5017 if (phba->cfg_fcp_imax)
5018 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
5019 else
5020 usdelay = 0;
5021
5022 for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
5023 lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
5024 usdelay);
5025
5026 return strlen(buf);
5027 }
5028
5029
5030
5031
5032
5033
5034
5035 static int lpfc_fcp_imax = LPFC_DEF_IMAX;
5036 module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
5037 MODULE_PARM_DESC(lpfc_fcp_imax,
5038 "Set the maximum number of FCP interrupts per second per HBA");
5039 lpfc_param_show(fcp_imax)
5040
5041
5042
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
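/*
 * lpfc_fcp_imax_init - Validate the lpfc_fcp_imax module parameter
 *
 * Forces the value to 0 on non-SLI-4 adapters, accepts 0 or a value in
 * [LPFC_MIN_IMAX, LPFC_MAX_IMAX], and falls back to LPFC_DEF_IMAX for
 * anything out of range.
 */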
5054 static int
5055 lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
5056 {
5057 if (phba->sli_rev != LPFC_SLI_REV4) {
5058 phba->cfg_fcp_imax = 0;
5059 return 0;
5060 }
5061
5062 if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
5063 (val == 0)) {
5064 phba->cfg_fcp_imax = val;
5065 return 0;
5066 }
5067
5068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5069 "3016 lpfc_fcp_imax: %d out of range, using default\n",
5070 val);
5071 phba->cfg_fcp_imax = LPFC_DEF_IMAX;
5072
5073 return 0;
5074 }
5075
5076 static DEVICE_ATTR_RW(lpfc_fcp_imax);
5077
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
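/*
 * lpfc_cq_max_proc_limit_store - Set the per-iteration CQE processing limit
 *
 * Validates the value against [LPFC_CQ_MIN_PROC_LIMIT,
 * LPFC_CQ_MAX_PROC_LIMIT] and applies it to every completion queue attached
 * to the adapter's event queues.  SLI-4 only.
 */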
5094 static ssize_t
5095 lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr,
5096 const char *buf, size_t count)
5097 {
5098 struct Scsi_Host *shost = class_to_shost(dev);
5099 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5100 struct lpfc_hba *phba = vport->phba;
5101 struct lpfc_queue *eq, *cq;
5102 unsigned long val;
5103 int i;
5104
5105
5106 if (phba->sli_rev != LPFC_SLI_REV4)
5107 return -EINVAL;
5108
5109
5110 if (!isdigit(buf[0]))
5111 return -EINVAL;
5112 if (kstrtoul(buf, 0, &val))
5113 return -EINVAL;
5114
5115 if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT)
5116 return -ERANGE;
5117
5118 phba->cfg_cq_max_proc_limit = (uint32_t)val;
5119
5120
5121 for (i = 0; i < phba->cfg_irq_chann; i++) {
5122
5123 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
5124 if (!eq)
5125 continue;
5126
5127 list_for_each_entry(cq, &eq->child_list, list)
5128 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
5129 cq->entry_count);
5130 }
5131
5132 return strlen(buf);
5133 }
5134
5135
5136
5137
5138
5139 static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5140 module_param(lpfc_cq_max_proc_limit, int, 0644);
5141 MODULE_PARM_DESC(lpfc_cq_max_proc_limit,
5142 "Set the maximum number CQEs processed in an iteration of "
5143 "CQ processing");
5144 lpfc_param_show(cq_max_proc_limit)
5145
5146
5147
5148
5149
5150
5151 LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL,
5152 LPFC_CQ_MIN_THRESHOLD_TO_POLL,
5153 LPFC_CQ_MAX_THRESHOLD_TO_POLL,
5154 "CQE Processing Threshold to enable Polling");
5155
5156
5157
5158
5159
5160
5161
5162
5163
5164
5165
5166
5167
5168
5169
5170 static int
5171 lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val)
5172 {
5173 phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT;
5174
5175 if (phba->sli_rev != LPFC_SLI_REV4)
5176 return 0;
5177
5178 if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) {
5179 phba->cfg_cq_max_proc_limit = val;
5180 return 0;
5181 }
5182
5183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5184 "0371 lpfc_cq_max_proc_limit: %d out of range, using "
5185 "default\n",
5186 val);
5187
5188 return 0;
5189 }
5190
5191 static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit);
5192
5193
5194
5195
5196
5197
5198
5199
5200
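/*
 * lpfc_fcp_cpu_map_show - Display the CPU to hardware queue/EQ mapping
 *
 * Emits one line per possible CPU with its hardware queue, EQ and IRQ
 * assignment, paging through the CPUs across successive reads when the
 * output does not fit in one PAGE_SIZE buffer.  Returns an empty string
 * unless the adapter is SLI-4 using MSI-X.
 */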
5201 static ssize_t
5202 lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,
5203 char *buf)
5204 {
5205 struct Scsi_Host *shost = class_to_shost(dev);
5206 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5207 struct lpfc_hba *phba = vport->phba;
5208 struct lpfc_vector_map_info *cpup;
5209 int len = 0;
5210
5211 if ((phba->sli_rev != LPFC_SLI_REV4) ||
5212 (phba->intr_type != MSIX))
5213 return len;
5214
5215 switch (phba->cfg_fcp_cpu_map) {
5216 case 0:
5217 len += scnprintf(buf + len, PAGE_SIZE-len,
5218 "fcp_cpu_map: No mapping (%d)\n",
5219 phba->cfg_fcp_cpu_map);
5220 return len;
5221 case 1:
5222 len += scnprintf(buf + len, PAGE_SIZE-len,
5223 "fcp_cpu_map: HBA centric mapping (%d): "
5224 "%d of %d CPUs online from %d possible CPUs\n",
5225 phba->cfg_fcp_cpu_map, num_online_cpus(),
5226 num_present_cpus(),
5227 phba->sli4_hba.num_possible_cpu);
5228 break;
5229 }
5230
5231 while (phba->sli4_hba.curr_disp_cpu <
5232 phba->sli4_hba.num_possible_cpu) {
5233 cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu];
5234
5235 if (!cpu_present(phba->sli4_hba.curr_disp_cpu))
5236 len += scnprintf(buf + len, PAGE_SIZE - len,
5237 "CPU %02d not present\n",
5238 phba->sli4_hba.curr_disp_cpu);
5239 else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
5240 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5241 len += scnprintf(
5242 buf + len, PAGE_SIZE - len,
5243 "CPU %02d hdwq None "
5244 "physid %d coreid %d ht %d ua %d\n",
5245 phba->sli4_hba.curr_disp_cpu,
5246 cpup->phys_id, cpup->core_id,
5247 (cpup->flag & LPFC_CPU_MAP_HYPER),
5248 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5249 else
5250 len += scnprintf(
5251 buf + len, PAGE_SIZE - len,
5252 "CPU %02d EQ None hdwq %04d "
5253 "physid %d coreid %d ht %d ua %d\n",
5254 phba->sli4_hba.curr_disp_cpu,
5255 cpup->hdwq, cpup->phys_id,
5256 cpup->core_id,
5257 (cpup->flag & LPFC_CPU_MAP_HYPER),
5258 (cpup->flag & LPFC_CPU_MAP_UNASSIGN));
5259 } else {
5260 if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY)
5261 len += scnprintf(
5262 buf + len, PAGE_SIZE - len,
5263 "CPU %02d hdwq None "
5264 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5265 phba->sli4_hba.curr_disp_cpu,
5266 cpup->phys_id,
5267 cpup->core_id,
5268 (cpup->flag & LPFC_CPU_MAP_HYPER),
5269 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5270 lpfc_get_irq(cpup->eq));
5271 else
5272 len += scnprintf(
5273 buf + len, PAGE_SIZE - len,
5274 "CPU %02d EQ %04d hdwq %04d "
5275 "physid %d coreid %d ht %d ua %d IRQ %d\n",
5276 phba->sli4_hba.curr_disp_cpu,
5277 cpup->eq, cpup->hdwq, cpup->phys_id,
5278 cpup->core_id,
5279 (cpup->flag & LPFC_CPU_MAP_HYPER),
5280 (cpup->flag & LPFC_CPU_MAP_UNASSIGN),
5281 lpfc_get_irq(cpup->eq));
5282 }
5283
5284 phba->sli4_hba.curr_disp_cpu++;
5285
5286
5287 if (phba->sli4_hba.curr_disp_cpu <
5288 phba->sli4_hba.num_possible_cpu &&
5289 (len >= (PAGE_SIZE - 64))) {
5290 len += scnprintf(buf + len,
5291 PAGE_SIZE - len, "more...\n");
5292 break;
5293 }
5294 }
5295
5296 if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu)
5297 phba->sli4_hba.curr_disp_cpu = 0;
5298
5299 return len;
5300 }
5301
5302
5303
5304
5305
5306
5307
5308
5309
5310
5311
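/*
 * lpfc_fcp_cpu_map_store - Reject runtime changes to the CPU mapping
 *
 * The mapping is fixed when the adapter is attached, so any write
 * returns -EINVAL.
 */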
5312 static ssize_t
5313 lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
5314 const char *buf, size_t count)
5315 {
5316 return -EINVAL;
5317 }
5318
5319
5320
5321
5322
5323
5324
5325
5326
5327
5328
5329 static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5330 module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR);
5331 MODULE_PARM_DESC(lpfc_fcp_cpu_map,
5332 "Defines how to map CPUs to IRQ vectors per HBA");
5333
5334
5335
5336
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347 static int
5348 lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val)
5349 {
5350 if (phba->sli_rev != LPFC_SLI_REV4) {
5351 phba->cfg_fcp_cpu_map = 0;
5352 return 0;
5353 }
5354
5355 if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) {
5356 phba->cfg_fcp_cpu_map = val;
5357 return 0;
5358 }
5359
5360 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5361 "3326 lpfc_fcp_cpu_map: %d out of range, using "
5362 "default\n", val);
5363 phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP;
5364
5365 return 0;
5366 }
5367
5368 static DEVICE_ATTR_RW(lpfc_fcp_cpu_map);
5369
5370
5371
5372
5373
5374 LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3,
5375 "Select Fibre Channel class of service for FCP sequences");
5376
5377
5378
5379
5380
5381 LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1,
5382 "Use ADISC on rediscovery to authenticate FCP devices");
5383
5384
5385
5386
5387
5388
5389 LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
5390 "First burst size for Targets that support first burst");
5391
5392
5393
5394
5395
5396
5397
5398
5399
5400
5401 LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
5402 "NVME Target mode first burst size in 512B increments.");
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412 LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
5413 "Enable First Burst feature for NVME Initiator.");
5414
5415
5416
5417
5418
5419
5420
5421
5422
5423 LPFC_VPORT_ATTR(max_scsicmpl_time, 0, 0, 60000,
5424 "Use command completion time to control queue depth");
5425
5426 lpfc_vport_param_show(max_scsicmpl_time);
5427 static int
5428 lpfc_max_scsicmpl_time_set(struct lpfc_vport *vport, int val)
5429 {
5430 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5431 struct lpfc_nodelist *ndlp, *next_ndlp;
5432
5433 if (val == vport->cfg_max_scsicmpl_time)
5434 return 0;
5435 if ((val < 0) || (val > 60000))
5436 return -EINVAL;
5437 vport->cfg_max_scsicmpl_time = val;
5438
5439 spin_lock_irq(shost->host_lock);
5440 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5441 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
5442 continue;
5443 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
5444 }
5445 spin_unlock_irq(shost->host_lock);
5446 return 0;
5447 }
5448 lpfc_vport_param_store(max_scsicmpl_time);
5449 static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time);
5450
5451
5452
5453
5454
5455 LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
5456
5457
5458
5459
5460
5461 LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing");
5462
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477
5478
5479 LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU,
5480 LPFC_FCP_SCHED_BY_HDWQ,
5481 LPFC_FCP_SCHED_BY_CPU,
5482 "Determine scheduling algorithm for "
5483 "issuing commands [0] - Hardware Queue, [1] - Current CPU");
5484
5485
5486
5487
5488
5489
5490
5491
5492 LPFC_ATTR_RW(ns_query, LPFC_NS_QUERY_GID_FT,
5493 LPFC_NS_QUERY_GID_FT, LPFC_NS_QUERY_GID_PT,
5494 "Determine algorithm NameServer queries after RSCN "
5495 "[0] - GID_FT, [1] - GID_PT");
5496
5497
5498
5499
5500
5501
5502
5503 LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
5504 "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
5505
5506
5507
5508
5509
5510
5511
5512
5513
5514 LPFC_ATTR_RW(cr_delay, 0, 0, 63, "A count of milliseconds after which an "
5515 "interrupt response is generated");
5516
5517 LPFC_ATTR_RW(cr_count, 1, 1, 255, "A count of I/O completions after which an "
5518 "interrupt response is generated");
5519
5520
5521
5522
5523
5524
5525 LPFC_ATTR_R(multi_ring_support, 1, 1, 2, "Determines number of primary "
5526 "SLI rings to spread IOCB entries across");
5527
5528
5529
5530
5531
5532
5533 LPFC_ATTR_R(multi_ring_rctl, FC_RCTL_DD_UNSOL_DATA, 1,
5534 255, "Identifies RCTL for additional ring configuration");
5535
5536
5537
5538
5539
5540
5541 LPFC_ATTR_R(multi_ring_type, FC_TYPE_IP, 1,
5542 255, "Identifies TYPE for additional ring configuration");
5543
5544
5545
5546
5547
5548
5549
5550
5551 LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
5552
5553
5554
5555
5556
5557
5558
5559
5560
5561
5562
5563
5564 LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
5565
5566
5567
5568
5569
5570 LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
5571 "during discovery");
5572
5573
5574
5575
5576
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
5588
5589
5590
5591
5592
5593
5594
5595 LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
5596
5597
5598
5599
5600
5601 LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
5602 "Milliseconds driver will wait between polling FCP ring");
5603
5604
5605
5606
5607
5608 LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180,
5609 "Maximum time to wait for task management commands to complete");
5610
5611
5612
5613
5614
5615
5616
5617
5618 LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
5619 "MSI-X (2), if possible");
5620
5621
5622
5623
5624
5625
5626
5627
5628
5629 LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
5630 "Use OAS bit on NVME IOs");
5631
5632
5633
5634
5635
5636
5637
5638
5639
5640
5641 LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5642 "Embed NVME Command in WQE");
5643
5644
5645
5646
5647
5648
5649
5650
5651
5652
5653 LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5654 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5655 "Set the number of SCSI Queues advertised");
5656
5657
5658
5659
5660
5661
5662
5663
5664
5665
5666
5667
5668
5669
5670
5671 LPFC_ATTR_R(hdw_queue,
5672 LPFC_HBA_HDWQ_DEF,
5673 LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX,
5674 "Set the number of I/O Hardware Queues");
5675
5676 #if IS_ENABLED(CONFIG_X86)
5677
5678
5679
5680
5681
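/*
 * lpfc_cpumask_irq_mode_init - Build the IRQ affinity mask for the HBA
 *
 * In NUMA mode the mask covers the CPUs local to the adapter's NUMA node
 * (falling back to NORMAL_MODE when no node is reported); in NHT mode it
 * covers the first sibling thread of each physical core.
 */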
5682 static void
5683 lpfc_cpumask_irq_mode_init(struct lpfc_hba *phba)
5684 {
5685 unsigned int cpu, first_cpu, numa_node = NUMA_NO_NODE;
5686 const struct cpumask *sibling_mask;
5687 struct cpumask *aff_mask = &phba->sli4_hba.irq_aff_mask;
5688
5689 cpumask_clear(aff_mask);
5690
5691 if (phba->irq_chann_mode == NUMA_MODE) {
5692
5693 numa_node = dev_to_node(&phba->pcidev->dev);
5694 if (numa_node == NUMA_NO_NODE) {
5695 phba->irq_chann_mode = NORMAL_MODE;
5696 return;
5697 }
5698 }
5699
5700 for_each_possible_cpu(cpu) {
5701 switch (phba->irq_chann_mode) {
5702 case NUMA_MODE:
5703 if (cpu_to_node(cpu) == numa_node)
5704 cpumask_set_cpu(cpu, aff_mask);
5705 break;
5706 case NHT_MODE:
5707 sibling_mask = topology_sibling_cpumask(cpu);
5708 first_cpu = cpumask_first(sibling_mask);
5709 if (first_cpu < nr_cpu_ids)
5710 cpumask_set_cpu(first_cpu, aff_mask);
5711 break;
5712 default:
5713 break;
5714 }
5715 }
5716 }
5717 #endif
5718
5719 static void
5720 lpfc_assign_default_irq_chann(struct lpfc_hba *phba)
5721 {
5722 #if IS_ENABLED(CONFIG_X86)
5723 switch (boot_cpu_data.x86_vendor) {
5724 case X86_VENDOR_AMD:
5725
5726 phba->irq_chann_mode = NUMA_MODE;
5727 break;
5728 case X86_VENDOR_INTEL:
5729
5730 phba->irq_chann_mode = NHT_MODE;
5731 break;
5732 default:
5733 phba->irq_chann_mode = NORMAL_MODE;
5734 break;
5735 }
5736 lpfc_cpumask_irq_mode_init(phba);
5737 #else
5738 phba->irq_chann_mode = NORMAL_MODE;
5739 #endif
5740 }
5741
5742
5743
5744
5745
5746
5747
5748
5749
5750
5751
5752
5753
5754
5755
5756 static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF;
5757 module_param(lpfc_irq_chann, uint, 0444);
5758 MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate");
5759
5760
5761
5762
5763
5764
5765
5766
5767
5768
5769
5770
5771
5772
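/*
 * lpfc_irq_chann_init - Validate the lpfc_irq_chann module parameter
 *
 * Requires MSI-X (cfg_use_msi == 2).  When the defaults are in use on an
 * SLI-4 adapter a platform-specific affinity mode may be selected and the
 * vector count derived from the resulting CPU mask (NHT mode also sizes
 * the hardware queues); otherwise the value is checked against
 * LPFC_IRQ_CHANN_MAX.  SLI-3 adapters are limited to 2 vectors and a
 * single hardware queue.
 */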
5773 static int
5774 lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val)
5775 {
5776 const struct cpumask *aff_mask;
5777
5778 if (phba->cfg_use_msi != 2) {
5779 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5780 "8532 use_msi = %u ignoring cfg_irq_numa\n",
5781 phba->cfg_use_msi);
5782 phba->irq_chann_mode = NORMAL_MODE;
5783 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5784 return 0;
5785 }
5786
5787
5788 if (val == LPFC_IRQ_CHANN_DEF &&
5789 phba->cfg_hdw_queue == LPFC_HBA_HDWQ_DEF &&
5790 phba->sli_rev == LPFC_SLI_REV4)
5791 lpfc_assign_default_irq_chann(phba);
5792
5793 if (phba->irq_chann_mode != NORMAL_MODE) {
5794 aff_mask = &phba->sli4_hba.irq_aff_mask;
5795
5796 if (cpumask_empty(aff_mask)) {
5797 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5798 "8533 Could not identify CPUS for "
5799 "mode %d, ignoring\n",
5800 phba->irq_chann_mode);
5801 phba->irq_chann_mode = NORMAL_MODE;
5802 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5803 } else {
5804 phba->cfg_irq_chann = cpumask_weight(aff_mask);
5805
5806
5807
5808
5809 if (phba->irq_chann_mode == NHT_MODE)
5810 phba->cfg_hdw_queue = phba->cfg_irq_chann;
5811
5812 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5813 "8543 lpfc_irq_chann set to %u "
5814 "(mode: %d)\n", phba->cfg_irq_chann,
5815 phba->irq_chann_mode);
5816 }
5817 } else {
5818 if (val > LPFC_IRQ_CHANN_MAX) {
5819 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5820 "8545 lpfc_irq_chann attribute cannot "
5821 "be set to %u, allowed range is "
5822 "[%u,%u]\n",
5823 val,
5824 LPFC_IRQ_CHANN_MIN,
5825 LPFC_IRQ_CHANN_MAX);
5826 phba->cfg_irq_chann = LPFC_IRQ_CHANN_DEF;
5827 return -EINVAL;
5828 }
5829 if (phba->sli_rev == LPFC_SLI_REV4) {
5830 phba->cfg_irq_chann = val;
5831 } else {
5832 phba->cfg_irq_chann = 2;
5833 phba->cfg_hdw_queue = 1;
5834 }
5835 }
5836
5837 return 0;
5838 }
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848 static ssize_t
5849 lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr,
5850 char *buf)
5851 {
5852 struct Scsi_Host *shost = class_to_shost(dev);
5853 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5854 struct lpfc_hba *phba = vport->phba;
5855
5856 return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann);
5857 }
5858
5859 static DEVICE_ATTR_RO(lpfc_irq_chann);
5860
5861
5862
5863
5864
5865
5866
5867
5868 LPFC_ATTR_RW(enable_hba_reset, 1, 0, 2, "Enable HBA resets from the driver.");
5869
5870
5871
5872
5873
5874
5875
5876 LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");
5877
5878
5879
5880
5881
5882
5883
5884 LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature.");
5885
5886
5887
5888
5889
5890
5891 LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature.");
5892
5893
5894
5895
5896
5897
5898
5899 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
5900
5901
5902
5903
5904
5905
5906
5907
5908
5909
5910
5911
5912
5913
5914
5915
5916 LPFC_ATTR(prot_mask,
5917 (SHOST_DIF_TYPE1_PROTECTION |
5918 SHOST_DIX_TYPE0_PROTECTION |
5919 SHOST_DIX_TYPE1_PROTECTION),
5920 0,
5921 (SHOST_DIF_TYPE1_PROTECTION |
5922 SHOST_DIX_TYPE0_PROTECTION |
5923 SHOST_DIX_TYPE1_PROTECTION),
5924 "T10-DIF host protection capabilities mask");
5925
5926
5927
5928
5929
5930
5931
5932
5933
5934 LPFC_ATTR(prot_guard,
5935 SHOST_DIX_GUARD_IP, SHOST_DIX_GUARD_CRC, SHOST_DIX_GUARD_IP,
5936 "T10-DIF host protection guard type");
5937
5938
5939
5940
5941
5942
5943
5944
5945
5946
5947
5948
5949
5950
5951
5952 LPFC_ATTR(delay_discovery, 0, 0, 1,
5953 "Delay NPort discovery when Clean Address bit is cleared.");
5954
5955
5956
5957
5958
5959
5960
5961
5962
5963
5964 static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
5965 module_param(lpfc_sg_seg_cnt, uint, 0444);
5966 MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
5967
5968
5969
5970
5971
5972
5973
5974
5975
5976
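/*
 * lpfc_sg_seg_cnt_show - Display the configured scatter-gather limits
 *
 * Reports the SGL DMA buffer size plus the total, configured, SCSI and
 * NVMe segment counts currently in use by the adapter.
 */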
5977 static ssize_t
5978 lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
5979 char *buf)
5980 {
5981 struct Scsi_Host *shost = class_to_shost(dev);
5982 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
5983 struct lpfc_hba *phba = vport->phba;
5984 int len;
5985
5986 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
5987 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
5988
5989 len += scnprintf(buf + len, PAGE_SIZE - len,
5990 "Cfg: %d SCSI: %d NVME: %d\n",
5991 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
5992 phba->cfg_nvme_seg_cnt);
5993 return len;
5994 }
5995
5996 static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
5997
5998
5999
6000
6001
6002
6003
6004
6005
6006
6007
6008
6009
6010
6011
6012 static int
6013 lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
6014 {
6015 if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
6016 phba->cfg_sg_seg_cnt = val;
6017 return 0;
6018 }
6019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6020 "0409 lpfc_sg_seg_cnt attribute cannot be set to %d, "
6021 "allowed range is [%d, %d]\n",
6022 val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
6023 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
6024 return -EINVAL;
6025 }
6026
6027
6028
6029
6030
6031
6032
6033 LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
6034
6035
6036
6037
6038
6039
6040
6041 LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
6042 lpfc_param_show(ras_fwlog_buffsize);
6043
6044 static ssize_t
6045 lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val)
6046 {
6047 int ret = 0;
6048 enum ras_state state;
6049
6050 if (!lpfc_rangecheck(val, 0, 4))
6051 return -EINVAL;
6052
6053 if (phba->cfg_ras_fwlog_buffsize == val)
6054 return 0;
6055
6056 if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))
6057 return -EINVAL;
6058
6059 spin_lock_irq(&phba->hbalock);
6060 state = phba->ras_fwlog.state;
6061 spin_unlock_irq(&phba->hbalock);
6062
6063 if (state == REG_INPROGRESS) {
6064 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
6065 "registration is in progress\n");
6066 return -EBUSY;
6067 }
6068
6069
6070
6071
6072
6073 phba->cfg_ras_fwlog_buffsize = val;
6074 if (state == ACTIVE) {
6075 lpfc_ras_stop_fwlog(phba);
6076 lpfc_sli4_ras_dma_free(phba);
6077 }
6078
6079 lpfc_sli4_ras_init(phba);
6080 if (phba->ras_fwlog.ras_enabled)
6081 ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6082 LPFC_RAS_ENABLE_LOGGING);
6083 return ret;
6084 }
6085
6086 lpfc_param_store(ras_fwlog_buffsize);
6087 static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize);
6088
6089
6090
6091
6092
6093
6094
6095 LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
6096
6097
6098
6099
6100
6101
6102
6103
6104 LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
6105
6106
6107
6108
6109
6110
6111
6112 LPFC_BBCR_ATTR_RW(enable_bbcr, 1, 0, 1, "Enable BBC Recovery");
6113
6114
6115 int lpfc_fabric_cgn_frequency = 100;
6116 module_param(lpfc_fabric_cgn_frequency, int, 0444);
6117 MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq");
6118
6119 int lpfc_acqe_cgn_frequency = 10;
6120 module_param(lpfc_acqe_cgn_frequency, int, 0444);
6121 MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq");
6122
6123 int lpfc_use_cgn_signal = 1;
6124 module_param(lpfc_use_cgn_signal, int, 0444);
6125 MODULE_PARM_DESC(lpfc_use_cgn_signal, "Use Congestion signaling if available");
6126
6127
6128
6129
6130
6131
6132
6133 LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push");
6134
6135
6136
6137
6138
6139
6140
6141 LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI");
6142
6143
6144
6145
6146
6147
6148
6149 LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID,
6150 "Maximum number of VMs supported");
6151
6152
6153
6154
6155
6156
6157 LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24,
6158 "Inactivity timeout in hours");
6159
6160
6161
6162
6163
6164
6165
6166 LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE,
6167 LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE,
6168 "Enable App Header VMID support");
6169
6170
6171
6172
6173
6174
6175
6176
6177 LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE,
6178 LPFC_VMID_PRIO_TAG_DISABLE,
6179 LPFC_VMID_PRIO_TAG_ALL_TARGETS,
6180 "Enable Priority Tagging VMID support");
6181
6182 static struct attribute *lpfc_hba_attrs[] = {
6183 &dev_attr_nvme_info.attr,
6184 &dev_attr_scsi_stat.attr,
6185 &dev_attr_bg_info.attr,
6186 &dev_attr_bg_guard_err.attr,
6187 &dev_attr_bg_apptag_err.attr,
6188 &dev_attr_bg_reftag_err.attr,
6189 &dev_attr_info.attr,
6190 &dev_attr_serialnum.attr,
6191 &dev_attr_modeldesc.attr,
6192 &dev_attr_modelname.attr,
6193 &dev_attr_programtype.attr,
6194 &dev_attr_portnum.attr,
6195 &dev_attr_fwrev.attr,
6196 &dev_attr_hdw.attr,
6197 &dev_attr_option_rom_version.attr,
6198 &dev_attr_link_state.attr,
6199 &dev_attr_num_discovered_ports.attr,
6200 &dev_attr_lpfc_drvr_version.attr,
6201 &dev_attr_lpfc_enable_fip.attr,
6202 &dev_attr_lpfc_temp_sensor.attr,
6203 &dev_attr_lpfc_log_verbose.attr,
6204 &dev_attr_lpfc_lun_queue_depth.attr,
6205 &dev_attr_lpfc_tgt_queue_depth.attr,
6206 &dev_attr_lpfc_hba_queue_depth.attr,
6207 &dev_attr_lpfc_peer_port_login.attr,
6208 &dev_attr_lpfc_nodev_tmo.attr,
6209 &dev_attr_lpfc_devloss_tmo.attr,
6210 &dev_attr_lpfc_enable_fc4_type.attr,
6211 &dev_attr_lpfc_fcp_class.attr,
6212 &dev_attr_lpfc_use_adisc.attr,
6213 &dev_attr_lpfc_first_burst_size.attr,
6214 &dev_attr_lpfc_ack0.attr,
6215 &dev_attr_lpfc_xri_rebalancing.attr,
6216 &dev_attr_lpfc_topology.attr,
6217 &dev_attr_lpfc_scan_down.attr,
6218 &dev_attr_lpfc_link_speed.attr,
6219 &dev_attr_lpfc_fcp_io_sched.attr,
6220 &dev_attr_lpfc_ns_query.attr,
6221 &dev_attr_lpfc_fcp2_no_tgt_reset.attr,
6222 &dev_attr_lpfc_cr_delay.attr,
6223 &dev_attr_lpfc_cr_count.attr,
6224 &dev_attr_lpfc_multi_ring_support.attr,
6225 &dev_attr_lpfc_multi_ring_rctl.attr,
6226 &dev_attr_lpfc_multi_ring_type.attr,
6227 &dev_attr_lpfc_fdmi_on.attr,
6228 &dev_attr_lpfc_enable_SmartSAN.attr,
6229 &dev_attr_lpfc_max_luns.attr,
6230 &dev_attr_lpfc_enable_npiv.attr,
6231 &dev_attr_lpfc_fcf_failover_policy.attr,
6232 &dev_attr_lpfc_enable_rrq.attr,
6233 &dev_attr_lpfc_fcp_wait_abts_rsp.attr,
6234 &dev_attr_nport_evt_cnt.attr,
6235 &dev_attr_board_mode.attr,
6236 &dev_attr_max_vpi.attr,
6237 &dev_attr_used_vpi.attr,
6238 &dev_attr_max_rpi.attr,
6239 &dev_attr_used_rpi.attr,
6240 &dev_attr_max_xri.attr,
6241 &dev_attr_used_xri.attr,
6242 &dev_attr_npiv_info.attr,
6243 &dev_attr_issue_reset.attr,
6244 &dev_attr_lpfc_poll.attr,
6245 &dev_attr_lpfc_poll_tmo.attr,
6246 &dev_attr_lpfc_task_mgmt_tmo.attr,
6247 &dev_attr_lpfc_use_msi.attr,
6248 &dev_attr_lpfc_nvme_oas.attr,
6249 &dev_attr_lpfc_nvme_embed_cmd.attr,
6250 &dev_attr_lpfc_fcp_imax.attr,
6251 &dev_attr_lpfc_force_rscn.attr,
6252 &dev_attr_lpfc_cq_poll_threshold.attr,
6253 &dev_attr_lpfc_cq_max_proc_limit.attr,
6254 &dev_attr_lpfc_fcp_cpu_map.attr,
6255 &dev_attr_lpfc_fcp_mq_threshold.attr,
6256 &dev_attr_lpfc_hdw_queue.attr,
6257 &dev_attr_lpfc_irq_chann.attr,
6258 &dev_attr_lpfc_suppress_rsp.attr,
6259 &dev_attr_lpfc_nvmet_mrq.attr,
6260 &dev_attr_lpfc_nvmet_mrq_post.attr,
6261 &dev_attr_lpfc_nvme_enable_fb.attr,
6262 &dev_attr_lpfc_nvmet_fb_size.attr,
6263 &dev_attr_lpfc_enable_bg.attr,
6264 &dev_attr_lpfc_enable_hba_reset.attr,
6265 &dev_attr_lpfc_enable_hba_heartbeat.attr,
6266 &dev_attr_lpfc_EnableXLane.attr,
6267 &dev_attr_lpfc_XLanePriority.attr,
6268 &dev_attr_lpfc_xlane_lun.attr,
6269 &dev_attr_lpfc_xlane_tgt.attr,
6270 &dev_attr_lpfc_xlane_vpt.attr,
6271 &dev_attr_lpfc_xlane_lun_state.attr,
6272 &dev_attr_lpfc_xlane_lun_status.attr,
6273 &dev_attr_lpfc_xlane_priority.attr,
6274 &dev_attr_lpfc_sg_seg_cnt.attr,
6275 &dev_attr_lpfc_max_scsicmpl_time.attr,
6276 &dev_attr_lpfc_stat_data_ctrl.attr,
6277 &dev_attr_lpfc_aer_support.attr,
6278 &dev_attr_lpfc_aer_state_cleanup.attr,
6279 &dev_attr_lpfc_sriov_nr_virtfn.attr,
6280 &dev_attr_lpfc_req_fw_upgrade.attr,
6281 &dev_attr_lpfc_suppress_link_up.attr,
6282 &dev_attr_iocb_hw.attr,
6283 &dev_attr_pls.attr,
6284 &dev_attr_pt.attr,
6285 &dev_attr_txq_hw.attr,
6286 &dev_attr_txcmplq_hw.attr,
6287 &dev_attr_lpfc_sriov_hw_max_virtfn.attr,
6288 &dev_attr_protocol.attr,
6289 &dev_attr_lpfc_xlane_supported.attr,
6290 &dev_attr_lpfc_enable_mds_diags.attr,
6291 &dev_attr_lpfc_ras_fwlog_buffsize.attr,
6292 &dev_attr_lpfc_ras_fwlog_level.attr,
6293 &dev_attr_lpfc_ras_fwlog_func.attr,
6294 &dev_attr_lpfc_enable_bbcr.attr,
6295 &dev_attr_lpfc_enable_dpp.attr,
6296 &dev_attr_lpfc_enable_mi.attr,
6297 &dev_attr_cmf_info.attr,
6298 &dev_attr_lpfc_max_vmid.attr,
6299 &dev_attr_lpfc_vmid_inactivity_timeout.attr,
6300 &dev_attr_lpfc_vmid_app_header.attr,
6301 &dev_attr_lpfc_vmid_priority_tagging.attr,
6302 NULL,
6303 };
6304
6305 static const struct attribute_group lpfc_hba_attr_group = {
6306 .attrs = lpfc_hba_attrs
6307 };
6308
6309 const struct attribute_group *lpfc_hba_groups[] = {
6310 &lpfc_hba_attr_group,
6311 NULL
6312 };
6313
6314 static struct attribute *lpfc_vport_attrs[] = {
6315 &dev_attr_info.attr,
6316 &dev_attr_link_state.attr,
6317 &dev_attr_num_discovered_ports.attr,
6318 &dev_attr_lpfc_drvr_version.attr,
6319 &dev_attr_lpfc_log_verbose.attr,
6320 &dev_attr_lpfc_lun_queue_depth.attr,
6321 &dev_attr_lpfc_tgt_queue_depth.attr,
6322 &dev_attr_lpfc_nodev_tmo.attr,
6323 &dev_attr_lpfc_devloss_tmo.attr,
6324 &dev_attr_lpfc_hba_queue_depth.attr,
6325 &dev_attr_lpfc_peer_port_login.attr,
6326 &dev_attr_lpfc_restrict_login.attr,
6327 &dev_attr_lpfc_fcp_class.attr,
6328 &dev_attr_lpfc_use_adisc.attr,
6329 &dev_attr_lpfc_first_burst_size.attr,
6330 &dev_attr_lpfc_max_luns.attr,
6331 &dev_attr_nport_evt_cnt.attr,
6332 &dev_attr_npiv_info.attr,
6333 &dev_attr_lpfc_enable_da_id.attr,
6334 &dev_attr_lpfc_max_scsicmpl_time.attr,
6335 &dev_attr_lpfc_stat_data_ctrl.attr,
6336 &dev_attr_lpfc_static_vport.attr,
6337 &dev_attr_cmf_info.attr,
6338 NULL,
6339 };
6340
6341 static const struct attribute_group lpfc_vport_attr_group = {
6342 .attrs = lpfc_vport_attrs
6343 };
6344
6345 const struct attribute_group *lpfc_vport_groups[] = {
6346 &lpfc_vport_attr_group,
6347 NULL
6348 };
6349
6350
6351
6352
6353
6354
6355
6356
6357
6358
6359
6360
6361
6362
6363
6364
6365
6366
6367
6368
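/*
 * sysfs_ctlreg_write - Write to the adapter's mapped control registers
 *
 * Only valid for SLI-3 adapters that are offline.  The buffer must begin
 * with the LPFC_REG_WRITE_KEY signature and the offset, length and buffer
 * must be 32-bit aligned; the remaining words are written to the control
 * register area.
 */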
6369 static ssize_t
6370 sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
6371 struct bin_attribute *bin_attr,
6372 char *buf, loff_t off, size_t count)
6373 {
6374 size_t buf_off;
6375 struct device *dev = container_of(kobj, struct device, kobj);
6376 struct Scsi_Host *shost = class_to_shost(dev);
6377 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6378 struct lpfc_hba *phba = vport->phba;
6379
6380 if (phba->sli_rev >= LPFC_SLI_REV4)
6381 return -EPERM;
6382
6383 if ((off + count) > FF_REG_AREA_SIZE)
6384 return -ERANGE;
6385
6386 if (count <= LPFC_REG_WRITE_KEY_SIZE)
6387 return 0;
6388
6389 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6390 return -EINVAL;
6391
6392
6393 if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
6394 return -EINVAL;
6395
6396 if (!(vport->fc_flag & FC_OFFLINE_MODE))
6397 return -EPERM;
6398
6399 spin_lock_irq(&phba->hbalock);
6400 for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
6401 buf_off += sizeof(uint32_t))
6402 writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
6403 phba->ctrl_regs_memmap_p + off + buf_off);
6404
6405 spin_unlock_irq(&phba->hbalock);
6406
6407 return count;
6408 }
6409
6410
6411
6412
6413
6414
6415
6416
6417
6418
6419
6420
6421
6422
6423
6424
6425
6426
6427
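/*
 * sysfs_ctlreg_read - Read from the adapter's mapped control registers
 *
 * Only valid for SLI-3 adapters.  Reads are truncated to the register
 * area and the offset, length and buffer must be 32-bit aligned.
 */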
6428 static ssize_t
6429 sysfs_ctlreg_read(struct file *filp, struct kobject *kobj,
6430 struct bin_attribute *bin_attr,
6431 char *buf, loff_t off, size_t count)
6432 {
6433 size_t buf_off;
6434 uint32_t *tmp_ptr;
6435 struct device *dev = container_of(kobj, struct device, kobj);
6436 struct Scsi_Host *shost = class_to_shost(dev);
6437 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6438 struct lpfc_hba *phba = vport->phba;
6439
6440 if (phba->sli_rev >= LPFC_SLI_REV4)
6441 return -EPERM;
6442
6443 if (off > FF_REG_AREA_SIZE)
6444 return -ERANGE;
6445
6446 if ((off + count) > FF_REG_AREA_SIZE)
6447 count = FF_REG_AREA_SIZE - off;
6448
6449 if (count == 0) return 0;
6450
6451 if (off % 4 || count % 4 || (unsigned long)buf % 4)
6452 return -EINVAL;
6453
6454 spin_lock_irq(&phba->hbalock);
6455
6456 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) {
6457 tmp_ptr = (uint32_t *)(buf + buf_off);
6458 *tmp_ptr = readl(phba->ctrl_regs_memmap_p + off + buf_off);
6459 }
6460
6461 spin_unlock_irq(&phba->hbalock);
6462
6463 return count;
6464 }
6465
6466 static struct bin_attribute sysfs_ctlreg_attr = {
6467 .attr = {
6468 .name = "ctlreg",
6469 .mode = S_IRUSR | S_IWUSR,
6470 },
6471 .size = 256,
6472 .read = sysfs_ctlreg_read,
6473 .write = sysfs_ctlreg_write,
6474 };
6475
6476
6477
6478
6479
6480
6481
6482
6483
6484
6485
6486
6487
6488
6489
6490
6491
6492 static ssize_t
6493 sysfs_mbox_write(struct file *filp, struct kobject *kobj,
6494 struct bin_attribute *bin_attr,
6495 char *buf, loff_t off, size_t count)
6496 {
6497 return -EPERM;
6498 }
6499
6500
6501
6502
6503
6504
6505
6506
6507
6508
6509
6510
6511
6512
6513
6514
6515
6516 static ssize_t
6517 sysfs_mbox_read(struct file *filp, struct kobject *kobj,
6518 struct bin_attribute *bin_attr,
6519 char *buf, loff_t off, size_t count)
6520 {
6521 return -EPERM;
6522 }
6523
6524 static struct bin_attribute sysfs_mbox_attr = {
6525 .attr = {
6526 .name = "mbox",
6527 .mode = S_IRUSR | S_IWUSR,
6528 },
6529 .size = MAILBOX_SYSFS_MAX,
6530 .read = sysfs_mbox_read,
6531 .write = sysfs_mbox_write,
6532 };
6533
6534
6535
6536
6537
6538
6539
6540
6541
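/*
 * lpfc_alloc_sysfs_attr - Create the binary sysfs files for a port
 *
 * Always creates the driver statistics file; physical ports additionally
 * get the ctlreg and mbox files.  On failure any files already created
 * are removed and the error is returned.
 */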
6542 int
6543 lpfc_alloc_sysfs_attr(struct lpfc_vport *vport)
6544 {
6545 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6546 int error;
6547
6548 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6549 &sysfs_drvr_stat_data_attr);
6550
6551
6552 if (error || vport->port_type == LPFC_NPIV_PORT)
6553 goto out;
6554
6555 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6556 &sysfs_ctlreg_attr);
6557 if (error)
6558 goto out_remove_stat_attr;
6559
6560 error = sysfs_create_bin_file(&shost->shost_dev.kobj,
6561 &sysfs_mbox_attr);
6562 if (error)
6563 goto out_remove_ctlreg_attr;
6564
6565 return 0;
6566 out_remove_ctlreg_attr:
6567 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6568 out_remove_stat_attr:
6569 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6570 &sysfs_drvr_stat_data_attr);
6571 out:
6572 return error;
6573 }
6574
6575
6576
6577
6578
6579 void
6580 lpfc_free_sysfs_attr(struct lpfc_vport *vport)
6581 {
6582 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6583 sysfs_remove_bin_file(&shost->shost_dev.kobj,
6584 &sysfs_drvr_stat_data_attr);
6585
6586 if (vport->port_type == LPFC_NPIV_PORT)
6587 return;
6588 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_mbox_attr);
6589 sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
6590 }
6591
6592
6593
6594
6595
6596
6597
6598
6599
6600 static void
6601 lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
6602 {
6603 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
6604
6605 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
6606 sizeof(fc_host_symbolic_name(shost)));
6607 }
6608
6609
6610
6611
6612
6613 static void
6614 lpfc_get_host_port_id(struct Scsi_Host *shost)
6615 {
6616 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6617
6618
6619 fc_host_port_id(shost) = vport->fc_myDID;
6620 }
6621
6622
6623
6624
6625
6626 static void
6627 lpfc_get_host_port_type(struct Scsi_Host *shost)
6628 {
6629 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6630 struct lpfc_hba *phba = vport->phba;
6631
6632 spin_lock_irq(shost->host_lock);
6633
6634 if (vport->port_type == LPFC_NPIV_PORT) {
6635 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
6636 } else if (lpfc_is_link_up(phba)) {
6637 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6638 if (vport->fc_flag & FC_PUBLIC_LOOP)
6639 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
6640 else
6641 fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
6642 } else {
6643 if (vport->fc_flag & FC_FABRIC)
6644 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
6645 else
6646 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
6647 }
6648 } else
6649 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
6650
6651 spin_unlock_irq(shost->host_lock);
6652 }
6653
6654
6655
6656
6657
6658 static void
6659 lpfc_get_host_port_state(struct Scsi_Host *shost)
6660 {
6661 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6662 struct lpfc_hba *phba = vport->phba;
6663
6664 spin_lock_irq(shost->host_lock);
6665
6666 if (vport->fc_flag & FC_OFFLINE_MODE)
6667 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
6668 else {
6669 switch (phba->link_state) {
6670 case LPFC_LINK_UNKNOWN:
6671 case LPFC_LINK_DOWN:
6672 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
6673 break;
6674 case LPFC_LINK_UP:
6675 case LPFC_CLEAR_LA:
6676 case LPFC_HBA_READY:
6677
6678 if (vport->port_state < LPFC_VPORT_READY)
6679 fc_host_port_state(shost) =
6680 FC_PORTSTATE_BYPASSED;
6681 else
6682 fc_host_port_state(shost) =
6683 FC_PORTSTATE_ONLINE;
6684 break;
6685 case LPFC_HBA_ERROR:
6686 fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
6687 break;
6688 default:
6689 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
6690 break;
6691 }
6692 }
6693
6694 spin_unlock_irq(shost->host_lock);
6695 }
6696
6697
6698
6699
6700
6701 static void
6702 lpfc_get_host_speed(struct Scsi_Host *shost)
6703 {
6704 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6705 struct lpfc_hba *phba = vport->phba;
6706
6707 spin_lock_irq(shost->host_lock);
6708
6709 if ((lpfc_is_link_up(phba)) && (!(phba->hba_flag & HBA_FCOE_MODE))) {
6710 switch(phba->fc_linkspeed) {
6711 case LPFC_LINK_SPEED_1GHZ:
6712 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6713 break;
6714 case LPFC_LINK_SPEED_2GHZ:
6715 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
6716 break;
6717 case LPFC_LINK_SPEED_4GHZ:
6718 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
6719 break;
6720 case LPFC_LINK_SPEED_8GHZ:
6721 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
6722 break;
6723 case LPFC_LINK_SPEED_10GHZ:
6724 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6725 break;
6726 case LPFC_LINK_SPEED_16GHZ:
6727 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
6728 break;
6729 case LPFC_LINK_SPEED_32GHZ:
6730 fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
6731 break;
6732 case LPFC_LINK_SPEED_64GHZ:
6733 fc_host_speed(shost) = FC_PORTSPEED_64GBIT;
6734 break;
6735 case LPFC_LINK_SPEED_128GHZ:
6736 fc_host_speed(shost) = FC_PORTSPEED_128GBIT;
6737 break;
6738 case LPFC_LINK_SPEED_256GHZ:
6739 fc_host_speed(shost) = FC_PORTSPEED_256GBIT;
6740 break;
6741 default:
6742 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6743 break;
6744 }
6745 } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
6746 switch (phba->fc_linkspeed) {
6747 case LPFC_ASYNC_LINK_SPEED_1GBPS:
6748 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
6749 break;
6750 case LPFC_ASYNC_LINK_SPEED_10GBPS:
6751 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
6752 break;
6753 case LPFC_ASYNC_LINK_SPEED_20GBPS:
6754 fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
6755 break;
6756 case LPFC_ASYNC_LINK_SPEED_25GBPS:
6757 fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
6758 break;
6759 case LPFC_ASYNC_LINK_SPEED_40GBPS:
6760 fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
6761 break;
6762 case LPFC_ASYNC_LINK_SPEED_100GBPS:
6763 fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
6764 break;
6765 default:
6766 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6767 break;
6768 }
6769 } else
6770 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
6771
6772 spin_unlock_irq(shost->host_lock);
6773 }
6774
6775
6776
6777
6778
6779 static void
6780 lpfc_get_host_fabric_name (struct Scsi_Host *shost)
6781 {
6782 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6783 struct lpfc_hba *phba = vport->phba;
6784 u64 node_name;
6785
6786 spin_lock_irq(shost->host_lock);
6787
6788 if ((vport->port_state > LPFC_FLOGI) &&
6789 ((vport->fc_flag & FC_FABRIC) ||
6790 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
6791 (vport->fc_flag & FC_PUBLIC_LOOP))))
6792 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
6793 else
6794
6795 node_name = 0;
6796
6797 spin_unlock_irq(shost->host_lock);
6798
6799 fc_host_fabric_name(shost) = node_name;
6800 }
6801
6802
6803
6804
6805
6806
6807
6808
6809
6810
6811
6812
6813
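/*
 * lpfc_get_stats - Gather FC host statistics from the adapter
 *
 * Issues READ_STATUS and READ_LNK_STAT mailbox commands (polled when the
 * port is offline), converts the counters into the fc_host_statistics
 * format, and subtracts the offsets recorded by the last statistics reset.
 * Returns NULL if the adapter state does not allow mailbox commands or a
 * command fails.
 */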
6814 static struct fc_host_statistics *
6815 lpfc_get_stats(struct Scsi_Host *shost)
6816 {
6817 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6818 struct lpfc_hba *phba = vport->phba;
6819 struct lpfc_sli *psli = &phba->sli;
6820 struct fc_host_statistics *hs = &phba->link_stats;
6821 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6822 LPFC_MBOXQ_t *pmboxq;
6823 MAILBOX_t *pmb;
6824 int rc = 0;
6825
6826
6827
6828
6829
6830 if (phba->link_state < LPFC_LINK_DOWN ||
6831 !phba->mbox_mem_pool ||
6832 (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
6833 return NULL;
6834
6835 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6836 return NULL;
6837
6838 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6839 if (!pmboxq)
6840 return NULL;
6841 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6842
6843 pmb = &pmboxq->u.mb;
6844 pmb->mbxCommand = MBX_READ_STATUS;
6845 pmb->mbxOwner = OWN_HOST;
6846 pmboxq->ctx_buf = NULL;
6847 pmboxq->vport = vport;
6848
6849 if (vport->fc_flag & FC_OFFLINE_MODE) {
6850 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6851 if (rc != MBX_SUCCESS) {
6852 mempool_free(pmboxq, phba->mbox_mem_pool);
6853 return NULL;
6854 }
6855 } else {
6856 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6857 if (rc != MBX_SUCCESS) {
6858 if (rc != MBX_TIMEOUT)
6859 mempool_free(pmboxq, phba->mbox_mem_pool);
6860 return NULL;
6861 }
6862 }
6863
6864 memset(hs, 0, sizeof(struct fc_host_statistics));
6865
6866 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
6867 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
6868
6869
6870
6871
6872
6873
6874
6875
6876 if (pmb->un.varRdStatus.xkb & RD_ST_XKB) {
6877 hs->tx_words = (u64)
6878 ((((u64)(pmb->un.varRdStatus.xmit_xkb &
6879 RD_ST_XMIT_XKB_MASK) << 32) |
6880 (u64)pmb->un.varRdStatus.xmitByteCnt) *
6881 (u64)256);
6882 hs->rx_words = (u64)
6883 ((((u64)(pmb->un.varRdStatus.rcv_xkb &
6884 RD_ST_RCV_XKB_MASK) << 32) |
6885 (u64)pmb->un.varRdStatus.rcvByteCnt) *
6886 (u64)256);
6887 } else {
6888 hs->tx_words = (uint64_t)
6889 ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
6890 * (uint64_t)256);
6891 hs->rx_words = (uint64_t)
6892 ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
6893 * (uint64_t)256);
6894 }
6895
6896 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6897 pmb->mbxCommand = MBX_READ_LNK_STAT;
6898 pmb->mbxOwner = OWN_HOST;
6899 pmboxq->ctx_buf = NULL;
6900 pmboxq->vport = vport;
6901
6902 if (vport->fc_flag & FC_OFFLINE_MODE) {
6903 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6904 if (rc != MBX_SUCCESS) {
6905 mempool_free(pmboxq, phba->mbox_mem_pool);
6906 return NULL;
6907 }
6908 } else {
6909 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6910 if (rc != MBX_SUCCESS) {
6911 if (rc != MBX_TIMEOUT)
6912 mempool_free(pmboxq, phba->mbox_mem_pool);
6913 return NULL;
6914 }
6915 }
6916
6917 hs->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
6918 hs->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
6919 hs->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
6920 hs->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
6921 hs->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
6922 hs->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
6923 hs->error_frames = pmb->un.varRdLnk.crcCnt;
6924
6925 hs->cn_sig_warn = atomic64_read(&phba->cgn_acqe_stat.warn);
6926 hs->cn_sig_alarm = atomic64_read(&phba->cgn_acqe_stat.alarm);
6927
6928 hs->link_failure_count -= lso->link_failure_count;
6929 hs->loss_of_sync_count -= lso->loss_of_sync_count;
6930 hs->loss_of_signal_count -= lso->loss_of_signal_count;
6931 hs->prim_seq_protocol_err_count -= lso->prim_seq_protocol_err_count;
6932 hs->invalid_tx_word_count -= lso->invalid_tx_word_count;
6933 hs->invalid_crc_count -= lso->invalid_crc_count;
6934 hs->error_frames -= lso->error_frames;
6935
6936 if (phba->hba_flag & HBA_FCOE_MODE) {
6937 hs->lip_count = -1;
6938 hs->nos_count = (phba->link_events >> 1);
6939 hs->nos_count -= lso->link_events;
6940 } else if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6941 hs->lip_count = (phba->fc_eventTag >> 1);
6942 hs->lip_count -= lso->link_events;
6943 hs->nos_count = -1;
6944 } else {
6945 hs->lip_count = -1;
6946 hs->nos_count = (phba->fc_eventTag >> 1);
6947 hs->nos_count -= lso->link_events;
6948 }
6949
6950 hs->dumped_frames = -1;
6951
6952 hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;
6953
6954 mempool_free(pmboxq, phba->mbox_mem_pool);
6955
6956 return hs;
6957 }
6958
6959
6960
6961
6962
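/*
 * lpfc_reset_stats - Reset the FC host statistics baseline
 *
 * Issues READ_STATUS and READ_LNK_STAT mailbox commands and records the
 * returned link error counters as offsets so that later lpfc_get_stats()
 * calls report deltas, then clears the congestion signal counters and
 * FPIN statistics and restarts the statistics timestamp.
 */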
6963 static void
6964 lpfc_reset_stats(struct Scsi_Host *shost)
6965 {
6966 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6967 struct lpfc_hba *phba = vport->phba;
6968 struct lpfc_sli *psli = &phba->sli;
6969 struct lpfc_lnk_stat *lso = &psli->lnk_stat_offsets;
6970 LPFC_MBOXQ_t *pmboxq;
6971 MAILBOX_t *pmb;
6972 int rc = 0;
6973
6974 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
6975 return;
6976
6977 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6978 if (!pmboxq)
6979 return;
6980 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
6981
6982 pmb = &pmboxq->u.mb;
6983 pmb->mbxCommand = MBX_READ_STATUS;
6984 pmb->mbxOwner = OWN_HOST;
6985 pmb->un.varWords[0] = 0x1;
6986 pmboxq->ctx_buf = NULL;
6987 pmboxq->vport = vport;
6988
6989 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
6990 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
6991 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
6992 if (rc != MBX_SUCCESS) {
6993 mempool_free(pmboxq, phba->mbox_mem_pool);
6994 return;
6995 }
6996 } else {
6997 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
6998 if (rc != MBX_SUCCESS) {
6999 if (rc != MBX_TIMEOUT)
7000 mempool_free(pmboxq, phba->mbox_mem_pool);
7001 return;
7002 }
7003 }
7004
7005 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
7006 pmb->mbxCommand = MBX_READ_LNK_STAT;
7007 pmb->mbxOwner = OWN_HOST;
7008 pmboxq->ctx_buf = NULL;
7009 pmboxq->vport = vport;
7010
7011 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
7012 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
7013 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
7014 if (rc != MBX_SUCCESS) {
7015 mempool_free(pmboxq, phba->mbox_mem_pool);
7016 return;
7017 }
7018 } else {
7019 rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
7020 if (rc != MBX_SUCCESS) {
7021 if (rc != MBX_TIMEOUT)
7022 mempool_free(pmboxq, phba->mbox_mem_pool);
7023 return;
7024 }
7025 }
7026
7027 lso->link_failure_count = pmb->un.varRdLnk.linkFailureCnt;
7028 lso->loss_of_sync_count = pmb->un.varRdLnk.lossSyncCnt;
7029 lso->loss_of_signal_count = pmb->un.varRdLnk.lossSignalCnt;
7030 lso->prim_seq_protocol_err_count = pmb->un.varRdLnk.primSeqErrCnt;
7031 lso->invalid_tx_word_count = pmb->un.varRdLnk.invalidXmitWord;
7032 lso->invalid_crc_count = pmb->un.varRdLnk.crcCnt;
7033 lso->error_frames = pmb->un.varRdLnk.crcCnt;
7034 if (phba->hba_flag & HBA_FCOE_MODE)
7035 lso->link_events = (phba->link_events >> 1);
7036 else
7037 lso->link_events = (phba->fc_eventTag >> 1);
7038
7039 atomic64_set(&phba->cgn_acqe_stat.warn, 0);
7040 atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
7041
7042 memset(&shost_to_fc_host(shost)->fpin_stats, 0,
7043 sizeof(shost_to_fc_host(shost)->fpin_stats));
7044
7045 psli->stats_start = ktime_get_seconds();
7046
7047 mempool_free(pmboxq, phba->mbox_mem_pool);
7048
7049 return;
7050 }
7051
7052
7053
7054
7055
7056
7057
7058
7059
7060
7061
7062
7063
7064
7065 static struct lpfc_nodelist *
7066 lpfc_get_node_by_target(struct scsi_target *starget)
7067 {
7068 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
7069 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
7070 struct lpfc_nodelist *ndlp;
7071
7072 spin_lock_irq(shost->host_lock);
7073
7074 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
7075 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
7076 starget->id == ndlp->nlp_sid) {
7077 spin_unlock_irq(shost->host_lock);
7078 return ndlp;
7079 }
7080 }
7081 spin_unlock_irq(shost->host_lock);
7082 return NULL;
7083 }
7084
7085
7086
7087
7088
7089 static void
7090 lpfc_get_starget_port_id(struct scsi_target *starget)
7091 {
7092 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7093
7094 fc_starget_port_id(starget) = ndlp ? ndlp->nlp_DID : -1;
7095 }
7096
7097
7098
7099
7100
7101
7102
7103 static void
7104 lpfc_get_starget_node_name(struct scsi_target *starget)
7105 {
7106 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7107
7108 fc_starget_node_name(starget) =
7109 ndlp ? wwn_to_u64(ndlp->nlp_nodename.u.wwn) : 0;
7110 }
7111
7112
7113
7114
7115
7116
7117
7118 static void
7119 lpfc_get_starget_port_name(struct scsi_target *starget)
7120 {
7121 struct lpfc_nodelist *ndlp = lpfc_get_node_by_target(starget);
7122
7123 fc_starget_port_name(starget) =
7124 ndlp ? wwn_to_u64(ndlp->nlp_portname.u.wwn) : 0;
7125 }
7126
7127
7128
7129
7130
7131
7132
7133
7134
7135
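/*
 * lpfc_set_rport_loss_tmo - Set the dev_loss_tmo for a remote port
 *
 * Stores the new timeout (minimum of 1 second) on the rport and, when an
 * NVMe remote port is associated with the node, propagates the value to
 * the NVMe FC transport as well.
 */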
7136 static void
7137 lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
7138 {
7139 struct lpfc_rport_data *rdata = rport->dd_data;
7140 struct lpfc_nodelist *ndlp = rdata->pnode;
7141 #if (IS_ENABLED(CONFIG_NVME_FC))
7142 struct lpfc_nvme_rport *nrport = NULL;
7143 #endif
7144
7145 if (timeout)
7146 rport->dev_loss_tmo = timeout;
7147 else
7148 rport->dev_loss_tmo = 1;
7149
7150 if (!ndlp) {
7151 dev_info(&rport->dev, "Cannot find remote node to "
7152 "set rport dev loss tmo, port_id x%x\n",
7153 rport->port_id);
7154 return;
7155 }
7156
7157 #if (IS_ENABLED(CONFIG_NVME_FC))
7158 nrport = lpfc_ndlp_get_nrport(ndlp);
7159
7160 if (nrport && nrport->remoteport)
7161 nvme_fc_set_remoteport_devloss(nrport->remoteport,
7162 rport->dev_loss_tmo);
7163 #endif
7164 }
7165
7166
7167
7168
7169
7170
7171
7172
7173
7174
7175
7176
7177
7178 #define lpfc_rport_show_function(field, format_string, sz, cast) \
7179 static ssize_t \
7180 lpfc_show_rport_##field (struct device *dev, \
7181 struct device_attribute *attr, \
7182 char *buf) \
7183 { \
7184 struct fc_rport *rport = transport_class_to_rport(dev); \
7185 struct lpfc_rport_data *rdata = rport->hostdata; \
7186 return scnprintf(buf, sz, format_string, \
7187 (rdata->target) ? cast rdata->target->field : 0); \
7188 }
7189
7190 #define lpfc_rport_rd_attr(field, format_string, sz) \
7191 lpfc_rport_show_function(field, format_string, sz, ) \
7192 static FC_RPORT_ATTR(field, S_IRUGO, lpfc_show_rport_##field, NULL)
7193
7194
7195
7196
7197
7198
7199
7200
7201
7202
7203 static void
7204 lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
7205 {
7206 struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
7207
7208 if (vport->port_state == LPFC_VPORT_READY)
7209 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
7210 }
7211
7212
7213
7214
7215
7216
7217
7218
7219
7220
7221
7222 static void
7223 lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
7224 {
7225 phba->cfg_log_verbose = verbose;
7226 }
7227
7228 struct fc_function_template lpfc_transport_functions = {
7229
7230 .show_host_node_name = 1,
7231 .show_host_port_name = 1,
7232 .show_host_supported_classes = 1,
7233 .show_host_supported_fc4s = 1,
7234 .show_host_supported_speeds = 1,
7235 .show_host_maxframe_size = 1,
7236
7237 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7238 .show_host_symbolic_name = 1,
7239
7240
7241 .get_host_port_id = lpfc_get_host_port_id,
7242 .show_host_port_id = 1,
7243
7244 .get_host_port_type = lpfc_get_host_port_type,
7245 .show_host_port_type = 1,
7246
7247 .get_host_port_state = lpfc_get_host_port_state,
7248 .show_host_port_state = 1,
7249
7250
7251 .show_host_active_fc4s = 1,
7252
7253 .get_host_speed = lpfc_get_host_speed,
7254 .show_host_speed = 1,
7255
7256 .get_host_fabric_name = lpfc_get_host_fabric_name,
7257 .show_host_fabric_name = 1,
7258
7259
7260
7261
7262
7263
7264 .get_fc_host_stats = lpfc_get_stats,
7265 .reset_fc_host_stats = lpfc_reset_stats,
7266
7267 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7268 .show_rport_maxframe_size = 1,
7269 .show_rport_supported_classes = 1,
7270
7271 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7272 .show_rport_dev_loss_tmo = 1,
7273
7274 .get_starget_port_id = lpfc_get_starget_port_id,
7275 .show_starget_port_id = 1,
7276
7277 .get_starget_node_name = lpfc_get_starget_node_name,
7278 .show_starget_node_name = 1,
7279
7280 .get_starget_port_name = lpfc_get_starget_port_name,
7281 .show_starget_port_name = 1,
7282
7283 .issue_fc_host_lip = lpfc_issue_lip,
7284 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7285 .terminate_rport_io = lpfc_terminate_rport_io,
7286
7287 .dd_fcvport_size = sizeof(struct lpfc_vport *),
7288
7289 .vport_disable = lpfc_vport_disable,
7290
7291 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7292
7293 .bsg_request = lpfc_bsg_request,
7294 .bsg_timeout = lpfc_bsg_timeout,
7295 };
7296
7297 struct fc_function_template lpfc_vport_transport_functions = {
7298
7299 .show_host_node_name = 1,
7300 .show_host_port_name = 1,
7301 .show_host_supported_classes = 1,
7302 .show_host_supported_fc4s = 1,
7303 .show_host_supported_speeds = 1,
7304 .show_host_maxframe_size = 1,
7305
7306 .get_host_symbolic_name = lpfc_get_host_symbolic_name,
7307 .show_host_symbolic_name = 1,
7308
7309
7310 .get_host_port_id = lpfc_get_host_port_id,
7311 .show_host_port_id = 1,
7312
7313 .get_host_port_type = lpfc_get_host_port_type,
7314 .show_host_port_type = 1,
7315
7316 .get_host_port_state = lpfc_get_host_port_state,
7317 .show_host_port_state = 1,
7318
7319
7320 .show_host_active_fc4s = 1,
7321
7322 .get_host_speed = lpfc_get_host_speed,
7323 .show_host_speed = 1,
7324
7325 .get_host_fabric_name = lpfc_get_host_fabric_name,
7326 .show_host_fabric_name = 1,
7327
7328
7329
7330
7331
7332
7333 .get_fc_host_stats = lpfc_get_stats,
7334 .reset_fc_host_stats = lpfc_reset_stats,
7335
7336 .dd_fcrport_size = sizeof(struct lpfc_rport_data),
7337 .show_rport_maxframe_size = 1,
7338 .show_rport_supported_classes = 1,
7339
7340 .set_rport_dev_loss_tmo = lpfc_set_rport_loss_tmo,
7341 .show_rport_dev_loss_tmo = 1,
7342
7343 .get_starget_port_id = lpfc_get_starget_port_id,
7344 .show_starget_port_id = 1,
7345
7346 .get_starget_node_name = lpfc_get_starget_node_name,
7347 .show_starget_node_name = 1,
7348
7349 .get_starget_port_name = lpfc_get_starget_port_name,
7350 .show_starget_port_name = 1,
7351
7352 .dev_loss_tmo_callbk = lpfc_dev_loss_tmo_callbk,
7353 .terminate_rport_io = lpfc_terminate_rport_io,
7354
7355 .vport_disable = lpfc_vport_disable,
7356
7357 .set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
7358 };
7359
7360
7361
7362
7363
7364
7365 static void
7366 lpfc_get_hba_function_mode(struct lpfc_hba *phba)
7367 {
7368
7369 switch (phba->pcidev->device) {
7370 case PCI_DEVICE_ID_SKYHAWK:
7371 case PCI_DEVICE_ID_SKYHAWK_VF:
7372 case PCI_DEVICE_ID_LANCER_FCOE:
7373 case PCI_DEVICE_ID_LANCER_FCOE_VF:
7374 case PCI_DEVICE_ID_ZEPHYR_DCSP:
7375 case PCI_DEVICE_ID_TIGERSHARK:
7376 case PCI_DEVICE_ID_TOMCAT:
7377 phba->hba_flag |= HBA_FCOE_MODE;
7378 break;
7379 default:
7380
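/* All other devices run in plain FC mode */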
7381 phba->hba_flag &= ~HBA_FCOE_MODE;
7382 }
7383 }
7384
7385
7386
7387
7388
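/**
 * lpfc_get_cfgparam - Initialize the adapter configuration from module
 * parameters during probe.
 * @phba: lpfc_hba pointer.
 **/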
7389 void
7390 lpfc_get_cfgparam(struct lpfc_hba *phba)
7391 {
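/*
 * Each lpfc_<param>_init() call below is generated by the LPFC_ATTR*
 * macros earlier in this file: it range-checks the module-parameter
 * value and stores the validated value (or the default) in the
 * matching phba->cfg_<param> field.
 */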
7392 lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
7393 lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
7394 lpfc_ns_query_init(phba, lpfc_ns_query);
7395 lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
7396 lpfc_cr_delay_init(phba, lpfc_cr_delay);
7397 lpfc_cr_count_init(phba, lpfc_cr_count);
7398 lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
7399 lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl);
7400 lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type);
7401 lpfc_ack0_init(phba, lpfc_ack0);
7402 lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing);
7403 lpfc_topology_init(phba, lpfc_topology);
7404 lpfc_link_speed_init(phba, lpfc_link_speed);
7405 lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
7406 lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo);
7407 lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
7408 lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy);
7409 lpfc_enable_rrq_init(phba, lpfc_enable_rrq);
7410 lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp);
7411 lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
7412 lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
7413 lpfc_use_msi_init(phba, lpfc_use_msi);
7414 lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
7415 lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd);
7416 lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
7417 lpfc_force_rscn_init(phba, lpfc_force_rscn);
7418 lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold);
7419 lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit);
7420 lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
7421 lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
7422 lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
7423
7424 lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
7425
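/* VMID parameter initialization */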
7426 lpfc_max_vmid_init(phba, lpfc_max_vmid);
7427 lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout);
7428 lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header);
7429 lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging);
7430 if (phba->sli_rev != LPFC_SLI_REV4)
7431 phba->cfg_EnableXLane = 0;
7432 lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
7433
7434 memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
7435 memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
7436 phba->cfg_oas_lun_state = 0;
7437 phba->cfg_oas_lun_status = 0;
7438 phba->cfg_oas_flags = 0;
7439 phba->cfg_oas_priority = 0;
7440 lpfc_enable_bg_init(phba, lpfc_enable_bg);
7441 lpfc_prot_mask_init(phba, lpfc_prot_mask);
7442 lpfc_prot_guard_init(phba, lpfc_prot_guard);
7443 if (phba->sli_rev == LPFC_SLI_REV4)
7444 phba->cfg_poll = 0;
7445 else
7446 phba->cfg_poll = lpfc_poll;
7447
7448
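/* Determine whether this function runs in FC or FCoE mode */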
7449 lpfc_get_hba_function_mode(phba);
7450
7451
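/* BlockGuard is only supported in FC mode, not FCoE */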
7452 if (phba->cfg_enable_bg && (phba->hba_flag & HBA_FCOE_MODE)) {
7453 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7454 "0581 BlockGuard feature not supported\n");
7455
7456 phba->cfg_enable_bg = 0;
7457 } else if (phba->cfg_enable_bg) {
7458 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
7459 }
7460
7461 lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
7462
7463 lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
7464 lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
7465 lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
7466
7467
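/* First burst: initiator enable flag and target first-burst size */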
7468 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7469 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7470 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7471 lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7472 lpfc_irq_chann_init(phba, lpfc_irq_chann);
7473 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
7474 lpfc_enable_dpp_init(phba, lpfc_enable_dpp);
7475 lpfc_enable_mi_init(phba, lpfc_enable_mi);
7476
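/* Congestion management starts disabled; an out-of-range signal frequency falls back to 100 */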
7477 phba->cgn_p.cgn_param_mode = LPFC_CFG_OFF;
7478 phba->cmf_active_mode = LPFC_CFG_OFF;
7479 if (lpfc_fabric_cgn_frequency > EDC_CG_SIGFREQ_CNT_MAX ||
7480 lpfc_fabric_cgn_frequency < EDC_CG_SIGFREQ_CNT_MIN)
7481 lpfc_fabric_cgn_frequency = 100;
7482
7483 if (phba->sli_rev != LPFC_SLI_REV4) {
7484
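/* NVME and NVMET are only supported on SLI4 */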
7485 phba->nvmet_support = 0;
7486 phba->cfg_nvmet_mrq = 0;
7487 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
7488 phba->cfg_enable_bbcr = 0;
7489 phba->cfg_xri_rebalancing = 0;
7490 } else {
7491
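/* SLI4 must always have FCP support enabled */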
7492 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
7493 phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
7494 }
7495
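/* auto_imax is only used when fcp_imax is not explicitly set */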
7496 phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 0 : 1;
7497
7498 phba->cfg_enable_pbde = 0;
7499
7500
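/* A value of 0 means use the number of CPUs present in the system */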
7501 if (phba->cfg_hdw_queue == 0)
7502 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7503 if (phba->cfg_irq_chann == 0)
7504 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7505 if (phba->cfg_irq_chann > phba->cfg_hdw_queue &&
7506 phba->sli_rev == LPFC_SLI_REV4)
7507 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7508
7509 lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
7510 lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
7511 lpfc_aer_support_init(phba, lpfc_aer_support);
7512 lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
7513 lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
7514 lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
7515 lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
7516 lpfc_sli_mode_init(phba, lpfc_sli_mode);
7517 lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
7518 lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
7519 lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
7520 lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
7521
7522 return;
7523 }
7524
7525
7526
7527
7528
7529
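/**
 * lpfc_nvme_mod_param_dep - Resolve dependencies between NVME-related module
 * parameters and the available CPU/queue resources.
 * @phba: lpfc_hba pointer.
 **/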
7530 void
7531 lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
7532 {
7533 int logit = 0;
7534
7535 if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) {
7536 phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu;
7537 logit = 1;
7538 }
7539 if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) {
7540 phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu;
7541 logit = 1;
7542 }
7543 if (phba->cfg_irq_chann > phba->cfg_hdw_queue) {
7544 phba->cfg_irq_chann = phba->cfg_hdw_queue;
7545 logit = 1;
7546 }
7547 if (logit)
7548 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7549 "2006 Reducing Queues - CPU limitation: "
7550 "IRQ %d HDWQ %d\n",
7551 phba->cfg_irq_chann,
7552 phba->cfg_hdw_queue);
7553
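/* When operating as an NVME target, FCP initiator support is turned off */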
7554 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
7555 phba->nvmet_support) {
7556 phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
7557
7558 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
7559 "6013 %s x%x fb_size x%x, fb_max x%x\n",
7560 "NVME Target PRLI ACC enable_fb ",
7561 phba->cfg_nvme_enable_fb,
7562 phba->cfg_nvmet_fb_size,
7563 LPFC_NVMET_FB_SZ_MAX);
7564
7565 if (phba->cfg_nvme_enable_fb == 0)
7566 phba->cfg_nvmet_fb_size = 0;
7567 else if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
7568 phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
7571
7572 if (!phba->cfg_nvmet_mrq)
7573 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7574
7575
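/* The number of MRQs cannot exceed the number of hardware queues */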
7576 if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) {
7577 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
7578 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
7579 "6018 Adjust lpfc_nvmet_mrq to %d\n",
7580 phba->cfg_nvmet_mrq);
7581 }
7582 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
7583 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
7584
7585 } else {
7586
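/* Not NVME target mode: clear all NVMET parameters */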
7587 phba->nvmet_support = 0;
7588 phba->cfg_nvmet_mrq = 0;
7589 phba->cfg_nvmet_fb_size = 0;
7590 }
7591 }
7592
7593
7594
7595
7596
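/**
 * lpfc_get_vport_cfgparam - Initialize vport configuration from module
 * parameters during port creation.
 * @vport: lpfc_vport pointer.
 **/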
7597 void
7598 lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
7599 {
7600 lpfc_log_verbose_init(vport, lpfc_log_verbose);
7601 lpfc_lun_queue_depth_init(vport, lpfc_lun_queue_depth);
7602 lpfc_tgt_queue_depth_init(vport, lpfc_tgt_queue_depth);
7603 lpfc_devloss_tmo_init(vport, lpfc_devloss_tmo);
7604 lpfc_nodev_tmo_init(vport, lpfc_nodev_tmo);
7605 lpfc_peer_port_login_init(vport, lpfc_peer_port_login);
7606 lpfc_restrict_login_init(vport, lpfc_restrict_login);
7607 lpfc_fcp_class_init(vport, lpfc_fcp_class);
7608 lpfc_use_adisc_init(vport, lpfc_use_adisc);
7609 lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
7610 lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
7611 lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
7612 lpfc_max_luns_init(vport, lpfc_max_luns);
7613 lpfc_scan_down_init(vport, lpfc_scan_down);
7614 lpfc_enable_da_id_init(vport, lpfc_enable_da_id);
7615 return;
7616 }