0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023 #include <linux/interrupt.h>
0024 #include <linux/mempool.h>
0025 #include <linux/pci.h>
0026 #include <linux/slab.h>
0027 #include <linux/delay.h>
0028 #include <linux/list.h>
0029 #include <linux/bsg-lib.h>
0030 #include <linux/vmalloc.h>
0031
0032 #include <scsi/scsi.h>
0033 #include <scsi/scsi_host.h>
0034 #include <scsi/scsi_transport_fc.h>
0035 #include <scsi/scsi_bsg_fc.h>
0036 #include <scsi/fc/fc_fs.h>
0037
0038 #include "lpfc_hw4.h"
0039 #include "lpfc_hw.h"
0040 #include "lpfc_sli.h"
0041 #include "lpfc_sli4.h"
0042 #include "lpfc_nl.h"
0043 #include "lpfc_bsg.h"
0044 #include "lpfc_disc.h"
0045 #include "lpfc_scsi.h"
0046 #include "lpfc.h"
0047 #include "lpfc_logmsg.h"
0048 #include "lpfc_crtn.h"
0049 #include "lpfc_debugfs.h"
0050 #include "lpfc_vport.h"
0051 #include "lpfc_version.h"
0052
/* A registered waiter for unsolicited events (e.g. CT events).  Lifetime is
 * kref-managed; the release function removes it from the HBA waiter list. */
struct lpfc_bsg_event {
	struct list_head node;		/* link on the HBA's ct_ev_waiters list */
	struct kref kref;		/* refcount; last put frees this struct */
	wait_queue_head_t wq;		/* woken when a matching event arrives */

	/* Event type and identifiers registered by the application */
	uint32_t type_mask;		/* bitmask of event types to match */
	uint32_t req_id;		/* request id to match against incoming events */
	uint32_t reg_id;		/* registration id supplied by the requester */

	/* wait housekeeping */
	unsigned long wait_time_stamp;	/* jiffies of last registration/activity */
	int waiting;			/* nonzero while a job is waiting on wq */

	/* received event data: claimed vs. not-yet-claimed */
	struct list_head events_to_get;	/* event_data ready to be returned to user */
	struct list_head events_to_see;	/* event_data received but not yet moved */

	/* driver data associated with this waiter (struct bsg_job_data *) */
	void *dd_data;
};
0074
/* Per-job state for an outstanding management/ELS iocb. */
struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;	/* the issued command iocb */
	struct lpfc_dmabuf *rmp;	/* reply payload buffer chain (may be NULL) */
	struct lpfc_nodelist *ndlp;	/* node the command was addressed to */
};
0080
/* Per-job state for an outstanding mailbox command. */
struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;		/* the queued mailbox command */
	MAILBOX_t *mb;			/* mailbox command image */
	struct lpfc_dmabuf *dmabuffers;	/* for BIU diags */
	uint8_t *ext;			/* extended mailbox data buffer */
	uint32_t mbOffset;		/* offset into the mailbox data */
	uint32_t inExtWLen;		/* input extension word length */
	uint32_t outExtWLen;		/* output extension word length */
};
0090
/* Discriminator values for bsg_job_data.type */
#define TYPE_EVT 1
#define TYPE_IOCB 2
#define TYPE_MBOX 3
/* Driver-private data attached to a bsg_job while it is outstanding. */
struct bsg_job_data {
	uint32_t type;			/* one of TYPE_EVT/TYPE_IOCB/TYPE_MBOX */
	struct bsg_job *set_job;	/* job waiting for this iocb/mbox/event to finish */
	union {
		struct lpfc_bsg_event *evt;	/* valid when type == TYPE_EVT */
		struct lpfc_bsg_iocb iocb;	/* valid when type == TYPE_IOCB */
		struct lpfc_bsg_mbox mbox;	/* valid when type == TYPE_MBOX */
	} context_un;
};
0103
/* One captured unsolicited event, queued on a waiter's event lists. */
struct event_data {
	struct list_head node;		/* link on events_to_see/events_to_get */
	uint32_t type;			/* event type (e.g. FC_REG_CT_EVENT) */
	uint32_t immed_dat;		/* immediate data (context index or ulp context) */
	void *data;			/* kzalloc'd copy of the received payload */
	uint32_t len;			/* length of @data in bytes */
};
0111
#define BUF_SZ_4K 4096
/* CT FsType used for the Emulex diagnostic loopback protocol */
#define SLI_CT_ELX_LOOPBACK 0x10

/* Loopback CT command codes */
enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

/* Size of a CT request up to (not including) the command-specific union */
#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

/* lpfc_dmabuf extended with the dma_alloc_coherent size and usage flags */
struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};
0128
0129 static void
0130 lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
0131 {
0132 struct lpfc_dmabuf *mlast, *next_mlast;
0133
0134 if (mlist) {
0135 list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
0136 list) {
0137 lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
0138 list_del(&mlast->list);
0139 kfree(mlast);
0140 }
0141 lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
0142 kfree(mlist);
0143 }
0144 return;
0145 }
0146
0147 static struct lpfc_dmabuf *
0148 lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
0149 int outbound_buffers, struct ulp_bde64 *bpl,
0150 int *bpl_entries)
0151 {
0152 struct lpfc_dmabuf *mlist = NULL;
0153 struct lpfc_dmabuf *mp;
0154 unsigned int bytes_left = size;
0155
0156
0157 if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
0158 return NULL;
0159
0160
0161 *bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
0162 size/LPFC_BPL_SIZE);
0163
0164
0165 while (bytes_left) {
0166
0167 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
0168 if (!mp) {
0169 if (mlist)
0170 lpfc_free_bsg_buffers(phba, mlist);
0171 return NULL;
0172 }
0173
0174 INIT_LIST_HEAD(&mp->list);
0175 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
0176
0177 if (!mp->virt) {
0178 kfree(mp);
0179 if (mlist)
0180 lpfc_free_bsg_buffers(phba, mlist);
0181 return NULL;
0182 }
0183
0184
0185 if (!mlist)
0186 mlist = mp;
0187 else
0188 list_add_tail(&mp->list, &mlist->list);
0189
0190
0191 if (outbound_buffers)
0192 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
0193 else
0194 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
0195 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
0196 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
0197 bpl->tus.f.bdeSize = (uint16_t)
0198 (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
0199 bytes_left);
0200 bytes_left -= bpl->tus.f.bdeSize;
0201 bpl->tus.w = le32_to_cpu(bpl->tus.w);
0202 bpl++;
0203 }
0204 return mlist;
0205 }
0206
/**
 * lpfc_bsg_copy_data - copy between a dma buffer chain and a bsg sg list
 * @dma_buffers: head of an lpfc_dmabuf chain (each buffer LPFC_BPL_SIZE bytes)
 * @bsg_buffers: the bsg request or reply scatter-gather payload
 * @bytes_to_transfer: maximum number of bytes to move
 * @to_buffers: nonzero = copy sg list -> dma buffers; zero = dma -> sg list
 *
 * Walks the dma chain and the sg list in lockstep, moving at each step the
 * minimum of (bytes left in the request, bytes left in the current sg
 * segment, space left in the current dma buffer).  The sg mapping iterator
 * runs in SG_MITER_ATOMIC mode with local interrupts disabled, so this is
 * usable from completion context.
 *
 * Return: number of bytes actually copied.
 */
static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{
	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	/* Move the chain onto temp_list and add the head buffer's node as
	 * well, so list_for_each_entry() below visits every buffer
	 * including the head. */
	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	/* Atomic sg mappings must not be interrupted mid-iteration. */
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Resume a partially consumed sg entry. */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			/* Clamp to the request and to this dma buffer. */
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			/* Advance once the current sg entry is exhausted. */
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	/* Restore the caller's list structure: detach the head node and
	 * splice the remaining buffers back onto it. */
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}
0271
0272
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
0283
0284
0285
0286
0287
0288
/**
 * lpfc_bsg_send_mgmt_cmd_cmp - completion handler for lpfc_bsg_send_mgmt_cmd
 * @phba: HBA the command completed on.
 * @cmdiocbq: the command iocb that was issued; carries the bsg_job_data.
 * @rspiocbq: the response iocb from the firmware.
 *
 * Copies any received reply payload into the bsg job (if the job has not
 * been aborted in the meantime), frees every resource allocated by the
 * issue path (payload chains, BPL, node reference, iocb, dd_data), and
 * completes the job with a status derived from the ulp status.
 */
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			   struct lpfc_iocbq *cmdiocbq,
			   struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;
	u32 ulp_status, ulp_word4, total_data_placed;

	dd_data = cmdiocbq->context_un.dd_data;

	/* Determine whether the job is still around, and if so detach it
	 * from the iocb so a timeout/abort path cannot touch it again. */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort command */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The command is no longer outstanding on the hardware. */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->cmdiocbq->ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->cmd_dmabuf;
	bmp = cmdiocbq->bpl_dmabuf;
	ulp_status = get_job_ulpstatus(phba, rspiocbq);
	ulp_word4 = get_job_word4(phba, rspiocbq);
	total_data_placed = get_job_data_placed(phba, rspiocbq);

	/* Copy the completed data or set the error status, but only if the
	 * job is still active. */
	if (job) {
		if (ulp_status) {
			if (ulp_status == IOSTAT_LOCAL_REJECT) {
				/* Map the local-reject reason to an errno. */
				switch (ulp_word4 & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			/* Success: copy the reply data into the job. */
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   total_data_placed, 0);
		}
	}

	/* Release all resources taken by the issue path. */
	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_nlp_put(ndlp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if it is still active. */
	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
0373
0374
0375
0376
0377
/**
 * lpfc_bsg_send_mgmt_cmd - send a CT management command via a GEN_REQUEST iocb
 * @job: fc_bsg_job carrying the request payload and reply buffer.
 *
 * Builds a BPL describing both the request and reply payloads (each backed
 * by mbuf chains), copies the request data from the job's sg list into the
 * DMA buffers, and issues a general-request iocb to the remote node.  On
 * success the job completes asynchronously in lpfc_bsg_send_mgmt_cmd_cmp();
 * on failure every resource acquired so far is unwound via the goto ladder.
 *
 * Return: 0 on successful issue (completion is deferred), negative errno
 * otherwise.
 */
static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg, reply_nseg;
	u32 num_entry;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;
	u16 ulp_context;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* Do not issue while an ELS send is pending on this node. */
	if (ndlp->nlp_flag & NLP_ELS_SND_MASK)
		return -ENODEV;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_dd;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	/* The BPL lives in the bmp buffer; request BDEs first, then reply. */
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	/* Stage the outbound request data into the DMA buffers. */
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	num_entry = request_nseg + reply_nseg;

	/* SLI4 uses the mapped RPI id; SLI3 uses the raw RPI. */
	if (phba->sli_rev == LPFC_SLI_REV4)
		ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	else
		ulp_context = ndlp->nlp_rpi;

	lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, ulp_context, num_entry,
			      phba->fc_ratov * 2);

	cmdiocbq->num_bdes = num_entry;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->cmd_dmabuf = cmp;
	cmdiocbq->bpl_dmabuf = bmp;
	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;

	cmdiocbq->cmd_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context_un.dd_data = dd_data;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	/* When polling is enabled, ensure ring interrupts are re-armed. */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO ;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	/* Take a node reference for the duration of the iocb. */
	cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
	if (!cmdiocbq->ndlp) {
		rc = -ENODEV;
		goto free_rmp;
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (iocb_stat == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed yet */
		if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (iocb_stat == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	lpfc_nlp_put(ndlp);

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
0530
0531
0532
0533
0534
0535
0536
0537
0538
0539
0540
0541
0542
0543
0544
0545
0546
0547
/**
 * lpfc_bsg_rport_els_cmp - completion handler for lpfc_bsg_rport_els
 * @phba: HBA the ELS command completed on.
 * @cmdiocbq: the command iocb that was issued; carries the bsg_job_data.
 * @rspiocbq: the response iocb from the firmware.
 *
 * On success, copies the ELS response payload into the job's reply sg
 * list.  On LS_RJT, fills in a fc_bsg_ctels_reply describing the reject.
 * Other failures map to an errno.  Always frees the ELS iocb, the node
 * reference taken at issue time, and the dd_data, then completes the job
 * if it has not been aborted.
 */
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;
	u32 ulp_status, ulp_word4, total_data_placed;

	dd_data = cmdiocbq->context_un.dd_data;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->ndlp = ndlp;

	/* Determine whether the job is still around; if so detach it so a
	 * timeout/abort path cannot touch it again. */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort command */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The command is no longer outstanding on the hardware. */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ulp_status = get_job_ulpstatus(phba, rspiocbq);
	ulp_word4 = get_job_word4(phba, rspiocbq);
	total_data_placed = get_job_data_placed(phba, rspiocbq);
	pcmd = cmdiocbq->cmd_dmabuf;
	/* The response buffer is chained directly after the command buffer
	 * (set up by lpfc_prep_els_iocb). */
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed data or set the error status, but only if the
	 * job is still active. */
	if (job) {
		if (ulp_status == IOSTAT_SUCCESS) {
			rsp_size = total_data_placed;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (ulp_status == IOSTAT_LS_RJT) {
			bsg_reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&ulp_word4;
			els_reply = &bsg_reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else if (ulp_status == IOSTAT_LOCAL_REJECT &&
			   (ulp_word4 & IOERR_PARAM_MASK) ==
			   IOERR_SEQUENCE_TIMEOUT) {
			rc = -ETIMEDOUT;
		} else {
			rc = -EIO;
		}
	}

	/* Release the ELS iocb, the issue-time node reference, and dd_data. */
	lpfc_els_free_iocb(phba, cmdiocbq);

	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if it is still active. */
	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
0636
0637
0638
0639
0640
/**
 * lpfc_bsg_rport_els - send an ELS command to a remote port on behalf of bsg
 * @job: fc_bsg_job carrying the ELS command code and payload.
 *
 * Prepares an ELS iocb for the job's remote node, copies the request
 * payload into the command DMA buffer, and issues it on the ELS ring.  On
 * success the job completes asynchronously in lpfc_bsg_rport_els_cmp();
 * failures unwind via the goto ladder.
 *
 * Return: 0 on successful issue (completion is deferred), negative errno
 * otherwise.
 */
static int
lpfc_bsg_rport_els(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t elscmd;
	uint32_t cmdsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */
	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = bsg_request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;

	/* Take a node reference held for the life of the iocb. */
	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* We will use the allocated dma buffers by prep els iocb for command
	 * and response to ensure if the job times out and the request is freed,
	 * we won't be dma into memory that is no longer allocated to for the
	 * request.
	 */
	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	/* Transfer the request payload to the command DMA buffer. */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  cmdiocbq->cmd_dmabuf->virt,
			  cmdsize);

	rpi = ndlp->nlp_rpi;

	/* SLI4 addresses the exchange by mapped RPI id; SLI3 by raw RPI. */
	if (phba->sli_rev == LPFC_SLI_REV4)
		bf_set(wqe_ctxt_tag, &cmdiocbq->wqe.generic.wqe_com,
		       phba->sli4_hba.rpi_ids[rpi]);
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context_un.dd_data = dd_data;
	cmdiocbq->ndlp = ndlp;
	cmdiocbq->cmd_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	/* When polling is enabled, ensure ring interrupts are re-armed. */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (cmdiocbq->cmd_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (rc == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* I/O issue failed.  Cleanup resources. */

linkdown_err:
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
0767
0768
0769
0770
0771
0772
0773
0774
0775
0776 static void
0777 lpfc_bsg_event_free(struct kref *kref)
0778 {
0779 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
0780 kref);
0781 struct event_data *ed;
0782
0783 list_del(&evt->node);
0784
0785 while (!list_empty(&evt->events_to_get)) {
0786 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
0787 list_del(&ed->node);
0788 kfree(ed->data);
0789 kfree(ed);
0790 }
0791
0792 while (!list_empty(&evt->events_to_see)) {
0793 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
0794 list_del(&ed->node);
0795 kfree(ed->data);
0796 kfree(ed);
0797 }
0798
0799 kfree(evt->dd_data);
0800 kfree(evt);
0801 }
0802
0803
0804
0805
0806
/* Take an additional reference on a bsg event waiter. */
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}
0812
0813
0814
0815
0816
/* Drop a reference on a bsg event waiter; the last put frees it via
 * lpfc_bsg_event_free(). */
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}
0822
0823
0824
0825
0826
0827
0828
0829 static struct lpfc_bsg_event *
0830 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
0831 {
0832 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
0833
0834 if (!evt)
0835 return NULL;
0836
0837 INIT_LIST_HEAD(&evt->events_to_get);
0838 INIT_LIST_HEAD(&evt->events_to_see);
0839 evt->type_mask = ev_mask;
0840 evt->req_id = ev_req_id;
0841 evt->reg_id = ev_reg_id;
0842 evt->wait_time_stamp = jiffies;
0843 evt->dd_data = NULL;
0844 init_waitqueue_head(&evt->wq);
0845 kref_init(&evt->kref);
0846 return evt;
0847 }
0848
0849
0850
0851
0852
0853
0854 static int
0855 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
0856 {
0857 struct lpfc_dmabufext *mlast;
0858 struct pci_dev *pcidev;
0859 struct list_head head, *curr, *next;
0860
0861 if ((!mlist) || (!lpfc_is_link_up(phba) &&
0862 (phba->link_flag & LS_LOOPBACK_MODE))) {
0863 return 0;
0864 }
0865
0866 pcidev = phba->pcidev;
0867 list_add_tail(&head, &mlist->dma.list);
0868
0869 list_for_each_safe(curr, next, &head) {
0870 mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
0871 if (mlast->dma.virt)
0872 dma_free_coherent(&pcidev->dev,
0873 mlast->size,
0874 mlast->dma.virt,
0875 mlast->dma.phys);
0876 kfree(mlast);
0877 }
0878 return 0;
0879 }
0880
0881
0882
0883
0884
0885
0886
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: HBA that received the command.
 * @pring: ring the command arrived on.
 * @piocbq: head of the iocb chain carrying the received frame(s).
 *
 * For every registered waiter whose type mask and request id match the
 * received CT frame, captures the frame data into an event_data, queues it
 * on the waiter, and (except for loopback traffic, which only wakes the
 * waiter) completes any bsg job pending on the waiter.  On SLI4 a CT
 * context entry is recorded so a response can later be correlated.
 *
 * Return: 1 if the caller should post the buffer(s) back, 0 if this
 * routine consumed them (SLI-3 loopback case).
 */
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	IOCB_t *iocb = NULL;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->cmd_dmabuf;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->bpl_dmabuf;
	struct lpfc_sli_ct_request *ct_req;
	struct bsg_job *job = NULL;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;
	u32 bde_count = 0;

	/* Chain a local head onto the iocb list so the walk below covers
	 * piocbq itself as well as any continuation iocbs. */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		/* Skip waiters not registered for this event/request id. */
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
		    evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			if (phba->sli_rev == LPFC_SLI_REV4)
				evt_dat->len = iocbq->wcqe_cmpl.total_data_placed;
			else
				evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			/* Sum BDE sizes across all chained iocbs. */
			list_for_each_entry(iocbq, &head, list) {
				iocb = &iocbq->iocb;
				for (i = 0; i < iocb->ulpBdeCount;
				     i++)
					evt_dat->len +=
					iocb->un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		/* Gather the payload from every buffer of every iocb. */
		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->cmd_dmabuf;
				bdeBuf2 = iocbq->bpl_dmabuf;
			}
			if (phba->sli_rev == LPFC_SLI_REV4)
				bde_count = iocbq->wcqe_cmpl.word3;
			else
				bde_count = iocbq->iocb.ulpBdeCount;
			for (i = 0; i < bde_count; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						size = iocbq->wqe.gen_req.bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						size = iocbq->unsol_rcv_len;
						dmabuf = bdeBuf2;
					}
					/* Never copy past the total length. */
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					/* Look the posted buffer up by its
					 * physical address. */
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
									  pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
							LOG_LIBDFC, "2616 No dmabuf "
							"found for iocbq x%px\n",
							iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				/* Return or free the buffer appropriately. */
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
						     LPFC_SLI_REV2) ||
						    (phba->sli3_options &
						     LPFC_SLI3_HBQ_ENABLED
						    )) {
							lpfc_in_buf_free(phba,
									 dmabuf);
						} else {
							lpfc_sli3_post_buffer(phba,
									      pring,
									      1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_sli3_post_buffer(phba,
									      pring,
									      1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			/* Record exchange identifiers in the next CT context
			 * slot so a later response can be correlated. */
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				get_job_ulpcontext(phba, piocbq);
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				get_job_rcvoxid(phba, piocbq);
			phba->ct_ctx[evt_dat->immed_dat].SID =
				bf_get(wqe_els_did,
				       &piocbq->wqe.xmit_els_rsp.wqe_dest);
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = get_job_ulpcontext(phba, piocbq);

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			/* Loopback: just wake the waiter, don't complete a job */
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		/* Event captured: move it to the claimable list. */
		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			bsg_reply = job->reply;
			bsg_reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			bsg_reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			bsg_job_done(job, bsg_reply->result,
				     bsg_reply->reply_payload_rcv_len);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	/* Detach the local head from the iocb chain. */
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132 int
1133 lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
1134 {
1135 struct fc_frame_header fc_hdr;
1136 struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
1137 int ctx_idx, handled = 0;
1138 uint16_t oxid, rxid;
1139 uint32_t sid;
1140
1141 memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
1142 sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
1143 oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
1144 rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
1145
1146 for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
1147 if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
1148 continue;
1149 if (phba->ct_ctx[ctx_idx].rxid != rxid)
1150 continue;
1151 if (phba->ct_ctx[ctx_idx].oxid != oxid)
1152 continue;
1153 if (phba->ct_ctx[ctx_idx].SID != sid)
1154 continue;
1155 phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
1156 handled = 1;
1157 }
1158 return handled;
1159 }
1160
1161
1162
1163
1164
/**
 * lpfc_bsg_hba_set_event - register a waiter for unsolicited CT events
 * @job: SET_CT_EVENT vendor bsg job.
 *
 * Reuses an existing waiter with the same registration id or creates a
 * new one, attaches the job to it, and leaves the job pending; the job is
 * completed later when a matching event arrives (lpfc_bsg_ct_unsol_event).
 *
 * Return: 0 when the waiter is armed (job completes asynchronously),
 * negative errno on invalid request or allocation failure.
 */
static int
lpfc_bsg_hba_set_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
		   FC_REG_EVENT_MASK);
	/* Look for an existing waiter with this registration id. */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* list_for_each_entry left evt pointing at the list head itself
	 * when no match was found — create a fresh waiter in that case. */
	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	/* Arm the waiter and leave the job pending; it completes when an
	 * event arrives. */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	kfree(dd_data);
	job->dd_data = NULL;
	return rc;
}
1243
1244
1245
1246
1247
/**
 * lpfc_bsg_hba_get_event - return a captured event to the application
 * @job: GET_CT_EVENT vendor bsg job.
 *
 * Finds the waiter matching the registration id, dequeues the oldest
 * claimable event_data, copies its payload into the job's request sg list
 * (note: the request payload carries the output for this vendor command),
 * frees the event data, and completes the job immediately.
 *
 * Return: 0 on success, negative errno when the request is malformed or
 * no event is queued.
 */
static int
lpfc_bsg_hba_get_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt, *evt_next;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;
	/* Find the waiter and dequeue its oldest claimable event. */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't anymore
	 */
	if (evt_dat == NULL) {
		bsg_reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	/* Truncate rather than overrun the caller's buffer. */
	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		bsg_reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	/* Drop the reference taken when the event was dequeued. */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;

job_error:
	job->dd_data = NULL;
	bsg_reply->result = rc;
	return rc;
}
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
/**
 * lpfc_issue_ct_rsp_cmp - completion handler for a CT response iocb
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to the command iocbq (the XMIT_SEQUENCE we issued).
 * @rspiocbq: Pointer to the response iocbq from the firmware.
 *
 * Runs when the CT response issued by lpfc_issue_ct_rsp() completes.
 * Translates the ulp status into an errno, frees every resource attached
 * to the command (payload buffers, BPL, iocbq, ndlp reference, dd_data),
 * and completes the bsg job if it is still outstanding.
 */
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;
	u32 ulp_status, ulp_word4;

	dd_data = cmdiocbq->context_un.dd_data;

	/* Detach the job from dd_data under ct_ev_lock so a concurrent
	 * timeout/abort path cannot also try to complete it.
	 */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Copy the job data or set the failing status for the job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The command is no longer in flight; clear the outstanding flag
	 * under hbalock so the abort path sees a consistent state.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->cmd_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->cmd_dmabuf;
	bmp = cmdiocbq->bpl_dmabuf;

	ulp_status = get_job_ulpstatus(phba, rspiocbq);
	ulp_word4 = get_job_word4(phba, rspiocbq);

	/* Translate firmware status to an errno only if the job is still
	 * around to receive it.
	 */
	if (job) {
		bsg_reply = job->reply;
		if (ulp_status) {
			if (ulp_status == IOSTAT_LOCAL_REJECT) {
				switch (ulp_word4 & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			/* Success: a CT response carries no reply payload. */
			bsg_reply->reply_payload_rcv_len = 0;
		}
	}

	/* Release everything the command owned, in allocation order. */
	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job last; after bsg_job_done() the job may be freed. */
	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
/**
 * lpfc_issue_ct_rsp - issue a CT response for an unsolicited CT exchange
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job requesting the response.
 * @tag: Index into the port's unsolicited CT exchange context array.
 * @cmp: DMA buffer chain holding the response payload.
 * @bmp: DMA buffer holding the BPL describing @cmp.
 * @num_entry: Number of BDE entries in the BPL.
 *
 * Builds and issues an XMIT_SEQUENCE iocb/wqe to answer a previously
 * received unsolicited CT command. On success the iocb completes through
 * lpfc_issue_ct_rsp_cmp(), which frees all resources and completes @job.
 *
 * Return: 0 when the iocb was issued; otherwise IOCB_ERROR or a negative
 * errno and the caller keeps ownership of @cmp/@bmp.
 * NOTE(review): the failure paths mix IOCB_ERROR, -ENOMEM and -IOCB_ERROR
 * style return values — callers only test against 0/IOCB_SUCCESS; confirm
 * before relying on specific error codes.
 */
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	u16 ulp_context, iotag;

	/* The responder node is looked up by the SID saved when the
	 * unsolicited command arrived.
	 */
	ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
	if (!ndlp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				"2721 ndlp null for oxid %x SID %x\n",
				phba->ct_ctx[tag].rxid,
				phba->ct_ctx[tag].SID);
		return IOCB_ERROR;
	}

	/* Allocate the completion-tracking structure first. */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue a response for a stale/consumed exchange. */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp,
					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
					 phba->ct_ctx[tag].oxid, num_entry,
					 FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_WQE);

		/* The exchange context is single-use; invalidate it now. */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
		iotag = get_wqe_reqtag(ctiocb);
	} else {
		/* SLI-3: the tag is used directly as the exchange id. */
		lpfc_sli_prep_xmit_seq64(phba, ctiocb, bmp, 0, tag, num_entry,
					 FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_CX);
		ctiocb->num_bdes = num_entry;
		iotag = ctiocb->iocb.ulpIoTag;
	}

	ulp_context = get_job_ulpcontext(phba, ctiocb);

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
			ulp_context, iotag, tag, phba->link_state);

	ctiocb->cmd_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context_un.dd_data = dd_data;
	ctiocb->cmd_dmabuf = cmp;
	ctiocb->bpl_dmabuf = bmp;
	ctiocb->ndlp = ndlp;
	ctiocb->cmd_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	/* Hold a node reference for the lifetime of the iocb; it is
	 * dropped in the completion handler.
	 */
	dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
	if (!dd_data->context_un.iocb.ndlp) {
		rc = -IOCB_ERROR;
		goto issue_ct_rsp_exit;
	}
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		/* Re-enable FCP ring interrupts while this command is out. */
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (ctiocb->cmd_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			ctiocb->cmd_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;
	lpfc_nlp_put(ndlp);

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}
1564
1565
1566
1567
1568
/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 *
 * Copies the application-supplied payload into driver DMA buffers and
 * sends it as a CT response for the unsolicited exchange identified by
 * the tag in the request. On successful issue, ownership of all buffers
 * passes to lpfc_issue_ct_rsp()'s completion path.
 *
 * Return: 0 when the response was issued (job completes asynchronously),
 * otherwise a negative errno with all local buffers freed.
 */
static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* Reject empty payloads and anything beyond the 320K driver cap. */
	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	/* bmp holds the buffer pointer list (BPL) describing the payload. */
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	/* Copy the user payload out of the sg list into the DMA buffers. */
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* Issue failed: any underlying error is collapsed to -EACCES and
	 * this path owns the buffers again, so free them here.
	 */
	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
1637
1638
1639
1640
1641
1642
1643
1644
1645 static int
1646 lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1647 {
1648 struct lpfc_vport **vports;
1649 struct Scsi_Host *shost;
1650 struct lpfc_sli *psli;
1651 struct lpfc_queue *qp = NULL;
1652 struct lpfc_sli_ring *pring;
1653 int i = 0;
1654
1655 psli = &phba->sli;
1656 if (!psli)
1657 return -ENODEV;
1658
1659
1660 if ((phba->link_state == LPFC_HBA_ERROR) ||
1661 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1662 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1663 return -EACCES;
1664
1665 vports = lpfc_create_vport_work_array(phba);
1666 if (vports) {
1667 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1668 shost = lpfc_shost_from_vport(vports[i]);
1669 scsi_block_requests(shost);
1670 }
1671 lpfc_destroy_vport_work_array(phba, vports);
1672 } else {
1673 shost = lpfc_shost_from_vport(phba->pport);
1674 scsi_block_requests(shost);
1675 }
1676
1677 if (phba->sli_rev != LPFC_SLI_REV4) {
1678 pring = &psli->sli3_ring[LPFC_FCP_RING];
1679 lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
1680 return 0;
1681 }
1682 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1683 pring = qp->pring;
1684 if (!pring || (pring->ringno != LPFC_FCP_RING))
1685 continue;
1686 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1687 &pring->ring_lock))
1688 break;
1689 }
1690 return 0;
1691 }
1692
1693
1694
1695
1696
1697
1698
1699
1700 static void
1701 lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1702 {
1703 struct Scsi_Host *shost;
1704 struct lpfc_vport **vports;
1705 int i;
1706
1707 vports = lpfc_create_vport_work_array(phba);
1708 if (vports) {
1709 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1710 shost = lpfc_shost_from_vport(vports[i]);
1711 scsi_unblock_requests(shost);
1712 }
1713 lpfc_destroy_vport_work_array(phba, vports);
1714 } else {
1715 shost = lpfc_shost_from_vport(phba->pport);
1716 scsi_unblock_requests(shost);
1717 }
1718 return;
1719 }
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
/**
 * lpfc_sli3_bsg_diag_loopback_mode - put an SLI-3 port into loopback mode
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE fc_bsg_job
 *
 * Brings the link down with MBX_DOWN_LINK, then re-initializes it with
 * MBX_INIT_LINK using the loopback flags requested by the application
 * (internal loopback or loop topology), polling the link state with a
 * caller-supplied timeout.
 *
 * Return: 0 on success, negative errno on failure; the bsg job is only
 * completed (bsg_job_done) on success.
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	/* timeout is in 100ms units; polling below steps in 10ms. */
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		/* Reuse the same mailbox to re-initialize the link with
		 * the requested loopback/topology flags.
		 */
		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 * On MBX_TIMEOUT the mailbox is still owned by the firmware and
	 * must not be returned to the pool here.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
1848
1849
1850
1851
1852
1853
1854
1855
1856
/**
 * lpfc_sli4_bsg_set_link_diag_state - set SLI-4 link into/out of diag state
 * @phba: Pointer to HBA context object.
 * @diag: non-zero to enter diagnostic state, zero to leave it.
 *
 * Issues the embedded FCoE SET_LINK_DIAG_STATE mailbox command for the
 * port's current link number/type.
 *
 * Return: 0 on success, -ENOMEM on allocation/config failure, -ENODEV
 * when the mailbox command fails.
 */
static int
lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_mbx_set_link_diag_state *link_diag_state;
	uint32_t req_len, alloc_len;
	int mbxstatus = MBX_SUCCESS, rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	/* Request length excludes the common config header. */
	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		rc = -ENOMEM;
		goto link_diag_state_set_out;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
			diag, phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	if (diag)
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 1);
	else
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 0);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
		rc = 0;
	else
		rc = -ENODEV;

link_diag_state_set_out:
	/* On MBX_TIMEOUT the SLI layer still owns the mailbox; do not
	 * return it to the pool here.
	 */
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return rc;
}
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
/**
 * lpfc_sli4_bsg_set_loopback_mode - set an SLI-4 link's loopback mode
 * @phba: Pointer to HBA context object.
 * @mode: loopback type (LPFC_DIAG_LOOPBACK_TYPE_*).
 * @link_no: physical link number to configure.
 *
 * Issues the embedded FCoE SET_LINK_DIAG_LOOPBACK mailbox command.
 * Trunked links are identified by the conf_trunk bitmap and use the
 * LPFC_LNK_FC_TRUNKED link type instead of the port's default.
 *
 * Return: 0 on success, -ENOMEM on allocation/config failure, -ENODEV
 * when the mailbox command fails.
 */
static int
lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
				uint32_t link_no)
{
	LPFC_MBOXQ_t *pmboxq;
	uint32_t req_len, alloc_len;
	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
	int mbxstatus = MBX_SUCCESS, rc = 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	/* Request length excludes the common config header. */
	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
				req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
	bf_set(lpfc_mbx_set_diag_state_link_num,
	       &link_diag_loopback->u.req, link_no);

	/* Trunked links need the trunked link type in the request. */
	if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
		bf_set(lpfc_mbx_set_diag_state_link_type,
		       &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
	} else {
		bf_set(lpfc_mbx_set_diag_state_link_type,
		       &link_diag_loopback->u.req,
		       phba->sli4_hba.lnk_info.lnk_tp);
	}

	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
	       mode);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3127 Failed setup loopback mode mailbox "
				"command, rc:x%x, status:x%x\n", mbxstatus,
				pmboxq->u.mb.mbxStatus);
		rc = -ENODEV;
	}
	/* On MBX_TIMEOUT the SLI layer still owns the mailbox. */
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return rc;
}
1969
1970
1971
1972
1973
1974
1975
1976
1977 static int
1978 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
1979 {
1980 int rc;
1981
1982 if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
1983 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1984 "3136 Port still had vfi registered: "
1985 "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
1986 phba->pport->fc_myDID, phba->fcf.fcfi,
1987 phba->sli4_hba.vfi_ids[phba->pport->vfi],
1988 phba->vpi_ids[phba->pport->vpi]);
1989 return -EINVAL;
1990 }
1991 rc = lpfc_issue_reg_vfi(phba->pport);
1992 return rc;
1993 }
1994
1995
1996
1997
1998
1999
2000
2001
2002
/**
 * lpfc_sli4_bsg_diag_loopback_mode - put an SLI-4 port into loopback mode
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE fc_bsg_job
 *
 * Handles enabling/disabling internal or external (serdes) loopback on a
 * specific physical link, including trunked-link bookkeeping: bits 0-3 of
 * sli4_hba.conf_trunk mark trunked links, bits 4-7 mark links already in
 * loopback. After configuring loopback the port is reset, registered via
 * REG_VFI with a fake DID of 1, and polled until LPFC_HBA_READY.
 *
 * Return: 0 on success, negative errno on failure; the bsg job is only
 * completed (bsg_job_done) on success.
 */
static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags, timeout, link_no;
	int i, rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3011 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_done;
	}

	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	/* timeout is in 100ms units; polling below steps in 10ms. */
	timeout = loopback_mode->timeout * 100;

	/* physical_link == -1 means "use the port's own link number". */
	if (loopback_mode->physical_link == -1)
		link_no = phba->sli4_hba.lnk_info.lnk_no;
	else
		link_no = loopback_mode->physical_link;

	if (link_flags == DISABLE_LOOP_BACK) {
		rc = lpfc_sli4_bsg_set_loopback_mode(phba,
					LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
					link_no);
		if (!rc) {
			/* Unset the loopback-active bit for this link. */
			phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
		}
		goto job_done;
	} else {
		/* Check if we want to run loopback on a link that is
		 * already in loopback mode.
		 */
		if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
			rc = -EPERM;
			goto job_done;
		}
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_done;

	/* indicate we are in loobpack diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag |= LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);

	/* reset port to start frome scratch */
	rc = lpfc_selective_reset(phba);
	if (rc)
		goto job_done;

	/* bring the link to diagnostic mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3129 Bring link to diagnostic state.\n");

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3130 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			rc = -ETIMEDOUT;
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3131 Timeout waiting for link to "
					"diagnostic mode, timeout:%d ms\n",
					timeout * 10);
			goto loopback_mode_exit;
		}
		msleep(10);
	}

	/* set up loopback mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3132 Set up loopback mode:x%x\n", link_flags);

	switch (link_flags) {
	case INTERNAL_LOOP_BACK:
		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
					link_no);
		} else {
			/* Trunk is configured, but link is not in this trunk */
			if (phba->sli4_hba.conf_trunk) {
				rc = -ELNRNG;
				goto loopback_mode_exit;
			}

			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
					link_no);
		}

		if (!rc) {
			/* Set the loopback-active bit for this link. */
			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
		}

		break;
	case EXTERNAL_LOOP_BACK:
		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
				LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
				link_no);
		} else {
			/* Trunk is configured, but link is not in this trunk */
			if (phba->sli4_hba.conf_trunk) {
				rc = -ELNRNG;
				goto loopback_mode_exit;
			}

			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
						LPFC_DIAG_LOOPBACK_TYPE_SERDES,
						link_no);
		}

		if (!rc) {
			/* Set the loopback-active bit for this link. */
			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
		}

		break;
	default:
		rc = -EINVAL;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3141 Loopback mode:x%x not supported\n",
				link_flags);
		goto loopback_mode_exit;
	}

	if (!rc) {
		/* wait for the link attention interrupt */
		msleep(100);
		i = 0;
		while (phba->link_state < LPFC_LINK_UP) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3137 Timeout waiting for link up "
					"in loopback mode, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

	/* port resource registration setup for loopback diagnostic */
	if (!rc) {
		/* set up a none zero myDID for loopback test */
		phba->pport->fc_myDID = 1;
		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
	} else
		goto loopback_mode_exit;

	if (!rc) {
		/* wait for the port ready */
		msleep(100);
		i = 0;
		while (phba->link_state != LPFC_HBA_READY) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3133 Timeout waiting for port "
					"loopback mode ready, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

loopback_mode_exit:
	/* clear loopback diagnostic mode on any failure */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_LOOPBACK_MODE;
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_bsg_diag_mode_exit(phba);

job_done:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
2213
2214
2215
2216
2217
2218
2219
2220
2221 static int
2222 lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2223 {
2224 struct Scsi_Host *shost;
2225 struct lpfc_vport *vport;
2226 struct lpfc_hba *phba;
2227 int rc;
2228
2229 shost = fc_bsg_to_shost(job);
2230 if (!shost)
2231 return -ENODEV;
2232 vport = shost_priv(shost);
2233 if (!vport)
2234 return -ENODEV;
2235 phba = vport->phba;
2236 if (!phba)
2237 return -ENODEV;
2238
2239 if (phba->sli_rev < LPFC_SLI_REV4)
2240 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2241 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2242 LPFC_SLI_INTF_IF_TYPE_2)
2243 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2244 else
2245 rc = -ENODEV;
2246
2247 return rc;
2248 }
2249
2250
2251
2252
2253
2254
2255
2256
/**
 * lpfc_sli4_bsg_diag_mode_end - bsg vendor command to leave diag loopback
 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END fc_bsg_job
 *
 * Clears loopback diagnostic mode on an SLI-4, interface-type >= 2 port:
 * takes the link out of diagnostic state, waits for link down, then
 * resets the port and restores fc_myDID to 0.
 *
 * Return: 0 on success, negative errno on failure; the bsg job is only
 * completed (bsg_job_done) on success.
 */
static int
lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct diag_mode_set *loopback_mode_end_cmd;
	uint32_t timeout;
	int rc, i;

	shost = fc_bsg_to_shost(job);
	if (!shost)
		return -ENODEV;
	vport = shost_priv(shost);
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	/* Only SLI-4, interface type >= 2 supports this command. */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return -ENODEV;
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -ENODEV;

	/* clear loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag &= ~LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);
	loopback_mode_end_cmd = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	/* timeout is in 100ms units; polling below steps in 10ms. */
	timeout = loopback_mode_end_cmd->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3139 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_end_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3140 Timeout waiting for link to "
					"diagnostic mode_end, timeout:%d ms\n",
					timeout * 10);
			/* there is nothing much we can do here */
			break;
		}
		msleep(10);
	}

	/* reset port resource registrations */
	rc = lpfc_selective_reset(phba);
	phba->pport->fc_myDID = 0;

loopback_mode_end_exit:
	/* make return code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
2328
2329
2330
2331
2332
2333
2334
2335
2336 static int
2337 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2338 {
2339 struct fc_bsg_request *bsg_request = job->request;
2340 struct fc_bsg_reply *bsg_reply = job->reply;
2341 struct Scsi_Host *shost;
2342 struct lpfc_vport *vport;
2343 struct lpfc_hba *phba;
2344 LPFC_MBOXQ_t *pmboxq;
2345 struct sli4_link_diag *link_diag_test_cmd;
2346 uint32_t req_len, alloc_len;
2347 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2348 union lpfc_sli4_cfg_shdr *shdr;
2349 uint32_t shdr_status, shdr_add_status;
2350 struct diag_status *diag_status_reply;
2351 int mbxstatus, rc = -ENODEV, rc1 = 0;
2352
2353 shost = fc_bsg_to_shost(job);
2354 if (!shost)
2355 goto job_error;
2356
2357 vport = shost_priv(shost);
2358 if (!vport)
2359 goto job_error;
2360
2361 phba = vport->phba;
2362 if (!phba)
2363 goto job_error;
2364
2365
2366 if (phba->sli_rev < LPFC_SLI_REV4)
2367 goto job_error;
2368
2369 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2370 LPFC_SLI_INTF_IF_TYPE_2)
2371 goto job_error;
2372
2373 if (job->request_len < sizeof(struct fc_bsg_request) +
2374 sizeof(struct sli4_link_diag)) {
2375 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2376 "3013 Received LINK DIAG TEST request "
2377 " size:%d below the minimum size:%d\n",
2378 job->request_len,
2379 (int)(sizeof(struct fc_bsg_request) +
2380 sizeof(struct sli4_link_diag)));
2381 rc = -EINVAL;
2382 goto job_error;
2383 }
2384
2385 rc = lpfc_bsg_diag_mode_enter(phba);
2386 if (rc)
2387 goto job_error;
2388
2389 link_diag_test_cmd = (struct sli4_link_diag *)
2390 bsg_request->rqst_data.h_vendor.vendor_cmd;
2391
2392 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2393
2394 if (rc)
2395 goto job_error;
2396
2397 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2398 if (!pmboxq) {
2399 rc = -ENOMEM;
2400 goto link_diag_test_exit;
2401 }
2402
2403 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2404 sizeof(struct lpfc_sli4_cfg_mhdr));
2405 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2406 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2407 req_len, LPFC_SLI4_MBX_EMBED);
2408 if (alloc_len != req_len) {
2409 rc = -ENOMEM;
2410 goto link_diag_test_exit;
2411 }
2412
2413 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2414 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2415 phba->sli4_hba.lnk_info.lnk_no);
2416 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2417 phba->sli4_hba.lnk_info.lnk_tp);
2418 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2419 link_diag_test_cmd->test_id);
2420 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2421 link_diag_test_cmd->loops);
2422 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2423 link_diag_test_cmd->test_version);
2424 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2425 link_diag_test_cmd->error_action);
2426
2427 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2428
2429 shdr = (union lpfc_sli4_cfg_shdr *)
2430 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2431 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2432 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2433 if (shdr_status || shdr_add_status || mbxstatus) {
2434 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2435 "3010 Run link diag test mailbox failed with "
2436 "mbx_status x%x status x%x, add_status x%x\n",
2437 mbxstatus, shdr_status, shdr_add_status);
2438 }
2439
2440 diag_status_reply = (struct diag_status *)
2441 bsg_reply->reply_data.vendor_reply.vendor_rsp;
2442
2443 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
2444 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2445 "3012 Received Run link diag test reply "
2446 "below minimum size (%d): reply_len:%d\n",
2447 (int)(sizeof(*bsg_reply) +
2448 sizeof(*diag_status_reply)),
2449 job->reply_len);
2450 rc = -EINVAL;
2451 goto job_error;
2452 }
2453
2454 diag_status_reply->mbox_status = mbxstatus;
2455 diag_status_reply->shdr_status = shdr_status;
2456 diag_status_reply->shdr_add_status = shdr_add_status;
2457
2458 link_diag_test_exit:
2459 rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2460
2461 if (pmboxq)
2462 mempool_free(pmboxq, phba->mbox_mem_pool);
2463
2464 lpfc_bsg_diag_mode_exit(phba);
2465
2466 job_error:
2467
2468 if (rc1 && !rc)
2469 rc = rc1;
2470 bsg_reply->result = rc;
2471
2472 if (rc == 0)
2473 bsg_job_done(job, bsg_reply->result,
2474 bsg_reply->reply_payload_rcv_len);
2475 return rc;
2476 }
2477
2478
2479
2480
2481
2482
2483
2484
2485
/**
 * lpfcdiag_loop_self_reg - register an rpi to the port's own DID for loopback
 * @phba: Pointer to HBA context object.
 * @rpi: in/out rpi: passed in for SLI-3 REG_LOGIN, returned as the
 *       registered rpi (SLI-4 allocates one; SLI-3 reads it back from
 *       the mailbox completion).
 *
 * Issues a REG_LOGIN/REG_RPI mailbox command that logs the port in to
 * itself, as required before the loopback data test.
 *
 * Return: 0 on success; -ENOMEM, -EBUSY (no free rpi), or -ENODEV on
 * failure. On SLI-4 failure the allocated rpi is released.
 */
static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *dmabuff;
	int status;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	if (phba->sli_rev < LPFC_SLI_REV4)
		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
				(uint8_t *)&phba->pport->fc_sparam,
				mbox, *rpi);
	else {
		/* SLI-4 must allocate the rpi before registering it. */
		*rpi = lpfc_sli4_alloc_rpi(phba);
		if (*rpi == LPFC_RPI_ALLOC_ERROR) {
			mempool_free(mbox, phba->mbox_mem_pool);
			return -EBUSY;
		}
		status = lpfc_reg_rpi(phba, phba->pport->vpi,
				phba->pport->fc_myDID,
				(uint8_t *)&phba->pport->fc_sparam,
				mbox, *rpi);
	}

	if (status) {
		mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENOMEM;
	}

	/* Take ownership of the service-params buffer lpfc_reg_rpi
	 * attached, so it is freed here regardless of mbox outcome.
	 */
	dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
		kfree(dmabuff);
		/* On MBX_TIMEOUT the SLI layer still owns the mailbox and
		 * will free it on late completion; do not free it here.
		 */
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENODEV;
	}

	/* SLI-3 firmware returns the assigned rpi in the first mbox word. */
	if (phba->sli_rev < LPFC_SLI_REV4)
		*rpi = mbox->u.mb.un.varWords[0];

	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
	kfree(dmabuff);
	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}
2542
2543
2544
2545
2546
2547
2548
2549
/**
 * lpfcdiag_loop_self_unreg - unregister the rpi obtained for self loopback
 * @phba: pointer to the HBA context object
 * @rpi: remote port login id to unregister
 *
 * Issues an UNREG_LOGIN mailbox for the rpi registered by
 * lpfcdiag_loop_self_reg() and, on SLI-4, returns the rpi to the driver's
 * pool once the unregistration succeeds.
 *
 * Return: 0 on success, -ENOMEM if no mailbox could be allocated, -EIO on
 * mailbox failure.
 */
static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
{
	LPFC_MBOXQ_t *mbox;
	int status;

	/* Allocate mboxq structure */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox == NULL)
		return -ENOMEM;

	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_unreg_login(phba, 0, rpi, mbox);
	else
		/* SLI-4 hardware wants the physical rpi, not the index */
		lpfc_unreg_login(phba, phba->pport->vpi,
				 phba->sli4_hba.rpi_ids[rpi], mbox);

	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		/* On MBX_TIMEOUT the completion path still owns the mailbox */
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
/**
 * lpfcdiag_loop_get_xri - obtain the transmit and receive xri for loopback
 * @phba: pointer to the HBA context object
 * @rpi: rpi previously registered via lpfcdiag_loop_self_reg()
 * @txxri: out parameter receiving the transmit exchange id
 * @rxxri: out parameter receiving the receive exchange id
 *
 * Sends an ELX_LOOPBACK_XRI_SETUP CT request to the port itself over a
 * XMIT_SEQUENCE64 iocb.  The tx xri is taken from the iocb response; the
 * rx xri arrives via the unsolicited CT event registered here and is read
 * from the queued event data.  Only used on pre-SLI-4 hardware.
 *
 * Return: 0 on success; -ENOMEM, -EIO, -EINTR or -ETIMEDOUT on failure.
 */
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
			 uint16_t *txxri, uint16_t * rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	int time_left;
	int iocb_stat = IOCB_SUCCESS;
	unsigned long flags;
	u32 status;

	*txxri = 0;
	*rxxri = 0;
	/* Register for the unsolicited CT event that will carry the rx xri */
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return -ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		if (dmabuf->virt) {
			INIT_LIST_HEAD(&dmabuf->list);
			/* One BDE followed immediately by the CT request */
			bpl = (struct ulp_bde64 *) dmabuf->virt;
			memset(bpl, 0, sizeof(*bpl));
			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
			bpl->addrHigh =
				le32_to_cpu(putPaddrHigh(dmabuf->phys +
							sizeof(*bpl)));
			bpl->addrLow =
				le32_to_cpu(putPaddrLow(dmabuf->phys +
							sizeof(*bpl)));
			bpl->tus.f.bdeFlags = 0;
			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
		}
	}

	/* All allocations above are checked in one place */
	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
		dmabuf->virt == NULL) {
		ret_val = -ENOMEM;
		goto err_get_xri_exit;
	}

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	/* Build the ELX loopback XRI setup CT request */
	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;

	cmdiocbq->bpl_dmabuf = dmabuf;
	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->cmd_cmpl = NULL;

	lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, dmabuf, rpi, 0, 1,
				 FC_RCTL_DD_SOL_CTL, 0, CMD_XMIT_SEQUENCE64_CR);

	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq, (phba->fc_ratov * 2)
					     + LPFC_DRVR_TIMEOUT);

	status = get_job_ulpstatus(phba, rspiocbq);
	if (iocb_stat != IOCB_SUCCESS || status != IOCB_SUCCESS) {
		ret_val = -EIO;
		goto err_get_xri_exit;
	}
	/* Transmit xri comes back in the iocb response context field */
	*txxri = get_job_ulpcontext(phba, rspiocbq);

	/* Receive xri arrives asynchronously as an unsolicited CT event */
	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	if (list_empty(&evt->events_to_see))
		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	/* Drop both references taken on the event (creation + list) */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	/* On IOCB_TIMEDOUT the iocb still belongs to the completion path */
	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}
2711
2712
2713
2714
2715
2716
2717
2718
2719 static struct lpfc_dmabuf *
2720 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2721 {
2722 struct lpfc_dmabuf *dmabuf;
2723 struct pci_dev *pcidev = phba->pcidev;
2724
2725
2726 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2727 if (!dmabuf)
2728 return NULL;
2729
2730 INIT_LIST_HEAD(&dmabuf->list);
2731
2732
2733 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2734 &(dmabuf->phys), GFP_KERNEL);
2735
2736 if (!dmabuf->virt) {
2737 kfree(dmabuf);
2738 return NULL;
2739 }
2740
2741 return dmabuf;
2742 }
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752 static void
2753 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2754 {
2755 struct pci_dev *pcidev = phba->pcidev;
2756
2757 if (!dmabuf)
2758 return;
2759
2760 if (dmabuf->virt)
2761 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2762 dmabuf->virt, dmabuf->phys);
2763 kfree(dmabuf);
2764 return;
2765 }
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775 static void
2776 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2777 struct list_head *dmabuf_list)
2778 {
2779 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2780
2781 if (list_empty(dmabuf_list))
2782 return;
2783
2784 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2785 list_del_init(&dmabuf->list);
2786 lpfc_bsg_dma_page_free(phba, dmabuf);
2787 }
2788 return;
2789 }
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
/**
 * diag_cmd_data_alloc - allocate and chain dma buffers for a diag payload
 * @phba: pointer to the HBA context object
 * @bpl: buffer pointer list to fill, one BDE entry per allocated chunk
 * @size: total payload size in bytes to cover
 * @nocopydata: nonzero to leave buffers uninitialized with plain BDE flags;
 *              zero to zero-fill them and mark the BDEs BUFF_TYPE_BDE_64I
 *
 * Carves @size into chunks of at most BUF_SZ_4K, allocating a coherent DMA
 * buffer for each and recording its address/length in successive @bpl
 * entries.  The buffers are linked into a single list headed by the first
 * allocation, whose ->flag records the number of BDEs written.
 *
 * Return: head of the buffer chain, or NULL on any allocation failure (all
 * partial allocations are released via diag_cmd_data_free()).
 */
static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba *phba,
		   struct ulp_bde64 *bpl, uint32_t size,
		   int nocopydata)
{
	struct lpfc_dmabufext *mlist = NULL;
	struct lpfc_dmabufext *dmp;
	int cnt, offset = 0, i = 0;
	struct pci_dev *pcidev;

	pcidev = phba->pcidev;

	while (size) {
		/* We get chunks of 4K */
		if (size > BUF_SZ_4K)
			cnt = BUF_SZ_4K;
		else
			cnt = size;

		/* allocate struct lpfc_dmabufext buffer header */
		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
		if (!dmp)
			goto out;

		INIT_LIST_HEAD(&dmp->dma.list);

		/* Queue it to a linked list; first allocation is the head */
		if (mlist)
			list_add_tail(&dmp->dma.list, &mlist->dma.list);
		else
			mlist = dmp;

		/* allocate buffer */
		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
						   cnt,
						   &(dmp->dma.phys),
						   GFP_KERNEL);

		if (!dmp->dma.virt)
			goto out;

		dmp->size = cnt;

		if (nocopydata) {
			bpl->tus.f.bdeFlags = 0;
		} else {
			memset((uint8_t *)dmp->dma.virt, 0, cnt);
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		}

		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
		bpl->tus.f.bdeSize = (ushort) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;

		i++;
		offset += cnt;
		size -= cnt;
	}

	if (mlist) {
		/* ->flag carries the BDE count for the caller */
		mlist->flag = i;
		return mlist;
	}
out:
	/* tears down any partially built chain; tolerates NULL */
	diag_cmd_data_free(phba, mlist);
	return NULL;
}
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
/**
 * lpfcdiag_sli3_loop_post_rxbufs - post receive buffers for sli3 loopback
 * @phba: pointer to the HBA context object
 * @rxxri: receive exchange id obtained from lpfcdiag_loop_get_xri()
 * @len: total receive payload length to cover
 *
 * Allocates a chain of DMA buffers sized for @len and posts them to the
 * ELS ring against @rxxri using QUE_XRI iocbs.  In HBQ mode each buffer
 * is posted with its own CMD_QUE_XRI64_CX iocb; otherwise buffers are
 * posted in pairs via CMD_QUE_XRI_BUF64_CX.  Successfully posted buffers
 * become owned by the ring (via lpfc_sli_ringpostbuf_put) and are later
 * reclaimed by the unsolicited-event path, not freed here.
 *
 * Return: 0 on success, -ENOMEM or -EIO on failure.
 */
static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
					  size_t len)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd = NULL;
	struct list_head head, *curr, *next;
	struct lpfc_dmabuf *rxbmp;
	struct lpfc_dmabuf *dmp;
	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
	struct ulp_bde64 *rxbpl = NULL;
	uint32_t num_bde;
	struct lpfc_dmabufext *rxbuffer = NULL;
	int ret_val = 0;
	int iocb_stat;
	int i = 0;

	pring = lpfc_phba_elsring(phba);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (rxbmp != NULL) {
		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		if (rxbmp->virt) {
			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
		}
	}

	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
		ret_val = -ENOMEM;
		goto err_post_rxbufs_exit;
	}

	/* Queue buffers for the receive exchange */
	num_bde = (uint32_t)rxbuffer->flag;
	dmp = &rxbuffer->dma;
	cmd = &cmdiocbq->iocb;
	i = 0;

	/* Splice the buffer chain onto a local head for safe iteration */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &dmp->list);
	list_for_each_safe(curr, next, &head) {
		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
		list_del(curr);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* HBQ mode: tag and post one buffer per iocb */
			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
			cmd->un.quexri64cx.buff.bde.addrHigh =
				putPaddrHigh(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.addrLow =
				putPaddrLow(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
			cmd->ulpCommand = CMD_QUE_XRI64_CX;
			cmd->ulpPU = 0;
			cmd->ulpLe = 1;
			cmd->ulpBdeCount = 1;
			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;

		} else {
			/* Non-HBQ: accumulate up to two BDEs per iocb */
			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
			cmd->un.cont64[i].tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->ulpBdeCount = ++i;

			if ((--num_bde > 0) && (i < 2))
				continue;

			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
			cmd->ulpLe = 1;
		}

		cmd->ulpClass = CLASS3;
		cmd->ulpContext = rxxri;

		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
						0);
		if (iocb_stat == IOCB_ERROR) {
			/* these buffers were never posted; free them here */
			diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[0]);
			if (mp[1])
				diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[1]);
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		/* ownership of posted buffers passes to the ring */
		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
		if (mp[1]) {
			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
			mp[1] = NULL;
		}

		/* The iocb was accepted by the firmware; grab a fresh one */
		cmdiocbq = lpfc_sli_get_iocbq(phba);
		if (!cmdiocbq) {
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}
		cmd = &cmdiocbq->iocb;
		i = 0;
	}
	list_del(&head);

err_post_rxbufs_exit:

	if (rxbmp) {
		if (rxbmp->virt)
			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}

	if (cmdiocbq)
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	return ret_val;
}
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
/**
 * lpfc_bsg_diag_loopback_run - run a loopback test by sending a CT cmd to self
 * @job: LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK bsg job
 *
 * Transmits the user-supplied payload (prefixed with an ELX loopback CT
 * header) out the port and waits for it to arrive back as an unsolicited
 * CT event.  The link must be up and in loopback mode.  On pre-SLI-4
 * hardware a tx/rx xri pair is negotiated and receive buffers are posted
 * first; on SLI-4 the payload is sent directly against the registered rpi.
 * The received data is compared in length and copied back into the job's
 * reply payload.
 *
 * Return: 0/IOCB_SUCCESS on success, negative errno on failure; result is
 * also stored in bsg_reply->result.
 */
static int
lpfc_bsg_diag_loopback_run(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_bsg_event *evt;
	struct event_data *evdat;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t size;
	uint32_t full_size;
	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
	uint16_t rpi = 0;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
	union lpfc_wqe128 *cmdwqe, *rspwqe;
	struct lpfc_sli_ct_request *ctreq;
	struct lpfc_dmabuf *txbmp;
	struct ulp_bde64 *txbpl = NULL;
	struct lpfc_dmabufext *txbuffer = NULL;
	struct list_head head;
	struct lpfc_dmabuf *curr;
	uint16_t txxri = 0, rxxri;
	uint32_t num_bde;
	uint8_t *ptr = NULL, *rx_databuf = NULL;
	int rc = 0;
	int time_left;
	int iocb_stat = IOCB_SUCCESS;
	unsigned long flags;
	void *dataout = NULL;
	uint32_t total_mem;

	/* in case no data is returned return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	/* loopback must echo exactly what was sent */
	if (job->request_payload.payload_len !=
		job->reply_payload.payload_len) {
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	/* reject while errored, mgmt-blocked, or SLI inactive */
	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ;

	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		rc = -ERANGE;
		goto loopback_test_exit;
	}

	if (full_size >= BUF_SZ_4K) {
		/*
		 * Cap the staging buffer at 64K; the payload itself is
		 * still split across 4K DMA chunks by diag_cmd_data_alloc.
		 */
		if (size <= (64 * 1024))
			total_mem = full_size;
		else
			total_mem = 64 * 1024;
	} else
		/* small payloads still get a full 4K staging buffer */
		total_mem = BUF_SZ_4K;

	dataout = kmalloc(total_mem, GFP_KERNEL);
	if (dataout == NULL) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	/* leave room in front for the loopback CT header */
	ptr = dataout;
	ptr += ELX_LOOPBACK_HEADER_SZ;
	sg_copy_to_buffer(job->request_payload.sg_list,
				job->request_payload.sg_cnt,
				ptr, size);
	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc)
		goto loopback_test_exit;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		/* SLI-3: negotiate xri pair and pre-post rx buffers */
		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}

		rc = lpfcdiag_sli3_loop_post_rxbufs(phba, rxxri, full_size);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}
	}
	/* register for the unsolicited CT event carrying the echo back */
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (phba->sli_rev < LPFC_SLI_REV4)
		rspiocbq = lpfc_sli_get_iocbq(phba);
	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

	if (txbmp) {
		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
		if (txbmp->virt) {
			INIT_LIST_HEAD(&txbmp->list);
			txbpl = (struct ulp_bde64 *) txbmp->virt;
			txbuffer = diag_cmd_data_alloc(phba,
						       txbpl, full_size, 0);
		}
	}

	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}
	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}

	cmdwqe = &cmdiocbq->wqe;
	memset(cmdwqe, 0, sizeof(union lpfc_wqe));
	if (phba->sli_rev < LPFC_SLI_REV4) {
		rspwqe = &rspiocbq->wqe;
		memset(rspwqe, 0, sizeof(union lpfc_wqe));
	}

	/*
	 * Scatter the staged payload into the tx DMA chunks; the first
	 * chunk additionally gets the loopback CT header built in place.
	 */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
			ptr + current_offset,
			segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
	list_del(&head);

	/* Build the XMIT_SEQUENCE iocb (WQE on SLI-4) */
	num_bde = (uint32_t)txbuffer->flag;

	cmdiocbq->num_bdes = num_bde;
	cmdiocbq->cmd_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->cmd_flag |= LPFC_IO_LOOPBACK;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->cmd_cmpl = NULL;
	cmdiocbq->bpl_dmabuf = txbmp;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp, 0, txxri,
					 num_bde, FC_RCTL_DD_UNSOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_CX);

	} else {
		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, txbmp,
					 phba->sli4_hba.rpi_ids[rpi], 0xffff,
					 full_size, FC_RCTL_DD_UNSOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_WQE);
		cmdiocbq->sli4_xritag = NO_XRI;
	}

	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq, (phba->fc_ratov * 2) +
					     LPFC_DRVR_TIMEOUT);
	if (iocb_stat != IOCB_SUCCESS ||
	    (phba->sli_rev < LPFC_SLI_REV4 &&
	     (get_job_ulpstatus(phba, rspiocbq) != IOSTAT_SUCCESS))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3126 Failed loopback test issue iocb: "
				"iocb_stat:x%x\n", iocb_stat);
		rc = -EIO;
		goto err_loopback_test_exit;
	}

	/* wait for the echoed payload to arrive as an unsolicited event */
	evt->waiting = 1;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	evt->waiting = 0;
	if (list_empty(&evt->events_to_see)) {
		rc = (time_left) ? -EINTR : -ETIMEDOUT;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3125 Not receiving unsolicited event, "
				"rc:x%x\n", rc);
	} else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rx_databuf = evdat->data;
		if (evdat->len != full_size) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"1603 Loopback test did not receive expected "
				"data length. actual length 0x%x expected "
				"length 0x%x\n",
				evdat->len, full_size);
			rc = -EIO;
		} else if (rx_databuf == NULL)
			rc = -EIO;
		else {
			rc = IOCB_SUCCESS;
			/* skip over the loopback header before copy-out */
			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    rx_databuf, size);
			bsg_reply->reply_payload_rcv_len = size;
		}
	}

err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	/* drop both event references (creation + waiter list) */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* on IOCB_TIMEDOUT the completion path still owns the iocb */
	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);

	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == IOCB_SUCCESS)
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rc;
}
3317
3318
3319
3320
3321
3322 static int
3323 lpfc_bsg_get_dfc_rev(struct bsg_job *job)
3324 {
3325 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3326 struct fc_bsg_reply *bsg_reply = job->reply;
3327 struct lpfc_hba *phba = vport->phba;
3328 struct get_mgmt_rev_reply *event_reply;
3329 int rc = 0;
3330
3331 if (job->request_len <
3332 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3333 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3334 "2740 Received GET_DFC_REV request below "
3335 "minimum size\n");
3336 rc = -EINVAL;
3337 goto job_error;
3338 }
3339
3340 event_reply = (struct get_mgmt_rev_reply *)
3341 bsg_reply->reply_data.vendor_reply.vendor_rsp;
3342
3343 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
3344 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3345 "2741 Received GET_DFC_REV reply below "
3346 "minimum size\n");
3347 rc = -EINVAL;
3348 goto job_error;
3349 }
3350
3351 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3352 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3353 job_error:
3354 bsg_reply->result = rc;
3355 if (rc == 0)
3356 bsg_job_done(job, bsg_reply->result,
3357 bsg_reply->reply_payload_rcv_len);
3358 return rc;
3359 }
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
/**
 * lpfc_bsg_issue_mbox_cmpl - completion handler for bsg mailbox commands
 * @phba: pointer to the HBA context object
 * @pmboxq: completed mailbox queue entry; its ctx_ndlp carries the
 *          bsg_job_data for the originating job
 *
 * Copies the completed mailbox contents into the job's staging buffer and,
 * if the job is still outstanding (not timed out/aborted by the caller),
 * copies the data to the job's reply payload and completes the job.  All
 * per-command resources (mailbox, dma buffers, dd_data) are released here.
 */
static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job *job;
	uint32_t size;
	unsigned long flags;
	uint8_t *pmb, *pmb_buf;

	dd_data = pmboxq->ctx_ndlp;

	/*
	 * Snapshot the mailbox into the staging buffer before touching the
	 * job, so the data survives even if the job has gone away.
	 */
	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));

	/* Determine if job has been aborted */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job  */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the mailbox data to the job if it is still active */

	if (job) {
		bsg_reply = job->reply;
		size = job->reply_payload.payload_len;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);
	}

	dd_data->set_job = NULL;
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = 0;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
/**
 * lpfc_bsg_check_cmd_access - gate which mailbox commands userspace may issue
 * @phba: pointer to the HBA context object
 * @mb: mailbox command requested by the bsg job
 * @vport: pointer to the vport the command targets
 *
 * Whitelist check for pass-through mailbox commands.  Link/ring control
 * commands are only allowed while the port is offline; a set of read/dump/
 * diagnostic commands is allowed unconditionally; everything else
 * (including login/config commands the driver must own) is rejected.
 *
 * Return: 0 if the command may be issued, -EPERM otherwise.
 */
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
	MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2743 Command 0x%x is illegal in on-line "
				"state\n",
				mb->mbxCommand);
			return -EPERM;
		}
		break;
	/* Online / offline */
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_READ_TOPOLOGY:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_RUN_BIU_DIAG64:
		break;
	/* Allowed, but logged for visibility */
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1226 mbox: set_variable 0x%x, 0x%x\n",
			mb->un.varWords[0],
			mb->un.varWords[1]);
		break;
	/* Rejected: commands the driver must remain in control of */
	case MBX_READ_SPARM64:
	case MBX_REG_LOGIN:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
			"2742 Unknown Command 0x%x\n",
			mb->mbxCommand);
		return -EPERM;
	}

	return 0;
}
3511
3512
3513
3514
3515
3516
3517
3518
3519 static void
3520 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3521 {
3522 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3523 return;
3524
3525
3526 lpfc_bsg_dma_page_list_free(phba,
3527 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3528 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3529
3530 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3531 sizeof(struct lpfc_mbox_ext_buf_ctx));
3532 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3533
3534 return;
3535 }
3536
3537
3538
3539
3540
3541
3542
3543
3544
/**
 * lpfc_bsg_issue_mbox_ext_handle_job - common completion work for ext mbox
 * @phba: pointer to the HBA context object
 * @pmboxq: completed mailbox; its ctx_buf carries the bsg_job_data
 *
 * Shared tail for the read/write extended-buffer completion handlers.
 * Copies the completed mailbox into the staging buffer, byte-swaps the
 * first embedded extension buffer when the SLI_CONFIG is non-embedded,
 * copies the result into the job's reply payload (if the job is still
 * active), marks the session LPFC_BSG_MBOX_DONE and frees dd_data.
 *
 * Return: the originating bsg job if still outstanding, NULL if it was
 * already timed out/aborted.  The caller completes the job and frees the
 * mailbox.
 */
static struct bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	uint8_t *pmb, *pmb_buf;
	unsigned long flags;
	uint32_t size;
	int rc = 0;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint8_t *pmbx;

	dd_data = pmboxq->ctx_buf;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job  */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/*
	 * Copy the completed mailbox into the staging buffer; done before
	 * any job handling so the snapshot is taken regardless.
	 */
	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	/* Copy the byte swapped response mailbox back to the user */
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
	/* if there is any non-embedded extended data copy that too */
	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		pmbx = (uint8_t *)dmabuf->virt;
		/* byte swap the extended data following the mailbox command */
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
	}

	/* Complete the job if the job is still active */

	if (job) {
		size = job->reply_payload.payload_len;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);

		/* result for successful */
		bsg_reply->result = 0;

		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2937 SLI_CONFIG ext-buffer mailbox command "
				"(x%x/x%x) complete bsg job done, bsize:%d\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, size);
		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
					phba->mbox_ext_buf_ctx.nembType,
					phba->mbox_ext_buf_ctx.mboxType,
					dma_ebuf, sta_pos_addr,
					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2938 SLI_CONFIG ext-buffer mailbox "
				"command (x%x/x%x) failure, rc:x%x\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, rc);
	}


	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
	kfree(dd_data);
	return job;
}
3628
3629
3630
3631
3632
3633
3634
3635
3636
/**
 * lpfc_bsg_issue_read_mbox_ext_cmpl - completion for ext-buffer read mbox
 * @phba: pointer to the HBA context object
 * @pmboxq: completed mailbox queue entry
 *
 * Completion handler for SLI_CONFIG read commands with external buffers.
 * Runs the shared handler, resets the multi-buffer session when the
 * command failed or only a single buffer was involved, frees the mailbox
 * and completes the bsg job if it is still active.
 */
static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;

	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);

	/* handle the BSG job with mailbox command */
	if (!job)
		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2939 SLI_CONFIG ext-buffer rd mailbox command "
			"complete, ctxState:x%x, mbxStatus:x%x\n",
			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);

	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
		lpfc_bsg_mbox_ext_session_reset(phba);

	/* free base driver mailbox structure memory */
	mempool_free(pmboxq, phba->mbox_mem_pool);

	/* if the job is still active, call job done */
	if (job) {
		bsg_reply = job->reply;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}
3668
3669
3670
3671
3672
3673
3674
3675
3676
/**
 * lpfc_bsg_issue_write_mbox_ext_cmpl - completion for ext-buffer write mbox
 * @phba: pointer to the HBA context object
 * @pmboxq: completed mailbox queue entry
 *
 * Completion handler for SLI_CONFIG write commands with external buffers.
 * Runs the shared handler, frees the mailbox, unconditionally resets the
 * multi-buffer session (unlike the read path) and completes the bsg job
 * if it is still active.
 */
static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;

	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);

	/* handle the BSG job with the mailbox command */
	if (!job)
		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2940 SLI_CONFIG ext-buffer wr mailbox command "
			"complete, ctxState:x%x, mbxStatus:x%x\n",
			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);

	/* free all memory, including dma buffers */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_mbox_ext_session_reset(phba);

	/* if the job is still active, call job done */
	if (job) {
		bsg_reply = job->reply;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return;
}
3707
/**
 * lpfc_bsg_sli_cfg_dma_desc_setup - set a dma descriptor in a SLI_CONFIG mbox
 * @phba: pointer to the HBA context object
 * @nemb_tp: nemb_mse for embedded-0 (mse) descriptors, otherwise embedded-1
 *           (hbd) descriptors
 * @index: descriptor slot to fill
 * @mbx_dmabuf: dma page holding the mailbox itself; descriptor 0 points
 *              just past the MAILBOX_t within this page
 * @ext_dmabuf: external dma page used for descriptors with @index > 0
 *
 * Writes the physical address (high/low split) of the appropriate buffer
 * into descriptor slot @index of the non-embedded SLI_CONFIG mailbox and
 * logs the resulting entry.  Only addresses are written here; buffer
 * lengths are expected to already be present in the descriptor.
 */
static void
lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
				struct lpfc_dmabuf *ext_dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		if (index == 0) {
			/* slot 0 points into the mailbox page itself */
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2943 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		} else {
			/* later slots point at a separate external page */
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2944 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		}
	} else {
		if (index == 0) {
			/* slot 0 points into the mailbox page itself */
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3007 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
					&sli_cfg_mbx->un.
					sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);

		} else {
			/* later slots point at a separate external page */
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3008 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
					&sli_cfg_mbx->un.
					sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);
		}
	}
	return;
}
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
/**
 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type (mse or hbd).
 * @dmabuf: DMA buffer holding the SLI_CONFIG mailbox command from userspace.
 *
 * Issue a SLI_CONFIG non-embedded mailbox read command that uses external
 * buffers.  Extra DMA pages are allocated for every external buffer beyond
 * the first, DMA descriptors inside the mailbox are rewritten to point at
 * the driver's pages, and the mailbox is issued asynchronously; the
 * completion handler (lpfc_bsg_issue_read_mbox_ext_cmpl) finishes the job.
 *
 * Return: SLI_CONFIG_HANDLED on successful issue, negative errno on failure
 * (in which case the multi-buffer session state is reset to IDLE).
 **/
static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			      enum nemb_type nemb_tp,
			      struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
	uint32_t ext_buf_cnt, ext_buf_index;
	struct lpfc_dmabuf *ext_dmabuf = NULL;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *pmbx;
	int rc, i;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		/* external buffer count comes from the mse header field */
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2945 Handled SLI_CONFIG(mse) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2941 Handled SLI_CONFIG(mse) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for hbd support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
		    LPFC_SLI_INTF_IF_TYPE_2) {
			rc = -ENODEV;
			goto job_error;
		}
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2946 Handled SLI_CONFIG(hbd) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2942 Handled SLI_CONFIG(hbd) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* idiag debugfs dump: before dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	/* reject non-embedded mailbox command with no external buffer */
	if (ext_buf_cnt == 0) {
		rc = -EPERM;
		goto job_error;
	} else if (ext_buf_cnt > 1) {
		/* additional external read buffers beyond the first */
		for (i = 1; i < ext_buf_cnt; i++) {
			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
			if (!ext_dmabuf) {
				rc = -ENOMEM;
				goto job_error;
			}
			list_add_tail(&ext_dmabuf->list,
				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
		}
	}

	/* bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	/* mailbox command structure for base driver */
	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	/* dma descriptor for the first external buffer (the mailbox page) */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* dma descriptors for the rest of the external buffers, if any */
	if (ext_buf_cnt > 1) {
		ext_buf_index = 1;
		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
						ext_buf_index, dmabuf,
						curr_dmabuf);
			ext_buf_index++;
		}
	}

	/* idiag debugfs dump: after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* construct base driver mbox command from the user's mailbox image */
	pmb = &pmboxq->u.mb;
	pmbx = (uint8_t *)dmabuf->virt;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = phba->pport;

	/* multi-buffer handling context for the session */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	/* callback for multi-buffer read mailbox command */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;

	/* context fields passed through to the callback function */
	pmboxq->ctx_buf = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	job->dd_data = dd_data;

	/* state change: command handed to the port */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

	/*
	 * Non-embedded mailbox subcommand data gets byte swapped here because
	 * the lower level driver code only does the first 64 mailbox words.
	 */
	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
	    (nemb_tp == nemb_mse))
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
				sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[0].buf_len);

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2947 Issued SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		return SLI_CONFIG_HANDLED;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2948 Failed to issue SLI_CONFIG ext-buffer "
			"mailbox command, rc:x%x\n", rc);
	rc = -EPIPE;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	/* free any extra pages queued above; list may be empty */
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	kfree(dd_data);
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
	return rc;
}
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999
/**
 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type (mse or hbd).
 * @dmabuf: DMA buffer holding the SLI_CONFIG mailbox command from userspace.
 *
 * Issue a SLI_CONFIG non-embedded mailbox write command that uses external
 * buffers.  With a single external buffer the mailbox is issued immediately
 * (asynchronously, finished by lpfc_bsg_issue_write_mbox_ext_cmpl); with
 * multiple buffers the session context is primed and the job completes so
 * the application can deliver the remaining buffers one by one (see
 * lpfc_bsg_write_ebuf_set).
 *
 * Return: SLI_CONFIG_HANDLED on success, negative errno on failure.
 **/
static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			       enum nemb_type nemb_tp,
			       struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t ext_buf_cnt;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *mbx;
	int rc = SLI_CONFIG_NOT_HANDLED, i;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2953 Failed SLI_CONFIG(mse) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2949 Handled SLI_CONFIG(mse) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for hbd support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
		    LPFC_SLI_INTF_IF_TYPE_2)
			return -ENODEV;
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2954 Failed SLI_CONFIG(hbd) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2950 Handled SLI_CONFIG(hbd) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* idiag debugfs dump: before dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	if (ext_buf_cnt == 0)
		return -EPERM;

	/* dma descriptor for the first external buffer (the mailbox page) */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* idiag debugfs dump: after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* log the lengths of the external buffers still to be delivered */
	for (i = 1; i < ext_buf_cnt; i++) {
		if (nemb_tp == nemb_mse)
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[i].buf_len);
		else
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
				&sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[i]));
	}

	/* multi-buffer handling context for the session */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	if (ext_buf_cnt == 1) {
		/* single buffer: everything is here, issue immediately */
		/* bsg tracking structure */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pmb = &pmboxq->u.mb;
		mbx = (uint8_t *)dmabuf->virt;
		memcpy(pmb, mbx, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields passed through to the callback function */
		pmboxq->ctx_buf = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
		job->dd_data = dd_data;

		/* state change: command handed to the port */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2955 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2956 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* multiple buffers: complete this job and wait for the application
	 * to deliver the additional external buffers
	 */
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(dd_data);

	return rc;
}
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
/**
 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job object.
 * @dmabuf: DMA buffer holding the SLI_CONFIG mailbox command from userspace.
 *
 * Dispatch a SLI_CONFIG mailbox command with non-embedded external buffers
 * to the read or write handler based on the command's subsystem and opcode.
 * Embedded-bit clear selects the emb0 (mse) layout; embedded-bit set selects
 * the emb1 (hbd) layout.
 *
 * Return: SLI_CONFIG_HANDLED on dispatch to a handler that took the command,
 * SLI_CONFIG_NOT_HANDLED when the opcode is left to the generic path, or a
 * negative errno on rejection.  On anything other than SLI_CONFIG_HANDLED
 * the session state is returned to IDLE.
 **/
static int
lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t subsys;
	uint32_t opcode;
	int rc = SLI_CONFIG_NOT_HANDLED;

	/* state change on the multi-buffer session: host is assembling */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		/* non-embedded, emb0 (mse) layout */
		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
			switch (opcode) {
			case FCOE_OPCODE_READ_FCF:
			case FCOE_OPCODE_GET_DPORT_RESULTS:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2957 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			case FCOE_OPCODE_ADD_FCF:
			case FCOE_OPCODE_SET_DPORT_MODE:
			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2958 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2959 Reject SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
			case COMN_OPCODE_GET_PROFILE_CONFIG:
			case COMN_OPCODE_SET_FEATURES:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3106 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3107 Reject SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2977 Reject SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = -EPERM;
		}
	} else {
		/* embedded, emb1 (hbd) layout */
		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_READ_OBJECT:
			case COMN_OPCODE_READ_OBJECT_LIST:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2960 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			case COMN_OPCODE_WRITE_OBJECT:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2961 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2962 Not handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = SLI_CONFIG_NOT_HANDLED;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2978 Not handled SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = SLI_CONFIG_NOT_HANDLED;
		}
	}

	/* if the command was not taken by a handler, drop back to idle */
	if (rc != SLI_CONFIG_HANDLED)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;

	return rc;
}
4291
4292
4293
4294
4295
4296
4297
4298
4299 static void
4300 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4301 {
4302 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4303 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4304 else
4305 lpfc_bsg_mbox_ext_session_reset(phba);
4306 return;
4307 }
4308
4309
4310
4311
4312
4313
4314
4315
4316
/**
 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job object.
 *
 * Copy the next external buffer of a completed multi-buffer read mailbox
 * command into the job's reply payload.  The buffer index is the session's
 * running sequence number.  When the last buffer has been handed out, the
 * session is reset.
 *
 * Return: SLI_CONFIG_HANDLED on success (job completed), -EPIPE when no
 * buffer is queued for this sequence number.
 **/
static int
lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct lpfc_dmabuf *dmabuf;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;

	/* current buffer index; advance the session sequence number */
	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	/* size of this buffer comes from the descriptor in the mailbox */
	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2963 SLI_CONFIG (mse) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	} else {
		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	}
	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
		return -EPIPE;
	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
				  struct lpfc_dmabuf, list);
	list_del_init(&dmabuf->list);

	/* idiag debugfs dump of the buffer handed to the application */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_rd, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	pbuf = (uint8_t *)dmabuf->virt;
	bsg_reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    pbuf, size);

	/* buffer content delivered; free the driver page */
	lpfc_bsg_dma_page_free(phba, dmabuf);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
				"command session done\n");
		lpfc_bsg_mbox_ext_session_reset(phba);
	}

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);

	return SLI_CONFIG_HANDLED;
}
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
/**
 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job object.
 * @dmabuf: DMA page to receive this external buffer's payload.
 *
 * Accept the next external buffer of a multi-buffer write mailbox command
 * from the job's request payload, set up its DMA descriptor in the pending
 * mailbox, and queue it on the session's buffer list.  Once all buffers
 * have arrived the mailbox command is issued asynchronously and finished
 * by lpfc_bsg_issue_write_mbox_ext_cmpl; otherwise the job completes so
 * the application can send the next buffer.
 *
 * Return: SLI_CONFIG_HANDLED on success, negative errno on failure.
 **/
static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
			struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	enum nemb_type nemb_tp;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;
	int rc;

	/* current buffer index; advance the session sequence number */
	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;
	nemb_tp = phba->mbox_ext_buf_ctx.nembType;

	/* copy the application's payload into the driver DMA page */
	pbuf = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pbuf, size);

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2966 SLI_CONFIG (mse) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);

	}

	/* set up external buffer descriptor in the pending mailbox and
	 * queue the page on the session's buffer list
	 */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
					phba->mbox_ext_buf_ctx.mbx_dmabuf,
					dmabuf);
	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	/* idiag debugfs dump of the buffer received from the application */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_wr, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		/* last buffer arrived: issue the mailbox command now */
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2968 SLI_CONFIG ext-buffer wr all %d "
				"ebuffers received\n",
				phba->mbox_ext_buf_ctx.numBuf);

		/* bsg tracking structure */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
		pmb = &pmboxq->u.mb;
		memcpy(pmb, pbuf, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields passed through to the callback function */
		pmboxq->ctx_buf = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
		job->dd_data = dd_data;

		/* state change: command handed to the port */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2969 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2970 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* more buffers to come: complete this job and wait for the next */
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

	return rc;
}
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513
4514 static int
4515 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4516 struct lpfc_dmabuf *dmabuf)
4517 {
4518 int rc;
4519
4520 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4521 "2971 SLI_CONFIG buffer (type:x%x)\n",
4522 phba->mbox_ext_buf_ctx.mboxType);
4523
4524 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4525 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4526 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4527 "2972 SLI_CONFIG rd buffer state "
4528 "mismatch:x%x\n",
4529 phba->mbox_ext_buf_ctx.state);
4530 lpfc_bsg_mbox_ext_abort(phba);
4531 return -EPIPE;
4532 }
4533 rc = lpfc_bsg_read_ebuf_get(phba, job);
4534 if (rc == SLI_CONFIG_HANDLED)
4535 lpfc_bsg_dma_page_free(phba, dmabuf);
4536 } else {
4537 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4538 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4539 "2973 SLI_CONFIG wr buffer state "
4540 "mismatch:x%x\n",
4541 phba->mbox_ext_buf_ctx.state);
4542 lpfc_bsg_mbox_ext_abort(phba);
4543 return -EPIPE;
4544 }
4545 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4546 }
4547 return rc;
4548 }
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
/**
 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job object.
 * @dmabuf: DMA page carrying the mailbox command or an external buffer.
 *
 * Entry point for the SLI4 multi-buffer mailbox protocol.  A request with
 * tag 0 / seq 0 is a plain mailbox command and is left to the generic path.
 * Otherwise, seq 1 on an idle session starts a new session, and subsequent
 * buffers are validated against the session's tag and sequence before being
 * routed to the external-buffer handler.  Any protocol violation resets the
 * session and reports a broken pipe.
 *
 * Return: SLI_CONFIG_NOT_HANDLED for plain commands, handler result for
 * session traffic, or -EPIPE on a broken session.
 **/
static int
lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
			    struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct dfc_mbox_req *mbox_req;
	int rc = SLI_CONFIG_NOT_HANDLED;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* mbox command with/without single external buffer */
	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
		return rc;

	/* mbox command and first external buffer: start a session */
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
		if (mbox_req->extSeqNum == 1) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2974 SLI_CONFIG mailbox: tag:%d, "
					"seq:%d\n", mbox_req->extMboxTag,
					mbox_req->extSeqNum);
			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
			return rc;
		} else
			goto sli_cfg_ext_error;
	}

	/*
	 * Session in progress: this request must carry the session's tag
	 * and the next expected sequence number, within the buffer count.
	 */
	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
		goto sli_cfg_ext_error;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2975 SLI_CONFIG mailbox external buffer: "
			"extSta:x%x, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
			mbox_req->extSeqNum);
	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
	return rc;

sli_cfg_ext_error:
	/* all other cases, broken pipe: reset the session */
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2976 SLI_CONFIG mailbox broken pipe: "
			"ctxSta:x%x, ctxNumBuf:%d "
			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state,
			phba->mbox_ext_buf_ctx.numBuf,
			phba->mbox_ext_buf_ctx.mbxTag,
			phba->mbox_ext_buf_ctx.seqNum,
			mbox_req->extMboxTag, mbox_req->extSeqNum);

	lpfc_bsg_mbox_ext_session_reset(phba);

	return -EPIPE;
}
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an application
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job object.
 * @vport: Pointer to a vport object.
 *
 * Copy the user-supplied mailbox command into a driver DMA page, patch any
 * embedded DMA addresses (BIU diag, event log, dump memory, update config,
 * non-embedded SLI4_CONFIG SGE) to point at that page, then issue the
 * command.  Offline or non-SLI-active ports are serviced by polling and the
 * job is completed by the caller; otherwise the command is issued NOWAIT
 * and lpfc_bsg_issue_mbox_cmpl finishes the job.
 *
 * Return: 0 when the command completed synchronously, 1 when the command
 * was issued and the completion handler owns the job, negative errno on
 * failure.
 **/
static int
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
	struct lpfc_vport *vport)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
	MAILBOX_t *pmb; /* shadow mailbox command, copied from user image */
	/* a 4k buffer to hold the mb and extended data from/to the bsg */
	uint8_t *pmbx = NULL;
	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
	struct lpfc_dmabuf *dmabuf = NULL;
	struct dfc_mbox_req *mbox_req;
	struct READ_EVENT_LOG_VAR *rdEventLog;
	uint32_t transmit_length, receive_length, mode;
	struct lpfc_mbx_sli4_config *sli4_config;
	struct lpfc_mbx_nembed_cmd *nembed_sge;
	struct ulp_bde64 *bde;
	uint8_t *ext = NULL;
	int rc = 0;
	uint8_t *from;
	uint32_t size;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* sanity check to protect driver */
	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
		rc = -ERANGE;
		goto job_done;
	}

	/*
	 * Don't allow the mailbox buffer to be used if we are managing the
	 * port while it is blocked for management I/O.
	 */
	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_done;
	}

	mbox_req =
	    (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* check if requested extended data lengths are valid */
	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
		rc = -ERANGE;
		goto job_done;
	}

	dmabuf = lpfc_bsg_dma_page_alloc(phba);
	if (!dmabuf || !dmabuf->virt) {
		rc = -ENOMEM;
		goto job_done;
	}

	/* Get the mailbox command or external buffer from BSG */
	pmbx = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, pmbx, size);

	/* Handle possible SLI_CONFIG with non-embedded payloads */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
		if (rc == SLI_CONFIG_HANDLED)
			goto job_cont;
		if (rc)
			goto job_done;
		/* SLI_CONFIG_NOT_HANDLED: continue as a regular mailbox */
	}

	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
	if (rc != 0)
		goto job_done; /* must be negative */

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_done;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_done;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	pmb = &pmboxq->u.mb;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = vport;

	/* If HBA encountered an error attention, allow only DUMP
	 * or RESTART mailbox commands until the HBA is restarted.
	 */
	if (phba->pport->stopped &&
	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
	    pmb->mbxCommand != MBX_RESTART &&
	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
	    pmb->mbxCommand != MBX_WRITE_WWN)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2797 mbox: Issued mailbox cmd "
				"0x%x while in stopped state.\n",
				pmb->mbxCommand);

	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
		from = pmbx;
		ext = from + sizeof(MAILBOX_t);
		pmboxq->ctx_buf = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outExtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}

	/* biu diag will need a kernel buffer to transfer the data
	 * allocate our own buffer and setup the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		transmit_length = pmb->un.varWords[1];
		receive_length = pmb->un.varWords[4];
		/* transmit length cannot be greater than receive length or
		 * mailbox extension size
		 */
		if ((transmit_length > receive_length) ||
			(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
			rc = -ERANGE;
			goto job_done;
		}
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
		rdEventLog = &pmb->un.varRdEventLog;
		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
		mode = bf_get(lpfc_event_log, rdEventLog);

		/* receive length cannot be greater than mailbox
		 * extension size
		 */
		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
			rc = -ERANGE;
			goto job_done;
		}

		/* mode zero uses a bde like biu diags command */
		if (mode == 0) {
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
							+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
							+ sizeof(MAILBOX_t));
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Let type 4 (well known data) through because the data is
		 * returned in varwords[4-8]
		 * otherwise check the recieve length and fetch the buffer addr
		 */
		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
			/* rebuild the command for sli4 using our own buffers
			* like we do for biu diags
			*/
			receive_length = pmb->un.varWords[2];
			/* receive length cannot be greater than mailbox
			 * extension size
			 */
			if (receive_length == 0) {
				rc = -ERANGE;
				goto job_done;
			}
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
			pmb->un.varUpdateCfg.co) {
			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];

			/* bde size cannot be greater than mailbox ext size */
			if (bde->tus.f.bdeSize >
			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
				rc = -ERANGE;
				goto job_done;
			}
			bde->addrHigh = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
			bde->addrLow = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
			/* Handling non-embedded SLI_CONFIG mailbox command */
			sli4_config = &pmboxq->u.mqe.un.sli4_config;
			if (!bf_get(lpfc_mbox_hdr_emb,
			    &sli4_config->header.cfg_mhdr)) {
				/* rebuild the command for sli4 using our
				 * own buffers like we do for biu diags
				 */
				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
						&pmb->un.varWords[0];
				receive_length = nembed_sge->sge[0].length;

				/* receive length cannot be greater than
				 * mailbox extension size
				 */
				if ((receive_length == 0) ||
				    (receive_length >
				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
					rc = -ERANGE;
					goto job_done;
				}

				nembed_sge->sge[0].pa_hi =
						putPaddrHigh(dmabuf->phys
						   + sizeof(MAILBOX_t));
				nembed_sge->sge[0].pa_lo =
						putPaddrLow(dmabuf->phys
						   + sizeof(MAILBOX_t));
			}
		}
	}

	dd_data->context_un.mbox.dmabuffers = dmabuf;

	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;

	/* setup context field to pass wait_queue pointer to wake function
	 * NOTE(review): dd_data rides in ctx_ndlp here because ctx_buf may
	 * already carry the extended-data pointer (ext) set above.
	 */
	pmboxq->ctx_ndlp = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	dd_data->context_un.mbox.ext = ext;
	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
	job->dd_data = dd_data;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		/* port offline or SLI not active: poll for completion */
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
			goto job_done;
		}

		/* job finished, copy the data */
		memcpy(pmbx, pmb, sizeof(*pmb));
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmbx, size);
		/* not waiting mbox already done */
		rc = 0;
		goto job_done;
	}

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
		return 1; /* job sent to the firmware */

job_done:
	/* common exit for error or job completed inline */
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

job_cont:
	return rc;
}
4925
4926
4927
4928
4929
4930 static int
4931 lpfc_bsg_mbox_cmd(struct bsg_job *job)
4932 {
4933 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
4934 struct fc_bsg_request *bsg_request = job->request;
4935 struct fc_bsg_reply *bsg_reply = job->reply;
4936 struct lpfc_hba *phba = vport->phba;
4937 struct dfc_mbox_req *mbox_req;
4938 int rc = 0;
4939
4940
4941 bsg_reply->reply_payload_rcv_len = 0;
4942 if (job->request_len <
4943 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4944 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4945 "2737 Mix-and-match backward compatibility "
4946 "between MBOX_REQ old size:%d and "
4947 "new request size:%d\n",
4948 (int)(job->request_len -
4949 sizeof(struct fc_bsg_request)),
4950 (int)sizeof(struct dfc_mbox_req));
4951 mbox_req = (struct dfc_mbox_req *)
4952 bsg_request->rqst_data.h_vendor.vendor_cmd;
4953 mbox_req->extMboxTag = 0;
4954 mbox_req->extSeqNum = 0;
4955 }
4956
4957 rc = lpfc_bsg_issue_mbox(phba, job, vport);
4958
4959 if (rc == 0) {
4960
4961 bsg_reply->result = 0;
4962 job->dd_data = NULL;
4963 bsg_job_done(job, bsg_reply->result,
4964 bsg_reply->reply_payload_rcv_len);
4965 } else if (rc == 1)
4966
4967 rc = 0;
4968 else {
4969
4970 bsg_reply->result = rc;
4971 job->dd_data = NULL;
4972 }
4973
4974 return rc;
4975 }
4976
4977 static int
4978 lpfc_forced_link_speed(struct bsg_job *job)
4979 {
4980 struct Scsi_Host *shost = fc_bsg_to_shost(job);
4981 struct lpfc_vport *vport = shost_priv(shost);
4982 struct lpfc_hba *phba = vport->phba;
4983 struct fc_bsg_reply *bsg_reply = job->reply;
4984 struct forced_link_speed_support_reply *forced_reply;
4985 int rc = 0;
4986
4987 if (job->request_len <
4988 sizeof(struct fc_bsg_request) +
4989 sizeof(struct get_forced_link_speed_support)) {
4990 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4991 "0048 Received FORCED_LINK_SPEED request "
4992 "below minimum size\n");
4993 rc = -EINVAL;
4994 goto job_error;
4995 }
4996
4997 forced_reply = (struct forced_link_speed_support_reply *)
4998 bsg_reply->reply_data.vendor_reply.vendor_rsp;
4999
5000 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
5001 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5002 "0049 Received FORCED_LINK_SPEED reply below "
5003 "minimum size\n");
5004 rc = -EINVAL;
5005 goto job_error;
5006 }
5007
5008 forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
5009 ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5010 : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5011 job_error:
5012 bsg_reply->result = rc;
5013 if (rc == 0)
5014 bsg_job_done(job, bsg_reply->result,
5015 bsg_reply->reply_payload_rcv_len);
5016 return rc;
5017 }
5018
5019
5020
5021
5022
5023
5024
5025 int
5026 lpfc_check_fwlog_support(struct lpfc_hba *phba)
5027 {
5028 struct lpfc_ras_fwlog *ras_fwlog = NULL;
5029
5030 ras_fwlog = &phba->ras_fwlog;
5031
5032 if (!ras_fwlog->ras_hwsupport)
5033 return -EACCES;
5034 else if (!ras_fwlog->ras_enabled)
5035 return -EPERM;
5036 else
5037 return 0;
5038 }
5039
5040
5041
5042
5043
5044
5045
5046 static int
5047 lpfc_bsg_get_ras_config(struct bsg_job *job)
5048 {
5049 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5050 struct lpfc_vport *vport = shost_priv(shost);
5051 struct fc_bsg_reply *bsg_reply = job->reply;
5052 struct lpfc_hba *phba = vport->phba;
5053 struct lpfc_bsg_get_ras_config_reply *ras_reply;
5054 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5055 int rc = 0;
5056
5057 if (job->request_len <
5058 sizeof(struct fc_bsg_request) +
5059 sizeof(struct lpfc_bsg_ras_req)) {
5060 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5061 "6192 FW_LOG request received "
5062 "below minimum size\n");
5063 rc = -EINVAL;
5064 goto ras_job_error;
5065 }
5066
5067
5068 rc = lpfc_check_fwlog_support(phba);
5069 if (rc)
5070 goto ras_job_error;
5071
5072 ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
5073 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5074
5075
5076 spin_lock_irq(&phba->hbalock);
5077 if (ras_fwlog->state == ACTIVE)
5078 ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
5079 else
5080 ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
5081 spin_unlock_irq(&phba->hbalock);
5082
5083 ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
5084 ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
5085
5086 ras_job_error:
5087
5088 bsg_reply->result = rc;
5089
5090
5091 if (!rc)
5092 bsg_job_done(job, bsg_reply->result,
5093 bsg_reply->reply_payload_rcv_len);
5094 return rc;
5095 }
5096
5097
5098
5099
5100
5101
5102
/**
 * lpfc_bsg_set_ras_config - start or stop RAS firmware logging
 * @job: fc_bsg_job carrying a struct lpfc_bsg_set_ras_config_req
 *
 * Handles LPFC_RASACTION_STOP_LOGGING by stopping an active log session,
 * and any other action by (re)initializing firmware logging at the
 * requested log level.
 *
 * Return: 0 on success, -EINVAL on short request or init failure,
 * -ESRCH when asked to stop logging that is not active, -EPERM/-EACCES
 * from lpfc_check_fwlog_support(), or -EINPROGRESS when logging was
 * already running and has been re-initialized.
 */
static int
lpfc_bsg_set_ras_config(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost_priv(shost);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_bsg_set_ras_config_req *ras_req;
	struct fc_bsg_request *bsg_request = job->request;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint8_t action = 0, log_level = 0;
	int rc = 0, action_status = 0;

	/* Reject requests smaller than the expected vendor command. */
	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct lpfc_bsg_set_ras_config_req)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"6182 Received RAS_LOG request "
				"below minimum size\n");
		rc = -EINVAL;
		goto ras_job_error;
	}

	/* Bail out if the adapter cannot do RAS logging at all. */
	rc = lpfc_check_fwlog_support(phba);
	if (rc)
		goto ras_job_error;

	ras_req = (struct lpfc_bsg_set_ras_config_req *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	action = ras_req->action;
	log_level = ras_req->log_level;

	if (action == LPFC_RASACTION_STOP_LOGGING) {
		/* Logging must currently be active to be stopped;
		 * check the state under hbalock.
		 */
		spin_lock_irq(&phba->hbalock);
		if (ras_fwlog->state != ACTIVE) {
			spin_unlock_irq(&phba->hbalock);
			rc = -ESRCH;
			goto ras_job_error;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Disable firmware logging. */
		lpfc_ras_stop_fwlog(phba);
	} else {
		/* Start/restart path: remember whether logging was already
		 * in progress, but re-initialize with the requested level
		 * either way.
		 */
		spin_lock_irq(&phba->hbalock);
		if (ras_fwlog->state != INACTIVE)
			action_status = -EINPROGRESS;
		spin_unlock_irq(&phba->hbalock);

		/* Enable firmware logging at the requested level. */
		rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
					      LPFC_RAS_ENABLE_LOGGING);
		if (rc) {
			rc = -EINVAL;
			goto ras_job_error;
		}

		/* Report that an existing session was re-initialized.
		 * NOTE(review): with rc == -EINPROGRESS the job is not
		 * completed via bsg_job_done() below - presumably the bsg
		 * midlayer handles the negative return; confirm.
		 */
		if (action_status == -EINPROGRESS)
			rc = action_status;
	}
ras_job_error:
	/* Make the status available to userspace. */
	bsg_reply->result = rc;

	/* Complete the job only on full success. */
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);

	return rc;
}
5183
5184
5185
5186
5187
5188
5189
5190
5191 static int
5192 lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
5193 {
5194 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5195 struct lpfc_vport *vport = shost_priv(shost);
5196 struct lpfc_bsg_get_ras_lwpd *ras_reply;
5197 struct lpfc_hba *phba = vport->phba;
5198 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5199 struct fc_bsg_reply *bsg_reply = job->reply;
5200 u32 *lwpd_ptr = NULL;
5201 int rc = 0;
5202
5203 rc = lpfc_check_fwlog_support(phba);
5204 if (rc)
5205 goto ras_job_error;
5206
5207 if (job->request_len <
5208 sizeof(struct fc_bsg_request) +
5209 sizeof(struct lpfc_bsg_ras_req)) {
5210 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5211 "6183 Received RAS_LOG request "
5212 "below minimum size\n");
5213 rc = -EINVAL;
5214 goto ras_job_error;
5215 }
5216
5217 ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
5218 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5219
5220 if (!ras_fwlog->lwpd.virt) {
5221 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5222 "6193 Restart FW Logging\n");
5223 rc = -EINVAL;
5224 goto ras_job_error;
5225 }
5226
5227
5228 lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
5229 ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
5230
5231
5232 ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
5233
5234 ras_job_error:
5235
5236 bsg_reply->result = rc;
5237
5238
5239 if (!rc)
5240 bsg_job_done(job, bsg_reply->result,
5241 bsg_reply->reply_payload_rcv_len);
5242
5243 return rc;
5244 }
5245
5246
5247
5248
5249
5250
5251
/**
 * lpfc_bsg_get_ras_fwlog - read a chunk of the RAS firmware log
 * @job: fc_bsg_job carrying a struct lpfc_bsg_get_fwlog_req
 *
 * Copies read_size bytes of firmware log data, starting at read_offset,
 * from the driver's DMA log buffer list into the job's reply payload.
 * Reads are only permitted while logging is stopped.
 *
 * Return: 0 on success, -EINPROGRESS while logging is active, -EINVAL on
 * a short request, -ENOMEM on allocation failure, or the error from
 * lpfc_check_fwlog_support().
 */
static int
lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost_priv(shost);
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_bsg_get_fwlog_req *ras_req;
	u32 rd_offset, rd_index, offset;
	void *src, *fwlog_buff;
	struct lpfc_ras_fwlog *ras_fwlog = NULL;
	struct lpfc_dmabuf *dmabuf, *next;
	int rc = 0;

	ras_fwlog = &phba->ras_fwlog;

	rc = lpfc_check_fwlog_support(phba);
	if (rc)
		goto ras_job_error;

	/* Logging must be stopped before the buffers can be read back. */
	spin_lock_irq(&phba->hbalock);
	if (ras_fwlog->state == ACTIVE) {
		spin_unlock_irq(&phba->hbalock);
		rc = -EINPROGRESS;
		goto ras_job_error;
	}
	spin_unlock_irq(&phba->hbalock);

	/* Reject requests smaller than the expected vendor command. */
	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct lpfc_bsg_get_fwlog_req)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"6184 Received RAS_LOG request "
				"below minimum size\n");
		rc = -EINVAL;
		goto ras_job_error;
	}

	ras_req = (struct lpfc_bsg_get_fwlog_req *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	rd_offset = ras_req->read_offset;

	/* Staging buffer for the requested amount of log data.
	 * NOTE(review): read_size and read_offset come from userspace and
	 * are not bounded here; the memcpy below copies read_size bytes out
	 * of a single LPFC_RAS_MAX_ENTRY_SIZE buffer - confirm the
	 * management application constrains these values.
	 */
	fwlog_buff = vmalloc(ras_req->read_size);
	if (!fwlog_buff) {
		rc = -ENOMEM;
		goto ras_job_error;
	}

	/* Locate the DMA buffer (by tag) and intra-buffer offset that
	 * contain rd_offset.
	 */
	rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
	offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);

	list_for_each_entry_safe(dmabuf, next,
				 &ras_fwlog->fwlog_buff_list, list) {
		/* Skip buffers before the one holding rd_offset. */
		if (dmabuf->buffer_tag < rd_index)
			continue;

		src = dmabuf->virt + offset;
		memcpy(fwlog_buff, src, ras_req->read_size);
		break;
	}

	/* Hand the staged data to the job's scatter/gather reply list. */
	bsg_reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    fwlog_buff, ras_req->read_size);

	vfree(fwlog_buff);

ras_job_error:
	bsg_reply->result = rc;
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);

	return rc;
}
5332
/**
 * lpfc_get_trunk_info - report trunking link state and configuration
 * @job: fc_bsg_job for the GET_TRUNK_INFO vendor command
 *
 * Fills a struct lpfc_trunk_info reply with the overall link status, the
 * active/configured state of each of the four trunk links, and the port
 * and logical link speeds (converted from Mbps to Gbps).
 *
 * Return: 0 on success, -EINVAL when the request or reply buffer is too
 * small or the reply pointer is NULL.
 */
static int
lpfc_get_trunk_info(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_trunk_info *event_reply;
	int rc = 0;

	/* Reject requests smaller than the expected vendor command. */
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2744 Received GET TRUNK _INFO request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply = (struct lpfc_trunk_info *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;

	/* Make sure the reply buffer can hold the full trunk info. */
	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2728 Received GET TRUNK _INFO reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}
	if (event_reply == NULL) {
		rc = -EINVAL;
		goto job_error;
	}

	/* Overall link status: up when the HBA link state is at least up. */
	bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
		   (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);

	/* Per-link active state for the four possible trunk members. */
	bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
		   (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);

	bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
		   (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);

	bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
		   (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);

	bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
		   (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);

	/* Per-link configured state as reported by the SLI4 port. */
	bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
		   bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));

	bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
		   bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));

	bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
		   bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));

	bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
		   bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));

	/* Speeds are tracked in Mbps; report them in Gbps. */
	event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
	event_reply->logical_speed =
		phba->sli4_hba.link_state.logical_speed / 1000;
job_error:
	bsg_reply->result = rc;
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;

}
5404
/**
 * lpfc_get_cgnbuf_info - return or reset the congestion info buffer
 * @job: fc_bsg_job carrying a struct get_cgnbuf_info_req
 *
 * For LPFC_BSG_CGN_RESET_STAT requests, resets the congestion statistics.
 * Otherwise copies the congestion management (CMF) info buffer into the
 * job's reply payload, truncating with -E2BIG when the caller's buffer is
 * smaller than the info structure.
 *
 * Return: 0 on success, -ENOMEM on short request or allocation failure,
 * -ENOENT when CMF or the info buffer is unavailable, -EPERM for a
 * pre-V3 buffer, -E2BIG when the copy was truncated.
 */
static int
lpfc_get_cgnbuf_info(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct get_cgnbuf_info_req *cgnbuf_req;
	struct lpfc_cgn_info *cp;
	uint8_t *cgn_buff;
	int size, cinfosz;
	int rc = 0;

	/* NOTE(review): -ENOMEM for a short request looks like it should
	 * be -EINVAL; kept as-is since userspace may depend on it.
	 */
	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct get_cgnbuf_info_req)) {
		rc = -ENOMEM;
		goto job_exit;
	}

	/* Congestion management must be supported by the port. */
	if (!phba->sli4_hba.pc_sli4_params.cmf) {
		rc = -ENOENT;
		goto job_exit;
	}

	/* The congestion info buffer must have been allocated. */
	if (!phba->cgn_i || !phba->cgn_i->virt) {
		rc = -ENOENT;
		goto job_exit;
	}

	cp = phba->cgn_i->virt;
	/* Only version 3 or later info buffers are returned. */
	if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
		rc = -EPERM;
		goto job_exit;
	}

	cgnbuf_req = (struct get_cgnbuf_info_req *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* A reset request returns no payload. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
		lpfc_init_congestion_stat(phba);
		goto job_exit;
	}

	/* Copy the info buffer minus its trailing 32-bit word
	 * (presumably the checksum word - confirm against
	 * struct lpfc_cgn_info).
	 */
	cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);

	size = cgnbuf_req->read_size;
	if (!size)
		goto job_exit;

	if (size < cinfosz) {
		/* Caller's buffer is smaller: copy what fits and flag it. */
		cinfosz = size;
		rc = -E2BIG;
	}

	/* Stage the data so sg_copy_from_buffer sees a flat buffer. */
	cgn_buff = vmalloc(cinfosz);
	if (!cgn_buff) {
		rc = -ENOMEM;
		goto job_exit;
	}

	memcpy(cgn_buff, cp, cinfosz);

	bsg_reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    cgn_buff, cinfosz);

	vfree(cgn_buff);

job_exit:
	bsg_reply->result = rc;
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2724 GET CGNBUF error: %d\n", rc);
	return rc;
}
5490
5491
5492
5493
5494
5495 static int
5496 lpfc_bsg_hst_vendor(struct bsg_job *job)
5497 {
5498 struct fc_bsg_request *bsg_request = job->request;
5499 struct fc_bsg_reply *bsg_reply = job->reply;
5500 int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
5501 int rc;
5502
5503 switch (command) {
5504 case LPFC_BSG_VENDOR_SET_CT_EVENT:
5505 rc = lpfc_bsg_hba_set_event(job);
5506 break;
5507 case LPFC_BSG_VENDOR_GET_CT_EVENT:
5508 rc = lpfc_bsg_hba_get_event(job);
5509 break;
5510 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5511 rc = lpfc_bsg_send_mgmt_rsp(job);
5512 break;
5513 case LPFC_BSG_VENDOR_DIAG_MODE:
5514 rc = lpfc_bsg_diag_loopback_mode(job);
5515 break;
5516 case LPFC_BSG_VENDOR_DIAG_MODE_END:
5517 rc = lpfc_sli4_bsg_diag_mode_end(job);
5518 break;
5519 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5520 rc = lpfc_bsg_diag_loopback_run(job);
5521 break;
5522 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5523 rc = lpfc_sli4_bsg_link_diag_test(job);
5524 break;
5525 case LPFC_BSG_VENDOR_GET_MGMT_REV:
5526 rc = lpfc_bsg_get_dfc_rev(job);
5527 break;
5528 case LPFC_BSG_VENDOR_MBOX:
5529 rc = lpfc_bsg_mbox_cmd(job);
5530 break;
5531 case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
5532 rc = lpfc_forced_link_speed(job);
5533 break;
5534 case LPFC_BSG_VENDOR_RAS_GET_LWPD:
5535 rc = lpfc_bsg_get_ras_lwpd(job);
5536 break;
5537 case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
5538 rc = lpfc_bsg_get_ras_fwlog(job);
5539 break;
5540 case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
5541 rc = lpfc_bsg_get_ras_config(job);
5542 break;
5543 case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
5544 rc = lpfc_bsg_set_ras_config(job);
5545 break;
5546 case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
5547 rc = lpfc_get_trunk_info(job);
5548 break;
5549 case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
5550 rc = lpfc_get_cgnbuf_info(job);
5551 break;
5552 default:
5553 rc = -EINVAL;
5554 bsg_reply->reply_payload_rcv_len = 0;
5555
5556 bsg_reply->result = rc;
5557 break;
5558 }
5559
5560 return rc;
5561 }
5562
5563
5564
5565
5566
5567 int
5568 lpfc_bsg_request(struct bsg_job *job)
5569 {
5570 struct fc_bsg_request *bsg_request = job->request;
5571 struct fc_bsg_reply *bsg_reply = job->reply;
5572 uint32_t msgcode;
5573 int rc;
5574
5575 msgcode = bsg_request->msgcode;
5576 switch (msgcode) {
5577 case FC_BSG_HST_VENDOR:
5578 rc = lpfc_bsg_hst_vendor(job);
5579 break;
5580 case FC_BSG_RPT_ELS:
5581 rc = lpfc_bsg_rport_els(job);
5582 break;
5583 case FC_BSG_RPT_CT:
5584 rc = lpfc_bsg_send_mgmt_cmd(job);
5585 break;
5586 default:
5587 rc = -EINVAL;
5588 bsg_reply->reply_payload_rcv_len = 0;
5589
5590 bsg_reply->result = rc;
5591 break;
5592 }
5593
5594 return rc;
5595 }
5596
5597
5598
5599
5600
5601
5602
5603
/**
 * lpfc_bsg_timeout - handle the timeout of a bsg request
 * @job: fc_bsg_job that has timed out
 *
 * Detaches the job from its driver-side context (dd_data) so the normal
 * completion handler will no longer touch it, then cancels or aborts
 * whatever the driver still has in flight for the job.  The job itself is
 * not completed here; only driver-side tracking is torn down.
 *
 * Return: 0 on success, -EIO when the ELS ring is unavailable, -EAGAIN
 * when the command has already completed.
 */
int
lpfc_bsg_timeout(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_sli_ring *pring;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	int rc = 0;
	LIST_HEAD(completions);
	struct lpfc_iocbq *check_iocb, *next_iocb;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/* Sever the job<->dd_data link under ct_ev_lock so a racing
	 * completion handler cannot complete the job after we return.
	 * A NULL dd_data means the command already completed; -EAGAIN
	 * lets normal completion win.
	 */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	if (dd_data) {
		dd_data->set_job = NULL;
		job->dd_data = NULL;
	} else {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return -EAGAIN;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		/* Snapshot the iocb pointer before dropping ct_ev_lock;
		 * dd_data is owned (and freed) by the completion path.
		 */
		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		/* The command may have completed between the two locks. */
		if (!(cmdiocb->cmd_flag & LPFC_IO_CMD_OUTSTANDING)) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return -EAGAIN;
		}
		/* If the iocb is still queued on the txq, pull it off and
		 * cancel it locally; otherwise it is with the hardware and
		 * must be aborted by iotag.
		 */
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list, &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		/* Cancel outside hbalock: completes the pulled iocb with
		 * LOCAL_REJECT/SLI_ABORTED.
		 */
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;

	case TYPE_EVT:
		/* Event jobs have nothing in flight to cancel. */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;

	case TYPE_MBOX:
		/* Flag an outstanding extended-buffer mailbox session as
		 * aborted; the mailbox completion path checks this state.
		 */
		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* rc is 0: the bsg midlayer owns the timed-out job from here. */
	return rc;
}