// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
	struct purex_item *item);
static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
	uint16_t size);
static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
	void *pkt);
static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp);

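/*
 * qla27xx_process_purex_fpin() - handle an FPIN ELS frame that was saved
 * off the PUREX path: dump the frame for debugging and hand the payload
 * to the FC transport via fc_host_fpin_rcv().
 */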
static void
qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
{
	void *pkt = &item->iocb;
	uint16_t pkt_size = item->size;

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
	    "%s: Enter\n", __func__);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
	    "-------- ELS REQ -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
	    pkt, pkt_size);

	fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt);
}

const char *const port_state_str[] = {
	[FCS_UNKNOWN] = "Unknown",
	[FCS_UNCONFIGURED] = "UNCONFIGURED",
	[FCS_DEVICE_DEAD] = "DEAD",
	[FCS_DEVICE_LOST] = "LOST",
	[FCS_ONLINE] = "ONLINE"
};

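/*
 * qla24xx_process_abts() - respond to an ABTS received via PUREX: first
 * terminate the aborted exchange with an ELS IOCB, then build and send a
 * BA_ACC response frame back to the originator, reusing the same DMA
 * buffer for both IOCBs.
 */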
static void
qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
{
	struct abts_entry_24xx *abts =
	    (struct abts_entry_24xx *)&pkt->iocb;
	struct qla_hw_data *ha = vha->hw;
	struct els_entry_24xx *rsp_els;
	struct abts_entry_24xx *abts_rsp;
	dma_addr_t dma;
	uint32_t fctl;
	int rval;

	ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);

	ql_log(ql_log_warn, vha, 0x0287,
	    "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
	    abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
	    abts->seq_id, abts->seq_cnt);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    "-------- ABTS RCV -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    (uint8_t *)abts, sizeof(*abts));

	rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
	    GFP_KERNEL);
	if (!rsp_els) {
		ql_log(ql_log_warn, vha, 0x0287,
		    "Failed allocate dma buffer ABTS/ELS RSP.\n");
		return;
	}

	/* terminate exchange */
	rsp_els->entry_type = ELS_IOCB_TYPE;
	rsp_els->entry_count = 1;
	rsp_els->nport_handle = cpu_to_le16(~0);
	rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
	rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
	ql_dbg(ql_dbg_init, vha, 0x0283,
	    "Sending ELS Response to terminate exchange %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    (uint8_t *)rsp_els, sizeof(*rsp_els));
	rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0288,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (rsp_els->comp_status) {
		ql_log(ql_log_warn, vha, 0x0289,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, rsp_els->comp_status,
		    rsp_els->error_subcode_1, rsp_els->error_subcode_2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028a,
		    "%s: abort exchange done.\n", __func__);
	}

	/* send ABTS response */
	abts_rsp = (void *)rsp_els;
	memset(abts_rsp, 0, sizeof(*abts_rsp));
	abts_rsp->entry_type = ABTS_RSP_TYPE;
	abts_rsp->entry_count = 1;
	abts_rsp->nport_handle = abts->nport_handle;
	abts_rsp->vp_idx = abts->vp_idx;
	abts_rsp->sof_type = abts->sof_type & 0xf0;
	abts_rsp->rx_xch_addr = abts->rx_xch_addr;
	abts_rsp->d_id[0] = abts->s_id[0];
	abts_rsp->d_id[1] = abts->s_id[1];
	abts_rsp->d_id[2] = abts->s_id[2];
	abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
	abts_rsp->s_id[0] = abts->d_id[0];
	abts_rsp->s_id[1] = abts->d_id[1];
	abts_rsp->s_id[2] = abts->d_id[2];
	abts_rsp->cs_ctl = abts->cs_ctl;
	/*
	 * Invert bit 23 (exchange context) of the received F_CTL and set
	 * last-sequence, end-sequence and sequence-initiative for the reply.
	 */
	fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
	    FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
	abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
	abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
	abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
	abts_rsp->type = FC_TYPE_BLD;
	abts_rsp->rx_id = abts->rx_id;
	abts_rsp->ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
	abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
	abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
	ql_dbg(ql_dbg_init, vha, 0x028b,
	    "Sending BA ACC response to ABTS %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    (uint8_t *)abts_rsp, sizeof(*abts_rsp));
	rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x028c,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (abts_rsp->comp_status) {
		ql_log(ql_log_warn, vha, 0x028d,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, abts_rsp->comp_status,
		    abts_rsp->payload.error.subcode1,
		    abts_rsp->payload.error.subcode2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028e,
		    "%s: done.\n", __func__);
	}

	dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
}

/**
 * __qla_consume_iocb - consume (discard) the remaining entries of a purex
 *	IOCB without copying them, marking each entry as processed so the
 *	firmware can reuse the response-queue slots.
 * @vha: host adapter pointer
 * @pkt: pointer to current iocb; updated to the last entry consumed
 * @rsp: response queue
 */
void __qla_consume_iocb(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp)
{
	struct rsp_que *rsp_q = *rsp;
	response_t *new_pkt;
	uint16_t entry_count_remaining;
	struct purex_entry_24xx *purex = *pkt;

	entry_count_remaining = purex->entry_count;
	while (entry_count_remaining > 0) {
		new_pkt = rsp_q->ring_ptr;
		*pkt = new_pkt;

		rsp_q->ring_index++;
		if (rsp_q->ring_index == rsp_q->length) {
			rsp_q->ring_index = 0;
			rsp_q->ring_ptr = rsp_q->ring;
		} else {
			rsp_q->ring_ptr++;
		}

		new_pkt->signature = RESPONSE_PROCESSED;

		wmb();
		--entry_count_remaining;
	}
}

/**
 * __qla_copy_purex_to_buffer - extract the ELS payload from a purex IOCB
 *	(and any status-continuation entries) into the supplied buffer.
 * @vha: host adapter pointer
 * @pkt: pointer to the purex IOCB; updated as entries are consumed
 * @rsp: response queue
 * @buf: buffer to copy the extracted ELS payload into
 * @buf_len: length of @buf in bytes
 */
int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count_remaining;
	u16 tpad;

	entry_count_remaining = purex->entry_count;
	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
	    - PURX_ELS_HEADER_SIZE;

	/*
	 * The payload need not end on a 4-byte boundary; round up so there
	 * is room to byte-swap the copied data below.
	 */
	tpad = roundup(total_bytes, 4);

	if (buf_len < tpad) {
		ql_dbg(ql_dbg_async, vha, 0x5084,
		    "%s buffer is too small %d < %d\n",
		    __func__, buf_len, tpad);
		__qla_consume_iocb(vha, pkt, rsp);
		return -EIO;
	}

	pending_bytes = total_bytes = tpad;
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
	    sizeof(purex->els_frame_payload) : pending_bytes;

	memcpy(buf, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;

	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				    "Unexpected IOCB type, partial data 0x%x\n",
				    buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				    no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				    "Attempt to copy more than we got, optimizing..%x\n",
				    buffer_copy_offset);
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				    total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;

			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			    "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
			    total_bytes, entry_count_remaining);
			return -EIO;
		}
	} while (entry_count_remaining > 0);

	be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);

	return 0;
}

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = rd_reg_word(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (rd_reg_word(&reg->semaphore) & BIT_0) {
			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {

				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}

			wrt_reg_word(&reg->semaphore, 0);
			rd_reg_word(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			qla_schedule_eeh_work(vha);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = rd_reg_dword(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_word(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			wrt_reg_word(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
		rd_reg_word_relaxed(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox 0 contents
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	__le16 __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = rd_reg_word(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
	    { "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	__le16 __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = &reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = &reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = rd_reg_word(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Timeout msec] */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}

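/*
 * qla2x00_get_link_speed_str() - map a firmware link-speed code to a
 * human-readable rate string in Gbps. Speed code 0x13 reports as "10";
 * out-of-range codes report as "?". Typical usage from this file:
 *
 *	ql_log(ql_log_info, vha, 0x500a, "LOOP UP detected (%s Gbps).\n",
 *	    qla2x00_get_link_speed_str(ha, ha->link_data_rate));
 */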
#define LS_UNKNOWN	2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
	    "1", "2", "?", "4", "8", "16", "32", "64", "10"
	};
#define	QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}

static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code, e.g. 0x8200
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * Peg-Halt Status-1 Register layout
			 * (LSW = mb[2], MSW = mb[6]):
			 * Bits 0-7   = protocol-engine ID
			 * Bits 8-28  = firmware error code
			 * Bits 29-31 = error level:
			 *	0x1 = Non-Fatal error
			 *	0x2 = Recoverable Fatal error
			 *	0x4 = Unrecoverable Fatal error
			 * Peg-Halt Status-2 Register:
			 * (LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * Peg-to-FC Status Register layout, per the decode
			 * below (LSW = mb[2], MSW = mb[6]):
			 * mb[2] bits 0-7   = Peg-firmware state
			 * mb[2] bit  8     = N/W interface link up
			 * mb[2] bit  9     = N/W interface signal detected
			 * mb[2] bits 10-11 = SFP status
			 * mb[2] bits 12-14 = heartbeat counter
			 * mb[2] bit  15    = heartbeat monitor enable
			 * mb[6] bits 0-1   = SFP additional info
			 * mb[6] bit  2     = SFP multirate
			 * mb[6] bit  3     = SFP Tx fault
			 * mb[6] bits 4-6   = link speed
			 * mb[6] bits 12-14 = DCBX status
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}

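/*
 * qla2x00_is_a_vp_did() - check whether an RSCN port ID belongs to one of
 * this adapter's virtual ports. Returns 1 on a match, 0 otherwise; used
 * below to suppress RSCN handling for our own vports.
 */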
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}

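/*
 * The qla2x00_find_fcport_by_*() helpers below walk vha->vp_fcports and
 * return the first fc_port_t matching a loop ID, WWPN, or N_Port ID.
 * The WWPN and N_Port ID variants can optionally match ports that are
 * being deleted (incl_deleted != 0).
 */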
fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

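/*
 * qla27xx_handle_8200_aen() - handle an MPI heartbeat-stop AEN on
 * ISP27xx/28xx parts: always capture an MPI firmware dump; when a full
 * dump was requested via ql2xfulldump_on_mpifail, also take a regular
 * firmware dump and schedule an ISP abort to recover the adapter.
 */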
static void
qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;
	bool reset_isp_needed = false;

	ql_log(ql_log_warn, vha, 0x02f0,
	    "MPI Heartbeat stop. MPI reset is%s needed. "
	    "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
	    mb[1] & BIT_8 ? "" : " not",
	    mb[0], mb[1], mb[2], mb[3]);

	if ((mb[1] & BIT_8) == 0)
		return;

	ql_log(ql_log_warn, vha, 0x02f1,
	    "MPI Heartbeat stop. FW dump needed\n");

	if (ql2xfulldump_on_mpifail) {
		ha->isp_ops->fw_dump(vha);
		reset_isp_needed = true;
	}

	ha->isp_ops->mpi_fw_dump(vha, 1);

	if (reset_isp_needed) {
		vha->hw->flags.fw_init_done = 0;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}

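/*
 * qla24xx_alloc_purex_item() - allocate a purex_item large enough for
 * @size payload bytes. Payloads up to QLA_DEFAULT_PAYLOAD_SIZE prefer the
 * per-host preallocated default_item (guarded by an atomic in_use count)
 * so the common case can avoid an allocation in interrupt context.
 */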
static struct purex_item *
qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
{
	struct purex_item *item = NULL;
	uint8_t item_hdr_size = sizeof(*item);

	if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
		item = kzalloc(item_hdr_size +
		    (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
	} else {
		if (atomic_inc_return(&vha->default_item.in_use) == 1) {
			item = &vha->default_item;
			goto initialize_purex_header;
		} else {
			item = kzalloc(item_hdr_size, GFP_ATOMIC);
		}
	}
	if (!item) {
		ql_log(ql_log_warn, vha, 0x5092,
		    ">> Failed allocate purex list item.\n");

		return NULL;
	}

initialize_purex_header:
	item->vha = vha;
	item->size = size;
	return item;
}

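/*
 * qla24xx_queue_purex_item() - attach a handler to a purex_item, add it
 * to the adapter's purex list, and wake the DPC thread, e.g.:
 *
 *	qla24xx_queue_purex_item(vha, item, qla27xx_process_purex_fpin);
 */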
static void
qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
	void (*process_item)(struct scsi_qla_host *vha,
	struct purex_item *pkt))
{
	struct purex_list *list = &vha->purex_list;
	ulong flags;

	pkt->process_item = process_item;

	spin_lock_irqsave(&list->lock, flags);
	list_add_tail(&pkt->list, &list->head);
	spin_unlock_irqrestore(&list->lock, flags);

	set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
}

/**
 * qla24xx_copy_std_pkt() - Copy over a purex ELS which is
 * contained in a single IOCB entry.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 */
static struct purex_item *
qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
{
	struct purex_item *item;

	item = qla24xx_alloc_purex_item(vha,
	    QLA_DEFAULT_PAYLOAD_SIZE);
	if (!item)
		return item;

	memcpy(&item->iocb, pkt, sizeof(item->iocb));
	return item;
}

/**
 * qla27xx_copy_fpin_pkt() - Copy over FPIN packets that can
 * span multiple IOCBs.
 * @vha: SCSI driver HA context
 * @pkt: ELS packet
 * @rsp: Response queue
 */
static struct purex_item *
qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
	struct rsp_que **rsp)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count, entry_count_remaining;
	struct purex_item *item;
	void *fpin_pkt = NULL;

	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
	    - PURX_ELS_HEADER_SIZE;
	pending_bytes = total_bytes;
	entry_count = entry_count_remaining = purex->entry_count;
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
	    sizeof(purex->els_frame_payload) : pending_bytes;
	ql_log(ql_log_info, vha, 0x509a,
	    "FPIN ELS, frame_size 0x%x, entry count %d\n",
	    total_bytes, entry_count);

	item = qla24xx_alloc_purex_item(vha, total_bytes);
	if (!item)
		return item;

	fpin_pkt = &item->iocb;

	memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;
	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
				ql_dbg(ql_dbg_async, vha, 0x5084,
				    "Ran out of IOCBs, partial data 0x%x\n",
				    buffer_copy_offset);
				cpu_relax();
				continue;
			}

			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				    "Unexpected IOCB type, partial data 0x%x\n",
				    buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy(((uint8_t *)fpin_pkt +
				    buffer_copy_offset), new_pkt->data,
				    no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				    "Attempt to copy more than we got, optimizing..%x\n",
				    buffer_copy_offset);
				memcpy(((uint8_t *)fpin_pkt +
				    buffer_copy_offset), new_pkt->data,
				    total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			    "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
			    total_bytes, entry_count_remaining);
			qla24xx_free_purex_item(item);
			return NULL;
		}
	} while (entry_count_remaining > 0);
	host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
	return item;
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t handle_cnt;
	uint16_t cnt, mbx;
	uint32_t handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t rscn_entry, host_pid;
	unsigned long flags;
	fc_port_t *fcport = NULL;

	if (!vha->hw->flags.fw_started)
		return;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = make_handle(mb[2], mb[1]);
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = make_handle(mb[2], mb[1]);
		handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:
		mbx = 0;

		vha->hw_err_cnt++;

		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			u16 m[4];

			m[0] = rd_reg_word(&reg24->mailbox4);
			m[1] = rd_reg_word(&reg24->mailbox5);
			m[2] = rd_reg_word(&reg24->mailbox6);
			mbx = m[3] = rd_reg_word(&reg24->mailbox7);

			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
			    mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
		} else
			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
			    mb[1], mb[2], mb[3]);

		if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
		    rd_reg_word(&reg24->mailbox7) & BIT_8)
			ha->isp_ops->mpi_fw_dump(vha, 1);
		ha->isp_ops->fw_dump(vha);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		vha->hw_err_cnt++;

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		vha->hw_err_cnt++;

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:
		ha->flags.lip_ae = 1;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mb[2] & BIT_0)
				ql_log(ql_log_info, vha, 0x11a0,
				    "FEC=enabled (link up).\n");
		}

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		if (vha->link_down_time < vha->hw->port_down_retry_count) {
			vha->short_link_down_cnt++;
			vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
		}

		break;

	case MBA_LOOP_DOWN:
		SAVE_TOPO(ha);
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;
		vha->link_down_time = 0;

		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? rd_reg_word(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP.
			 * Restore for Physical Port only.
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled &&
				    (ha->current_topology == ISP_CFG_F)) {
					memcpy(vha->port_name, ha->port_name, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected, "
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	case MBA_POINT_TO_POINT:
		ha->flags.lip_ae = 0;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			if (!N2N_TOPO(ha))
				qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *         OR 0xffff for global event
		 * mb[2] = New login state
		 *         7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *       vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		    (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
			    mb[1], mb[2], mb[3]);

			if (mb[1] == 0xffff)
				goto global_port_update;

			if (mb[1] == NPH_SNS_LID(ha)) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* use handle_cnt for loop id/nport handle */
			if (IS_FWI2_CAPABLE(ha))
				handle_cnt = NPH_SNS;
			else
				handle_cnt = SIMPLE_NAME_SERVER;
			if (mb[1] == handle_cnt) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
			if (qla_ini_mode_enabled(vha)) {
				fcport->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion(fcport);
			}
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    !ha->flags.n2n_ae &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);
		vha->scan.scan_retry = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);
		break;

	case MBA_RSCN_UPDATE:
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;

		/* Only handle SCNs for our Vports index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_log(ql_log_warn, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
			| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_handle_rscn(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;
	case MBA_CONGN_NOTI_RECV:
		if (!ha->flags.scm_enabled ||
		    mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
			break;

		if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
			ql_dbg(ql_dbg_async, vha, 0x509b,
			    "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
		} else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
			ql_log(ql_log_warn, vha, 0x509b,
			    "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
		}
		break;

	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		vha->interface_err_cnt++;
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = rd_reg_word(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
		fallthrough;
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
		fallthrough;
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			vha->hw_err_cnt++;
			qla27xx_handle_8200_aen(vha, mb);
		} else if (IS_QLA83XX(ha)) {
			mb[4] = rd_reg_word(&reg24->mailbox4);
			mb[5] = rd_reg_word(&reg24->mailbox5);
			mb[6] = rd_reg_word(&reg24->mailbox6);
			mb[7] = rd_reg_word(&reg24->mailbox7);
			qla83xx_handle_8200_aen(vha, mb);
		} else {
			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
			    mb[0], mb[1], mb[2], mb[3]);
		}
		break;

	case MBA_DPORT_DIAGNOSTICS:
		if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR ||
		    (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR)
			vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS;
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
		memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			static char *results[] = {
			    "start", "done(pass)", "done(error)", "undefined" };
			static char *types[] = {
			    "none", "dynamic", "static", "other" };
			uint result = mb[1] >> 0 & 0x3;
			uint type = mb[1] >> 6 & 0x3;
			uint sw = mb[1] >> 15 & 0x1;
			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
			    results[result], types[type], sw);
			if (result == 2) {
				static char *reasons[] = {
				    "reserved", "unexpected reject",
				    "unexpected phase", "retry exceeded",
				    "timed out", "not supported",
				    "user stopped" };
				uint reason = mb[2] >> 0 & 0xf;
				uint phase = mb[2] >> 12 & 0xf;
				ql_dbg(ql_dbg_async, vha, 0x5052,
				    "D-Port Diagnostics: reason=%s phase=%u\n",
				    reason < 7 ? reasons[reason] : "other",
				    phase >> 1);
			}
		}
		break;

	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		break;

	case MBA_TRANS_INSERT:
		ql_dbg(ql_dbg_async, vha, 0x5091,
		    "Transceiver Insertion: %04x\n", mb[1]);
		set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
		break;

	case MBA_TRANS_REMOVE:
		ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
	struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

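/*
 * qla2x00_get_sp_from_handle() - translate a completion IOCB handle into
 * the owning srb_t, validating the index and handle along the way. On
 * success the outstanding-command slot is cleared and the SRB returned;
 * an out-of-range index schedules an ISP abort (or an FCoE context reset
 * on P3P parts).
 */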
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
	struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp;
	uint16_t index;

	if (pkt->handle == QLA_SKIP_HANDLE)
		return NULL;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "%s: Invalid command index (%x) type %8ph.\n",
		    func, index, iocb);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return NULL;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "%s: Invalid completion handle (%x) -- timed-out.\n",
		    func, index);
		return NULL;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "%s: SRB handle (%x) mismatch %x.\n", func,
		    sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;
	return sp;
}

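/*
 * qla2x00_mbx_iocb_entry() - complete a mailbox-IOCB based login/logout:
 * decode the entry and completion status into the SRB's logio data words
 * (MBS_COMMAND_COMPLETE, MBS_PORT_ID_USED, ...) and finish the SRB.
 */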
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}

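/*
 * qla24xx_mbx_iocb_entry() - complete an ISP24xx mailbox IOCB: copy the
 * returned mailbox registers into the SRB and derive the result from
 * mailbox 0. SCSI/NVME/TM SRBs are rejected here as inconsistent.
 */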
static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (sp->type == SRB_SCSI_CMD ||
	    sp->type == SRB_NVME_CMD ||
	    sp->type == SRB_TM_CMD) {
		ql_log(ql_log_warn, vha, 0x509d,
		    "Inconsistent event entry type %d\n", sp->type);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = pkt->mb[i];

	res = (si->u.mbx.in_mb[0] & MBS_MASK);

	sp->done(sp, res);
}

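/*
 * qla24xxx_nack_iocb_entry() - complete a notify-acknowledge IOCB; any
 * status other than NOTIFY_ACK_SUCCESS finishes the SRB with
 * QLA_FUNCTION_FAILED.
 */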
static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}

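/*
 * qla2x00_ct_entry() - complete a CT IOCB: for BSG pass-through commands
 * translate the firmware completion status into a bsg_reply (handling
 * data underrun), and for driver-internal CT pass-through let
 * qla2x00_chk_ms_status() interpret the response.
 */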
2048 static void
2049 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
2050 sts_entry_t *pkt, int iocb_type)
2051 {
2052 const char func[] = "CT_IOCB";
2053 const char *type;
2054 srb_t *sp;
2055 struct bsg_job *bsg_job;
2056 struct fc_bsg_reply *bsg_reply;
2057 uint16_t comp_status;
2058 int res = 0;
2059
2060 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2061 if (!sp)
2062 return;
2063
2064 switch (sp->type) {
2065 case SRB_CT_CMD:
2066 bsg_job = sp->u.bsg_job;
2067 bsg_reply = bsg_job->reply;
2068
2069 type = "ct pass-through";
2070
comp_status = le16_to_cpu(pkt->comp_status);

/*
 * Report FC_CTELS_STATUS_OK up front and leave the decoding of
 * any CT errors to the BSG consumer.
 */
2077 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2078 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2079
2080 if (comp_status != CS_COMPLETE) {
2081 if (comp_status == CS_DATA_UNDERRUN) {
2082 res = DID_OK << 16;
2083 bsg_reply->reply_payload_rcv_len =
2084 le16_to_cpu(pkt->rsp_info_len);
2085
2086 ql_log(ql_log_warn, vha, 0x5048,
2087 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
2088 type, comp_status,
2089 bsg_reply->reply_payload_rcv_len);
2090 } else {
2091 ql_log(ql_log_warn, vha, 0x5049,
2092 "CT pass-through-%s error comp_status=0x%x.\n",
2093 type, comp_status);
2094 res = DID_ERROR << 16;
2095 bsg_reply->reply_payload_rcv_len = 0;
2096 }
2097 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
2098 pkt, sizeof(*pkt));
2099 } else {
2100 res = DID_OK << 16;
2101 bsg_reply->reply_payload_rcv_len =
2102 bsg_job->reply_payload.payload_len;
2103 bsg_job->reply_len = 0;
2104 }
2105 break;
case SRB_CT_PTHRU_CMD:
/*
 * Driver-initiated CT pass-through; validate the response
 * through the common MS status checker.
 */
2111 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
2112 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2113 sp->name);
2114 break;
2115 }
2116
2117 sp->done(sp, res);
2118 }
2119
2120 static void
2121 qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
2122 struct sts_entry_24xx *pkt, int iocb_type)
2123 {
2124 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
2125 const char func[] = "ELS_CT_IOCB";
2126 const char *type;
2127 srb_t *sp;
2128 struct bsg_job *bsg_job;
2129 struct fc_bsg_reply *bsg_reply;
2130 uint16_t comp_status;
2131 uint32_t fw_status[3];
2132 int res, logit = 1;
2133 struct srb_iocb *els;
2134 uint n;
2135 scsi_qla_host_t *vha;
2136 struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;
2137
2138 sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
2139 if (!sp)
2140 return;
2141 bsg_job = sp->u.bsg_job;
2142 vha = sp->vha;
2143
2144 type = NULL;
2145
comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
fw_status[1] = le32_to_cpu(ese->error_subcode_1);
fw_status[2] = le32_to_cpu(ese->error_subcode_2);
2149
2150 switch (sp->type) {
2151 case SRB_ELS_CMD_RPT:
2152 case SRB_ELS_CMD_HST:
2153 type = "rpt hst";
2154 break;
2155 case SRB_ELS_CMD_HST_NOLOGIN:
2156 type = "els";
2157 {
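/* ELS pass-through without prior login: copy the remapped response back into the BSG reply buffer. */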
2158 struct els_entry_24xx *els = (void *)pkt;
2159 struct qla_bsg_auth_els_request *p =
2160 (struct qla_bsg_auth_els_request *)bsg_job->request;
2161
2162 ql_dbg(ql_dbg_user, vha, 0x700f,
2163 "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
2164 __func__, sc_to_str(p->e.sub_cmd),
2165 e->d_id[2], e->d_id[1], e->d_id[0],
2166 comp_status, p->e.extra_rx_xchg_address, bsg_job);
2167
2168 if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
2169 if (sp->remap.remapped) {
2170 n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2171 bsg_job->reply_payload.sg_cnt,
2172 sp->remap.rsp.buf,
2173 sp->remap.rsp.len);
2174 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
2175 "%s: SG copied %x of %x\n",
2176 __func__, n, sp->remap.rsp.len);
2177 } else {
2178 ql_dbg(ql_dbg_user, vha, 0x700f,
2179 "%s: NOT REMAPPED (error)...!!!\n",
2180 __func__);
2181 }
2182 }
2183 }
2184 break;
2185 case SRB_CT_CMD:
2186 type = "ct pass-through";
2187 break;
2188 case SRB_ELS_DCMD:
2189 type = "Driver ELS logo";
2190 if (iocb_type != ELS_IOCB_TYPE) {
2191 ql_dbg(ql_dbg_user, vha, 0x5047,
2192 "Completing %s: (%p) type=%d.\n",
2193 type, sp, sp->type);
2194 sp->done(sp, 0);
2195 return;
2196 }
2197 break;
case SRB_CT_PTHRU_CMD:
/*
 * Driver-initiated CT pass-through; check the MS status and
 * complete the SRB directly.
 */
2202 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
2203 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2204 sp->name);
2205 sp->done(sp, res);
2206 return;
2207 default:
2208 ql_dbg(ql_dbg_user, vha, 0x503e,
2209 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
2210 return;
2211 }
2212
2213 if (iocb_type == ELS_IOCB_TYPE) {
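/* Record the raw firmware status words for the ELS originator. */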
2214 els = &sp->u.iocb_cmd;
2215 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
2216 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
2217 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
2218 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
2219 if (comp_status == CS_COMPLETE) {
2220 res = DID_OK << 16;
2221 } else {
2222 if (comp_status == CS_DATA_UNDERRUN) {
2223 res = DID_OK << 16;
2224 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
2225 ese->total_byte_count));
2226
2227 if (sp->remap.remapped &&
2228 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
2229 ql_dbg(ql_dbg_user, vha, 0x503f,
2230 "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
2231 __func__, e->s_id[0], e->s_id[2], e->s_id[1],
2232 e->d_id[2], e->d_id[1], e->d_id[0]);
2233 logit = 0;
2234 }
2235
2236 } else if (comp_status == CS_PORT_LOGGED_OUT) {
2237 ql_dbg(ql_dbg_disc, vha, 0x911e,
2238 "%s %d schedule session deletion\n",
2239 __func__, __LINE__);
2240
2241 els->u.els_plogi.len = 0;
2242 res = DID_IMM_RETRY << 16;
2243 qlt_schedule_sess_for_deletion(sp->fcport);
2244 } else {
2245 els->u.els_plogi.len = 0;
2246 res = DID_ERROR << 16;
2247 }
2248
2249 if (sp->remap.remapped &&
2250 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
2251 if (logit) {
2252 ql_dbg(ql_dbg_user, vha, 0x503f,
2253 "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
2254 type, sp->handle, comp_status);
2255
2256 ql_dbg(ql_dbg_user, vha, 0x503f,
2257 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2258 fw_status[1], fw_status[2],
le32_to_cpu(ese->total_byte_count),
2261 e->s_id[0], e->s_id[2], e->s_id[1],
2262 e->d_id[2], e->d_id[1], e->d_id[0]);
2263 }
2264 if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE &&
2265 sp->type == SRB_ELS_CMD_HST_NOLOGIN) {
2266 ql_dbg(ql_dbg_edif, vha, 0x911e,
2267 "%s rcv reject. Sched delete\n", __func__);
2268 qlt_schedule_sess_for_deletion(sp->fcport);
2269 }
2270 } else if (logit) {
2271 ql_log(ql_log_info, vha, 0x503f,
2272 "%s IOCB Done hdl=%x comp_status=0x%x\n",
2273 type, sp->handle, comp_status);
2274 ql_log(ql_log_info, vha, 0x503f,
2275 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2276 fw_status[1], fw_status[2],
le32_to_cpu(ese->total_byte_count),
2279 e->s_id[0], e->s_id[2], e->s_id[1],
2280 e->d_id[2], e->d_id[1], e->d_id[0]);
2281 }
2282 }
2283 goto els_ct_done;
}

/* CT IOCB or ELS pass-through completion delivered via BSG. */
2289 bsg_job = sp->u.bsg_job;
2290 bsg_reply = bsg_job->reply;
2291 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2292 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
2293
2294 if (comp_status != CS_COMPLETE) {
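/* Map the firmware completion status onto the BSG reply. */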
2295 if (comp_status == CS_DATA_UNDERRUN) {
2296 res = DID_OK << 16;
2297 bsg_reply->reply_payload_rcv_len =
2298 le32_to_cpu(ese->total_byte_count);
2299
2300 ql_dbg(ql_dbg_user, vha, 0x503f,
2301 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2302 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
2303 type, sp->handle, comp_status, fw_status[1], fw_status[2],
2304 le32_to_cpu(ese->total_byte_count));
2305 } else {
2306 ql_dbg(ql_dbg_user, vha, 0x5040,
2307 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2308 "error subcode 1=0x%x error subcode 2=0x%x.\n",
2309 type, sp->handle, comp_status,
2310 le32_to_cpu(ese->error_subcode_1),
2311 le32_to_cpu(ese->error_subcode_2));
2312 res = DID_ERROR << 16;
2313 bsg_reply->reply_payload_rcv_len = 0;
2314 }
2315 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
2316 fw_status, sizeof(fw_status));
2317 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
2318 pkt, sizeof(*pkt));
} else {
2321 res = DID_OK << 16;
2322 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2323 bsg_job->reply_len = 0;
2324 }
2325 els_ct_done:
2326
2327 sp->done(sp, res);
2328 }
2329
2330 static void
2331 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
2332 struct logio_entry_24xx *logio)
2333 {
2334 const char func[] = "LOGIO-IOCB";
2335 const char *type;
2336 fc_port_t *fcport;
2337 srb_t *sp;
2338 struct srb_iocb *lio;
2339 uint16_t *data;
2340 uint32_t iop[2];
2341 int logit = 1;
2342
2343 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
2344 if (!sp)
2345 return;
2346
2347 lio = &sp->u.iocb_cmd;
2348 type = sp->name;
2349 fcport = sp->fcport;
2350 data = lio->u.logio.data;
2351
2352 data[0] = MBS_COMMAND_ERROR;
2353 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
2354 QLA_LOGIO_LOGIN_RETRIED : 0;
2355 if (logio->entry_status) {
2356 ql_log(ql_log_warn, fcport->vha, 0x5034,
2357 "Async-%s error entry - %8phC hdl=%x"
2358 "portid=%02x%02x%02x entry-status=%x.\n",
2359 type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
2360 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2361 logio->entry_status);
2362 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
2363 logio, sizeof(*logio));
2364
2365 goto logio_done;
2366 }
2367
2368 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
2369 ql_dbg(ql_dbg_async, sp->vha, 0x5036,
2370 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
2371 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2372 le32_to_cpu(logio->io_parameter[0]));
2373
2374 vha->hw->exch_starvation = 0;
2375 data[0] = MBS_COMMAND_COMPLETE;
2376
2377 if (sp->type == SRB_PRLI_CMD) {
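/* PRLI: hand the raw IO parameters back to the caller. */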
2378 lio->u.logio.iop[0] =
2379 le32_to_cpu(logio->io_parameter[0]);
2380 lio->u.logio.iop[1] =
2381 le32_to_cpu(logio->io_parameter[1]);
2382 goto logio_done;
2383 }
2384
2385 if (sp->type != SRB_LOGIN_CMD)
2386 goto logio_done;
2387
2388 lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
2389 if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
2390 fcport->flags |= FCF_FCSP_DEVICE;
2391
2392 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2393 if (iop[0] & BIT_4) {
2394 fcport->port_type = FCT_TARGET;
2395 if (iop[0] & BIT_8)
2396 fcport->flags |= FCF_FCP2_DEVICE;
2397 } else if (iop[0] & BIT_5)
2398 fcport->port_type = FCT_INITIATOR;
2399
2400 if (iop[0] & BIT_7)
2401 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2402
2403 if (logio->io_parameter[7] || logio->io_parameter[8])
2404 fcport->supported_classes |= FC_COS_CLASS2;
2405 if (logio->io_parameter[9] || logio->io_parameter[10])
2406 fcport->supported_classes |= FC_COS_CLASS3;
2407
2408 goto logio_done;
2409 }
2410
2411 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2412 iop[1] = le32_to_cpu(logio->io_parameter[1]);
2413 lio->u.logio.iop[0] = iop[0];
2414 lio->u.logio.iop[1] = iop[1];
2415 switch (iop[0]) {
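/* Translate the login sub-status into a mailbox-style result. */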
2416 case LSC_SCODE_PORTID_USED:
2417 data[0] = MBS_PORT_ID_USED;
2418 data[1] = LSW(iop[1]);
2419 logit = 0;
2420 break;
2421 case LSC_SCODE_NPORT_USED:
2422 data[0] = MBS_LOOP_ID_USED;
2423 logit = 0;
2424 break;
2425 case LSC_SCODE_CMD_FAILED:
if (iop[1] == 0x0606) {
/*
 * PLOGI/PRLI was completed implicitly by the firmware (the
 * target side already acked), so report success.
 */
2431 data[0] = MBS_COMMAND_COMPLETE;
2432 goto logio_done;
2433 }
2434 data[0] = MBS_COMMAND_ERROR;
2435 break;
2436 case LSC_SCODE_NOXCB:
2437 vha->hw->exch_starvation++;
2438 if (vha->hw->exch_starvation > 5) {
2439 ql_log(ql_log_warn, vha, 0xd046,
2440 "Exchange starvation. Resetting RISC\n");
2441
2442 vha->hw->exch_starvation = 0;
2443
2444 if (IS_P3P_TYPE(vha->hw))
2445 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2446 else
2447 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2448 qla2xxx_wake_dpc(vha);
2449 }
2450 fallthrough;
2451 default:
2452 data[0] = MBS_COMMAND_ERROR;
2453 break;
2454 }
2455
2456 if (logit)
2457 ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
2458 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2459 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2460 le16_to_cpu(logio->comp_status),
2461 le32_to_cpu(logio->io_parameter[0]),
2462 le32_to_cpu(logio->io_parameter[1]));
2463 else
2464 ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
2465 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2466 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2467 le16_to_cpu(logio->comp_status),
2468 le32_to_cpu(logio->io_parameter[0]),
2469 le32_to_cpu(logio->io_parameter[1]));
2470
2471 logio_done:
2472 sp->done(sp, 0);
2473 }
2474
2475 static void
2476 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
2477 {
2478 const char func[] = "TMF-IOCB";
2479 const char *type;
2480 fc_port_t *fcport;
2481 srb_t *sp;
2482 struct srb_iocb *iocb;
2483 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2484 u16 comp_status;
2485
2486 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
2487 if (!sp)
2488 return;
2489
2490 comp_status = le16_to_cpu(sts->comp_status);
2491 iocb = &sp->u.iocb_cmd;
2492 type = sp->name;
2493 fcport = sp->fcport;
2494 iocb->u.tmf.data = QLA_SUCCESS;
2495
2496 if (sts->entry_status) {
2497 ql_log(ql_log_warn, fcport->vha, 0x5038,
2498 "Async-%s error - hdl=%x entry-status(%x).\n",
2499 type, sp->handle, sts->entry_status);
2500 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2501 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2502 ql_log(ql_log_warn, fcport->vha, 0x5039,
2503 "Async-%s error - hdl=%x completion status(%x).\n",
2504 type, sp->handle, comp_status);
2505 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2506 } else if ((le16_to_cpu(sts->scsi_status) &
2507 SS_RESPONSE_INFO_LEN_VALID)) {
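/* FCP response info is present; check the response code in byte 3. */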
2508 host_to_fcp_swap(sts->data, sizeof(sts->data));
2509 if (le32_to_cpu(sts->rsp_data_len) < 4) {
ql_log(ql_log_warn, fcport->vha, 0x503b,
"Async-%s error - hdl=%x not enough response(%d).\n",
type, sp->handle, le32_to_cpu(sts->rsp_data_len));
2513 } else if (sts->data[3]) {
2514 ql_log(ql_log_warn, fcport->vha, 0x503c,
2515 "Async-%s error - hdl=%x response(%x).\n",
2516 type, sp->handle, sts->data[3]);
2517 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2518 }
2519 }
2520
2521 switch (comp_status) {
2522 case CS_PORT_LOGGED_OUT:
2523 case CS_PORT_CONFIG_CHG:
2524 case CS_PORT_BUSY:
2525 case CS_INCOMPLETE:
2526 case CS_PORT_UNAVAILABLE:
2527 case CS_TIMEOUT:
2528 case CS_RESET:
2529 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2530 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2531 "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n",
2532 fcport->d_id.b.domain, fcport->d_id.b.area,
2533 fcport->d_id.b.al_pa,
2534 port_state_str[FCS_ONLINE],
2535 comp_status);
2536
2537 qlt_schedule_sess_for_deletion(fcport);
2538 }
2539 break;
2540
2541 default:
2542 break;
2543 }
2544
2545 if (iocb->u.tmf.data != QLA_SUCCESS)
2546 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
2547 sts, sizeof(*sts));
2548
2549 sp->done(sp, 0);
2550 }
2551
2552 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2553 void *tsk, srb_t *sp)
2554 {
2555 fc_port_t *fcport;
2556 struct srb_iocb *iocb;
2557 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2558 uint16_t state_flags;
2559 struct nvmefc_fcp_req *fd;
2560 uint16_t ret = QLA_SUCCESS;
2561 __le16 comp_status = sts->comp_status;
2562 int logit = 0;
2563
2564 iocb = &sp->u.iocb_cmd;
2565 fcport = sp->fcport;
2566 iocb->u.nvme.comp_status = comp_status;
2567 state_flags = le16_to_cpu(sts->state_flags);
2568 fd = iocb->u.nvme.desc;
2569
2570 if (unlikely(iocb->u.nvme.aen_op))
2571 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
2572 else
2573 sp->qpair->cmd_completion_cnt++;
2574
2575 if (unlikely(comp_status != CS_COMPLETE))
2576 logit = 1;
2577
fd->transferred_length = fd->payload_length -
le32_to_cpu(sts->residual_len);

/*
 * State flags SF_FCP_RSP_DMA (bit 0) and SF_NVME_ERSP (bit 6):
 * - neither set: no response payload was returned;
 * - both set: the response was DMA'd to the host buffer;
 * - only SF_FCP_RSP_DMA: unexpected NVMe_RSP IU contents;
 * - only SF_NVME_ERSP: copy the ERSP out of the status IOCB.
 */
2589 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
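/* No response payload was returned. */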
2590 iocb->u.nvme.rsp_pyld_len = 0;
2591 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
(SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
/* Response already DMA'd to the host buffer at fd->rspaddr. */
iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
} else if (state_flags & SF_FCP_RSP_DMA) {
/*
 * A non-zero value in the first 12 bytes of the NVMe_RSP IU
 * without SF_NVME_ERSP set is unexpected; treat it as an error.
 */
2600 iocb->u.nvme.rsp_pyld_len = 0;
2601 fd->transferred_length = 0;
2602 ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
2603 "Unexpected values in NVMe_RSP IU.\n");
2604 logit = 1;
2605 } else if (state_flags & SF_NVME_ERSP) {
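/* The ERSP payload is embedded in the status IOCB; byte-swap it into the caller's response buffer. */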
2606 uint32_t *inbuf, *outbuf;
2607 uint16_t iter;
2608
2609 inbuf = (uint32_t *)&sts->nvme_ersp_data;
2610 outbuf = (uint32_t *)fd->rspaddr;
2611 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2612 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
2613 sizeof(struct nvme_fc_ersp_iu))) {
2614 if (ql_mask_match(ql_dbg_io)) {
WARN_ONCE(1, "Unexpected response payload length %u.\n",
le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
ql_log(ql_log_warn, fcport->vha, 0x5100,
"Unexpected response payload length %u.\n",
le16_to_cpu(iocb->u.nvme.rsp_pyld_len));
2620 }
2621 iocb->u.nvme.rsp_pyld_len =
2622 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
2623 }
2624 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
2625 for (; iter; iter--)
2626 *outbuf++ = swab32(*inbuf++);
2627 }
2628
2629 if (state_flags & SF_NVME_ERSP) {
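/* Cross-check the target-reported transfer length for dropped frames. */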
2630 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
2631 u32 tgt_xfer_len;
2632
2633 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
2634 if (fd->transferred_length != tgt_xfer_len) {
2635 ql_log(ql_log_warn, fcport->vha, 0x3079,
2636 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
2637 tgt_xfer_len, fd->transferred_length);
2638 logit = 1;
} else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
/*
 * A pure underrun with matching transfer lengths lost no
 * data, so do not log it.
 */
logit = 0;
2645 }
2646 }
2647
2648 if (unlikely(logit))
ql_dbg(ql_dbg_io, fcport->vha, 0x5060,
"NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
sp->name, sp->handle, le16_to_cpu(comp_status),
fd->transferred_length, le32_to_cpu(sts->residual_len),
le16_to_cpu(sts->ox_id));

/*
 * If the transport failed the request, return an error so the
 * initiator can retry; otherwise the transport layer handles it.
 */
2659 switch (le16_to_cpu(comp_status)) {
2660 case CS_COMPLETE:
2661 break;
2662
2663 case CS_RESET:
2664 case CS_PORT_UNAVAILABLE:
2665 case CS_PORT_LOGGED_OUT:
2666 fcport->nvme_flag |= NVME_FLAG_RESETTING;
2667 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2668 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2669 "Port to be marked lost on fcport=%06x, current "
2670 "port state= %s comp_status %x.\n",
2671 fcport->d_id.b24, port_state_str[FCS_ONLINE],
2672 comp_status);
2673
2674 qlt_schedule_sess_for_deletion(fcport);
2675 }
2676 fallthrough;
2677 case CS_ABORTED:
2678 case CS_PORT_BUSY:
2679 fd->transferred_length = 0;
2680 iocb->u.nvme.rsp_pyld_len = 0;
2681 ret = QLA_ABORTED;
2682 break;
2683 case CS_DATA_UNDERRUN:
2684 break;
2685 default:
2686 ret = QLA_FUNCTION_FAILED;
2687 break;
2688 }
2689 sp->done(sp, ret);
2690 }
2691
2692 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
2693 struct vp_ctrl_entry_24xx *vce)
2694 {
2695 const char func[] = "CTRLVP-IOCB";
2696 srb_t *sp;
2697 int rval = QLA_SUCCESS;
2698
2699 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
2700 if (!sp)
2701 return;
2702
2703 if (vce->entry_status != 0) {
2704 ql_dbg(ql_dbg_vport, vha, 0x10c4,
2705 "%s: Failed to complete IOCB -- error status (%x)\n",
2706 sp->name, vce->entry_status);
2707 rval = QLA_FUNCTION_FAILED;
2708 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
2709 ql_dbg(ql_dbg_vport, vha, 0x10c5,
2710 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
2711 sp->name, le16_to_cpu(vce->comp_status),
2712 le16_to_cpu(vce->vp_idx_failed));
2713 rval = QLA_FUNCTION_FAILED;
2714 } else {
2715 ql_dbg(ql_dbg_vport, vha, 0x10c6,
2716 "Done %s.\n", __func__);
2717 }
2718
2719 sp->rc = rval;
2720 sp->done(sp, rval);
2721 }
2722
2723
2724 static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
2725 struct rsp_que *rsp,
2726 sts_entry_t *pkt)
2727 {
2728 sts21_entry_t *sts21_entry;
2729 sts22_entry_t *sts22_entry;
2730 uint16_t handle_cnt;
2731 uint16_t cnt;
2732
2733 switch (pkt->entry_type) {
2734 case STATUS_TYPE:
2735 qla2x00_status_entry(vha, rsp, pkt);
2736 break;
2737 case STATUS_TYPE_21:
2738 sts21_entry = (sts21_entry_t *)pkt;
2739 handle_cnt = sts21_entry->handle_count;
2740 for (cnt = 0; cnt < handle_cnt; cnt++)
2741 qla2x00_process_completed_request(vha, rsp->req,
2742 sts21_entry->handle[cnt]);
2743 break;
2744 case STATUS_TYPE_22:
2745 sts22_entry = (sts22_entry_t *)pkt;
2746 handle_cnt = sts22_entry->handle_count;
2747 for (cnt = 0; cnt < handle_cnt; cnt++)
2748 qla2x00_process_completed_request(vha, rsp->req,
2749 sts22_entry->handle[cnt]);
2750 break;
2751 case STATUS_CONT_TYPE:
2752 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2753 break;
2754 case MBX_IOCB_TYPE:
2755 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
2756 break;
2757 case CT_IOCB_TYPE:
2758 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2759 break;
2760 default:
2761
2762 ql_log(ql_log_warn, vha, 0x504a,
2763 "Received unknown response pkt type %x entry status=%x.\n",
2764 pkt->entry_type, pkt->entry_status);
2765 break;
2766 }
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
2773 void
2774 qla2x00_process_response_queue(struct rsp_que *rsp)
2775 {
2776 struct scsi_qla_host *vha;
2777 struct qla_hw_data *ha = rsp->hw;
2778 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2779 sts_entry_t *pkt;
2780
2781 vha = pci_get_drvdata(ha->pdev);
2782
2783 if (!vha->flags.online)
2784 return;
2785
2786 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
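/* Consume entries until one already marked processed is seen. */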
2787 pkt = (sts_entry_t *)rsp->ring_ptr;
2788
2789 rsp->ring_index++;
2790 if (rsp->ring_index == rsp->length) {
2791 rsp->ring_index = 0;
2792 rsp->ring_ptr = rsp->ring;
2793 } else {
2794 rsp->ring_ptr++;
2795 }
2796
2797 if (pkt->entry_status != 0) {
2798 qla2x00_error_entry(vha, rsp, pkt);
2799 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2800 wmb();
2801 continue;
2802 }
2803
2804 qla2x00_process_response_entry(vha, rsp, pkt);
2805 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2806 wmb();
}

/* Adjust ring index */
wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
2811 }
2812
2813 static inline void
2814 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2815 uint32_t sense_len, struct rsp_que *rsp, int res)
2816 {
2817 struct scsi_qla_host *vha = sp->vha;
2818 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2819 uint32_t track_sense_len;
2820
2821 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2822 sense_len = SCSI_SENSE_BUFFERSIZE;
2823
2824 SET_CMD_SENSE_LEN(sp, sense_len);
2825 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2826 track_sense_len = sense_len;
2827
2828 if (sense_len > par_sense_len)
2829 sense_len = par_sense_len;
2830
2831 memcpy(cp->sense_buffer, sense_data, sense_len);
2832
2833 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2834 track_sense_len -= sense_len;
2835 SET_CMD_SENSE_LEN(sp, track_sense_len);
2836
2837 if (track_sense_len != 0) {
2838 rsp->status_srb = sp;
2839 cp->result = res;
2840 }
2841
2842 if (sense_len) {
2843 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
2844 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2845 sp->vha->host_no, cp->device->id, cp->device->lun,
2846 cp);
2847 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
2848 cp->sense_buffer, sense_len);
2849 }
2850 }
2851
2852 struct scsi_dif_tuple {
2853 __be16 guard;
2854 __be16 app_tag;
2855 __be32 ref_tag;
};

/*
 * Validate the guard, application and reference tags returned in a
 * DIF error status entry and build sense data for the failure.
 * Returns non-zero when the command must be failed.
 */
2864 static inline int
2865 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
2866 {
2867 struct scsi_qla_host *vha = sp->vha;
2868 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2869 uint8_t *ap = &sts24->data[12];
2870 uint8_t *ep = &sts24->data[20];
2871 uint32_t e_ref_tag, a_ref_tag;
2872 uint16_t e_app_tag, a_app_tag;
uint16_t e_guard, a_guard;

/*
 * The firmware returns the tags little-endian in the status IOCB
 * data area: actual values at offset 12, expected at offset 20.
 */
2879 a_guard = get_unaligned_le16(ap + 2);
2880 a_app_tag = get_unaligned_le16(ap + 0);
2881 a_ref_tag = get_unaligned_le32(ap + 4);
2882 e_guard = get_unaligned_le16(ep + 2);
2883 e_app_tag = get_unaligned_le16(ep + 0);
2884 e_ref_tag = get_unaligned_le32(ep + 4);
2885
2886 ql_dbg(ql_dbg_io, vha, 0x3023,
2887 "iocb(s) %p Returned STATUS.\n", sts24);
2888
2889 ql_dbg(ql_dbg_io, vha, 0x3024,
2890 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2891 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2892 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2893 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
a_app_tag, e_app_tag, a_guard, e_guard);

/*
 * Ignore the sector if:
 * For type     3: ref & app tag is all 'f's
 * For type 0,1,2: app tag is all 'f's
 */
2901 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
2902 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
2903 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
2904 uint32_t blocks_done, resid;
sector_t lba_s = scsi_get_lba(cmd);

/* The 2TB boundary case is covered by the 32-bit truncation. */
blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
2909
2910 resid = scsi_bufflen(cmd) - (blocks_done *
2911 cmd->device->sector_size);
2912
2913 scsi_set_resid(cmd, resid);
cmd->result = DID_OK << 16;

/* Update protection tag */
if (scsi_prot_sg_count(cmd)) {
2918 uint32_t i, j = 0, k = 0, num_ent;
2919 struct scatterlist *sg;
struct t10_pi_tuple *spt;

/* Patch the corresponding protection tags */
scsi_for_each_prot_sg(cmd, sg,
2924 scsi_prot_sg_count(cmd), i) {
2925 num_ent = sg_dma_len(sg) / 8;
2926 if (k + num_ent < blocks_done) {
2927 k += num_ent;
2928 continue;
2929 }
2930 j = blocks_done - k - 1;
2931 k = blocks_done;
2932 break;
2933 }
2934
2935 if (k != blocks_done) {
2936 ql_log(ql_log_warn, vha, 0x302f,
2937 "unexpected tag values tag:lba=%x:%llx)\n",
2938 e_ref_tag, (unsigned long long)lba_s);
2939 return 1;
2940 }
2941
2942 spt = page_address(sg_page(sg)) + sg->offset;
2943 spt += j;
2944
2945 spt->app_tag = T10_PI_APP_ESCAPE;
2946 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2947 spt->ref_tag = T10_PI_REF_ESCAPE;
2948 }
2949
2950 return 0;
}

/* check guard */
if (e_guard != a_guard) {
2955 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2956 set_host_byte(cmd, DID_ABORT);
2957 return 1;
}

/* check ref tag */
if (e_ref_tag != a_ref_tag) {
2962 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2963 set_host_byte(cmd, DID_ABORT);
2964 return 1;
}

/* check appl tag */
if (e_app_tag != a_app_tag) {
2969 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2970 set_host_byte(cmd, DID_ABORT);
2971 return 1;
2972 }
2973
2974 return 1;
2975 }
2976
2977 static void
2978 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2979 struct req_que *req, uint32_t index)
2980 {
2981 struct qla_hw_data *ha = vha->hw;
2982 srb_t *sp;
2983 uint16_t comp_status;
2984 uint16_t scsi_status;
2985 uint16_t thread_id;
2986 uint32_t rval = EXT_STATUS_OK;
2987 struct bsg_job *bsg_job = NULL;
2988 struct fc_bsg_request *bsg_request;
2989 struct fc_bsg_reply *bsg_reply;
2990 sts_entry_t *sts = pkt;
struct sts_entry_24xx *sts24 = pkt;

/* Validate handle. */
if (index >= req->num_outstanding_cmds) {
2995 ql_log(ql_log_warn, vha, 0x70af,
2996 "Invalid SCSI completion handle 0x%x.\n", index);
2997 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2998 return;
2999 }
3000
3001 sp = req->outstanding_cmds[index];
3002 if (!sp) {
3003 ql_log(ql_log_warn, vha, 0x70b0,
3004 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
3005 req->id, index);
3006
3007 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3008 return;
3009 }
3010
3011
3012 req->outstanding_cmds[index] = NULL;
3013 bsg_job = sp->u.bsg_job;
3014 bsg_request = bsg_job->request;
3015 bsg_reply = bsg_job->reply;
3016
3017 if (IS_FWI2_CAPABLE(ha)) {
3018 comp_status = le16_to_cpu(sts24->comp_status);
3019 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3020 } else {
3021 comp_status = le16_to_cpu(sts->comp_status);
3022 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3023 }
3024
3025 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
3026 switch (comp_status) {
3027 case CS_COMPLETE:
3028 if (scsi_status == 0) {
3029 bsg_reply->reply_payload_rcv_len =
3030 bsg_job->reply_payload.payload_len;
3031 vha->qla_stats.input_bytes +=
3032 bsg_reply->reply_payload_rcv_len;
3033 vha->qla_stats.input_requests++;
3034 rval = EXT_STATUS_OK;
3035 }
3036 goto done;
3037
3038 case CS_DATA_OVERRUN:
3039 ql_dbg(ql_dbg_user, vha, 0x70b1,
3040 "Command completed with data overrun thread_id=%d\n",
3041 thread_id);
3042 rval = EXT_STATUS_DATA_OVERRUN;
3043 break;
3044
3045 case CS_DATA_UNDERRUN:
3046 ql_dbg(ql_dbg_user, vha, 0x70b2,
3047 "Command completed with data underrun thread_id=%d\n",
3048 thread_id);
3049 rval = EXT_STATUS_DATA_UNDERRUN;
3050 break;
3051 case CS_BIDIR_RD_OVERRUN:
3052 ql_dbg(ql_dbg_user, vha, 0x70b3,
3053 "Command completed with read data overrun thread_id=%d\n",
3054 thread_id);
3055 rval = EXT_STATUS_DATA_OVERRUN;
3056 break;
3057
3058 case CS_BIDIR_RD_WR_OVERRUN:
3059 ql_dbg(ql_dbg_user, vha, 0x70b4,
3060 "Command completed with read and write data overrun "
3061 "thread_id=%d\n", thread_id);
3062 rval = EXT_STATUS_DATA_OVERRUN;
3063 break;
3064
3065 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
3066 ql_dbg(ql_dbg_user, vha, 0x70b5,
3067 "Command completed with read data over and write data "
3068 "underrun thread_id=%d\n", thread_id);
3069 rval = EXT_STATUS_DATA_OVERRUN;
3070 break;
3071
3072 case CS_BIDIR_RD_UNDERRUN:
3073 ql_dbg(ql_dbg_user, vha, 0x70b6,
3074 "Command completed with read data underrun "
3075 "thread_id=%d\n", thread_id);
3076 rval = EXT_STATUS_DATA_UNDERRUN;
3077 break;
3078
3079 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
3080 ql_dbg(ql_dbg_user, vha, 0x70b7,
3081 "Command completed with read data under and write data "
3082 "overrun thread_id=%d\n", thread_id);
3083 rval = EXT_STATUS_DATA_UNDERRUN;
3084 break;
3085
3086 case CS_BIDIR_RD_WR_UNDERRUN:
3087 ql_dbg(ql_dbg_user, vha, 0x70b8,
3088 "Command completed with read and write data underrun "
3089 "thread_id=%d\n", thread_id);
3090 rval = EXT_STATUS_DATA_UNDERRUN;
3091 break;
3092
3093 case CS_BIDIR_DMA:
3094 ql_dbg(ql_dbg_user, vha, 0x70b9,
3095 "Command completed with data DMA error thread_id=%d\n",
3096 thread_id);
3097 rval = EXT_STATUS_DMA_ERR;
3098 break;
3099
3100 case CS_TIMEOUT:
3101 ql_dbg(ql_dbg_user, vha, 0x70ba,
3102 "Command completed with timeout thread_id=%d\n",
3103 thread_id);
3104 rval = EXT_STATUS_TIMEOUT;
3105 break;
3106 default:
3107 ql_dbg(ql_dbg_user, vha, 0x70bb,
3108 "Command completed with completion status=0x%x "
3109 "thread_id=%d\n", comp_status, thread_id);
3110 rval = EXT_STATUS_ERR;
3111 break;
3112 }
3113 bsg_reply->reply_payload_rcv_len = 0;
3114
done:
/* Return the vendor-specific reply to the API. */
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
bsg_job->reply_len = sizeof(struct fc_bsg_reply);

/* Always return DID_OK; bsg carries the vendor-specific status. */
sp->done(sp, DID_OK << 16);
3122
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
3131 static void
3132 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3133 {
3134 srb_t *sp;
3135 fc_port_t *fcport;
3136 struct scsi_cmnd *cp;
3137 sts_entry_t *sts = pkt;
3138 struct sts_entry_24xx *sts24 = pkt;
3139 uint16_t comp_status;
3140 uint16_t scsi_status;
3141 uint16_t ox_id;
3142 uint8_t lscsi_status;
3143 int32_t resid;
3144 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
3145 fw_resid_len;
3146 uint8_t *rsp_info, *sense_data;
3147 struct qla_hw_data *ha = vha->hw;
3148 uint32_t handle;
3149 uint16_t que;
3150 struct req_que *req;
3151 int logit = 1;
3152 int res = 0;
3153 uint16_t state_flags = 0;
3154 uint16_t sts_qual = 0;
3155
3156 if (IS_FWI2_CAPABLE(ha)) {
3157 comp_status = le16_to_cpu(sts24->comp_status);
3158 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3159 state_flags = le16_to_cpu(sts24->state_flags);
3160 } else {
3161 comp_status = le16_to_cpu(sts->comp_status);
3162 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3163 }
3164 handle = (uint32_t) LSW(sts->handle);
3165 que = MSW(sts->handle);
req = ha->req_q_map[que];

/* Validate handle. */
if (req == NULL ||
3170 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
3171 ql_dbg(ql_dbg_io, vha, 0x3059,
3172 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
3173 "que=%u.\n", sts->handle, req, que);
3174 return;
3175 }
3176
3177
3178 if (handle < req->num_outstanding_cmds) {
3179 sp = req->outstanding_cmds[handle];
3180 if (!sp) {
3181 ql_dbg(ql_dbg_io, vha, 0x3075,
3182 "%s(%ld): Already returned command for status handle (0x%x).\n",
3183 __func__, vha->host_no, sts->handle);
3184 return;
3185 }
3186 } else {
3187 ql_dbg(ql_dbg_io, vha, 0x3017,
3188 "Invalid status handle, out of range (0x%x).\n",
3189 sts->handle);
3190
3191 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3192 if (IS_P3P_TYPE(ha))
3193 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3194 else
3195 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3196 qla2xxx_wake_dpc(vha);
3197 }
3198 return;
3199 }
3200 qla_put_iocbs(sp->qpair, &sp->iores);
3201
3202 if (sp->cmd_type != TYPE_SRB) {
3203 req->outstanding_cmds[handle] = NULL;
3204 ql_dbg(ql_dbg_io, vha, 0x3015,
3205 "Unknown sp->cmd_type %x %p).\n",
3206 sp->cmd_type, sp);
3207 return;
}

/* NVME completion. */
if (sp->type == SRB_NVME_CMD) {
3212 req->outstanding_cmds[handle] = NULL;
3213 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
3214 return;
3215 }
3216
3217 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
3218 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
3219 return;
}

/* Task Management completion. */
if (sp->type == SRB_TM_CMD) {
3224 qla24xx_tm_iocb_entry(vha, req, pkt);
3225 return;
}

/* Fast path completion. */
qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
3230 sp->qpair->cmd_completion_cnt++;
3231
3232 if (comp_status == CS_COMPLETE && scsi_status == 0) {
3233 qla2x00_process_completed_request(vha, req, handle);
3234
3235 return;
3236 }
3237
3238 req->outstanding_cmds[handle] = NULL;
3239 cp = GET_CMD_SP(sp);
3240 if (cp == NULL) {
3241 ql_dbg(ql_dbg_io, vha, 0x3018,
3242 "Command already returned (0x%x/%p).\n",
3243 sts->handle, sp);
3244
3245 return;
3246 }
3247
3248 lscsi_status = scsi_status & STATUS_MASK;
3249
3250 fcport = sp->fcport;
3251
3252 ox_id = 0;
3253 sense_len = par_sense_len = rsp_info_len = resid_len =
3254 fw_resid_len = 0;
3255 if (IS_FWI2_CAPABLE(ha)) {
3256 if (scsi_status & SS_SENSE_LEN_VALID)
3257 sense_len = le32_to_cpu(sts24->sense_len);
3258 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3259 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
3260 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
3261 resid_len = le32_to_cpu(sts24->rsp_residual_count);
3262 if (comp_status == CS_DATA_UNDERRUN)
3263 fw_resid_len = le32_to_cpu(sts24->residual_len);
3264 rsp_info = sts24->data;
3265 sense_data = sts24->data;
3266 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
3267 ox_id = le16_to_cpu(sts24->ox_id);
3268 par_sense_len = sizeof(sts24->data);
3269 sts_qual = le16_to_cpu(sts24->status_qualifier);
3270 } else {
3271 if (scsi_status & SS_SENSE_LEN_VALID)
3272 sense_len = le16_to_cpu(sts->req_sense_length);
3273 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3274 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
3275 resid_len = le32_to_cpu(sts->residual_length);
3276 rsp_info = sts->rsp_info;
3277 sense_data = sts->req_sense_data;
3278 par_sense_len = sizeof(sts->req_sense_data);
}

/* Check for any FCP transport errors. */
if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
/* Sense data is not queued. */
if (IS_FWI2_CAPABLE(ha)) {
3285 sense_data += rsp_info_len;
3286 par_sense_len -= rsp_info_len;
3287 }
3288 if (rsp_info_len > 3 && rsp_info[3]) {
3289 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
3290 "FCP I/O protocol failure (0x%x/0x%x).\n",
3291 rsp_info_len, rsp_info[3]);
3292
3293 res = DID_BUS_BUSY << 16;
3294 goto out;
3295 }
}

/* Check for overrun. */
if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
3300 scsi_status & SS_RESIDUAL_OVER)
comp_status = CS_DATA_OVERRUN;

/*
 * Check the retry delay timer value if we receive a busy or
 * queue full status.
 */
3307 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
3308 lscsi_status == SAM_STAT_BUSY))
qla2x00_set_retry_delay_timestamp(fcport, sts_qual);

/* Based on the host and SCSI status, generate the Linux result. */
3314 switch (comp_status) {
3315 case CS_COMPLETE:
3316 case CS_QUEUE_FULL:
3317 if (scsi_status == 0) {
3318 res = DID_OK << 16;
3319 break;
3320 }
3321 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
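/* Record the residual and screen for mid-layer underflow. */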
3322 resid = resid_len;
3323 scsi_set_resid(cp, resid);
3324
3325 if (!lscsi_status &&
3326 ((unsigned)(scsi_bufflen(cp) - resid) <
3327 cp->underflow)) {
3328 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
3329 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3330 resid, scsi_bufflen(cp));
3331
3332 res = DID_ERROR << 16;
3333 break;
3334 }
3335 }
3336 res = DID_OK << 16 | lscsi_status;
3337
3338 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3339 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
3340 "QUEUE FULL detected.\n");
3341 break;
3342 }
3343 logit = 0;
3344 if (lscsi_status != SS_CHECK_CONDITION)
3345 break;
3346
3347 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3348 if (!(scsi_status & SS_SENSE_LEN_VALID))
3349 break;
3350
3351 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3352 rsp, res);
3353 break;
3354
case CS_DATA_UNDERRUN:
/* Use F/W calculated residual length. */
resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3358 scsi_set_resid(cp, resid);
3359 if (scsi_status & SS_RESIDUAL_UNDER) {
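/* Firmware and IOCB residuals must agree; a mismatch means dropped frames. */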
3360 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3361 ql_log(ql_log_warn, fcport->vha, 0x301d,
3362 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3363 resid, scsi_bufflen(cp));
3364
3365 vha->interface_err_cnt++;
3366
3367 res = DID_ERROR << 16 | lscsi_status;
3368 goto check_scsi_status;
3369 }
3370
3371 if (!lscsi_status &&
3372 ((unsigned)(scsi_bufflen(cp) - resid) <
3373 cp->underflow)) {
3374 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
3375 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3376 resid, scsi_bufflen(cp));
3377
3378 res = DID_ERROR << 16;
3379 break;
3380 }
3381 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
lscsi_status != SAM_STAT_BUSY) {
/*
 * A SCSI status of task set full or busy means the command
 * has not completed; anything else here implies dropped frames.
 */
3388 ql_log(ql_log_warn, fcport->vha, 0x301f,
3389 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3390 resid, scsi_bufflen(cp));
3391
3392 vha->interface_err_cnt++;
3393
3394 res = DID_ERROR << 16 | lscsi_status;
3395 goto check_scsi_status;
3396 } else {
3397 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
3398 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
3399 scsi_status, lscsi_status);
3400 }
3401
3402 res = DID_OK << 16 | lscsi_status;
3403 logit = 0;
3404
check_scsi_status:
/*
 * Check to see if the SCSI status is non-zero; if so, report
 * the SCSI status.
 */
3410 if (lscsi_status != 0) {
3411 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3412 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
3413 "QUEUE FULL detected.\n");
3414 logit = 1;
3415 break;
3416 }
3417 if (lscsi_status != SS_CHECK_CONDITION)
3418 break;
3419
3420 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3421 if (!(scsi_status & SS_SENSE_LEN_VALID))
3422 break;
3423
3424 qla2x00_handle_sense(sp, sense_data, par_sense_len,
3425 sense_len, rsp, res);
3426 }
3427 break;
3428
3429 case CS_PORT_LOGGED_OUT:
3430 case CS_PORT_CONFIG_CHG:
3431 case CS_PORT_BUSY:
3432 case CS_INCOMPLETE:
3433 case CS_PORT_UNAVAILABLE:
3434 case CS_TIMEOUT:
3435 case CS_RESET:
case CS_EDIF_INV_REQ:
/*
 * We are going to have the fc class block the rport while we
 * try to recover, so instruct the mid layer to requeue until
 * the class decides how to handle this.
 */
3443 res = DID_TRANSPORT_DISRUPTED << 16;
3444
3445 if (comp_status == CS_TIMEOUT) {
3446 if (IS_FWI2_CAPABLE(ha))
3447 break;
3448 else if ((le16_to_cpu(sts->status_flags) &
3449 SF_LOGOUT_SENT) == 0)
3450 break;
3451 }
3452
3453 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3454 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
3455 "Port to be marked lost on fcport=%02x%02x%02x, current "
3456 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
3457 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3458 port_state_str[FCS_ONLINE],
3459 comp_status);
3460
3461 qlt_schedule_sess_for_deletion(fcport);
3462 }
3463
3464 break;
3465
3466 case CS_ABORTED:
3467 res = DID_RESET << 16;
3468 break;
3469
3470 case CS_DIF_ERROR:
3471 logit = qla2x00_handle_dif_error(sp, sts24);
3472 res = cp->result;
3473 break;
3474
3475 case CS_TRANSPORT:
3476 res = DID_ERROR << 16;
3477 vha->hw_err_cnt++;
3478
3479 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
3480 break;
3481
3482 if (state_flags & BIT_4)
3483 scmd_printk(KERN_WARNING, cp,
3484 "Unsupported device '%s' found.\n",
3485 cp->device->vendor);
3486 break;
3487
3488 case CS_DMA:
3489 ql_log(ql_log_info, fcport->vha, 0x3022,
3490 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3491 comp_status, scsi_status, res, vha->host_no,
3492 cp->device->id, cp->device->lun, fcport->d_id.b24,
3493 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3494 resid_len, fw_resid_len, sp, cp);
3495 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
3496 pkt, sizeof(*sts24));
3497 res = DID_ERROR << 16;
3498 vha->hw_err_cnt++;
3499 break;
3500 default:
3501 res = DID_ERROR << 16;
3502 break;
3503 }
3504
3505 out:
3506 if (logit)
3507 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
3508 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3509 comp_status, scsi_status, res, vha->host_no,
3510 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
3511 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
3512 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3513 resid_len, fw_resid_len, sp, cp);
3514
3515 if (rsp->status_srb == NULL)
3516 sp->done(sp, res);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
3526 static void
3527 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
3528 {
3529 uint8_t sense_sz = 0;
3530 struct qla_hw_data *ha = rsp->hw;
3531 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3532 srb_t *sp = rsp->status_srb;
3533 struct scsi_cmnd *cp;
3534 uint32_t sense_len;
3535 uint8_t *sense_ptr;
3536
3537 if (!sp || !GET_CMD_SENSE_LEN(sp))
3538 return;
3539
3540 sense_len = GET_CMD_SENSE_LEN(sp);
3541 sense_ptr = GET_CMD_SENSE_PTR(sp);
3542
3543 cp = GET_CMD_SP(sp);
3544 if (cp == NULL) {
3545 ql_log(ql_log_warn, vha, 0x3025,
3546 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
3547
3548 rsp->status_srb = NULL;
3549 return;
3550 }
3551
3552 if (sense_len > sizeof(pkt->data))
3553 sense_sz = sizeof(pkt->data);
3554 else
sense_sz = sense_len;

/* Move sense data. */
if (IS_FWI2_CAPABLE(ha))
3559 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
3560 memcpy(sense_ptr, pkt->data, sense_sz);
3561 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
3562 sense_ptr, sense_sz);
3563
3564 sense_len -= sense_sz;
3565 sense_ptr += sense_sz;
3566
3567 SET_CMD_SENSE_PTR(sp, sense_ptr);
SET_CMD_SENSE_LEN(sp, sense_len);

/* Place command on done queue. */
if (sense_len == 0) {
3572 rsp->status_srb = NULL;
3573 sp->done(sp, cp->result);
3574 }
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Return: 1 to let target-mode code inspect the entry, 0 otherwise.
 */
3584 static int
3585 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
3586 {
3587 srb_t *sp;
3588 struct qla_hw_data *ha = vha->hw;
3589 const char func[] = "ERROR-IOCB";
3590 uint16_t que = MSW(pkt->handle);
3591 struct req_que *req = NULL;
3592 int res = DID_ERROR << 16;
3593
3594 ql_dbg(ql_dbg_async, vha, 0x502a,
3595 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
3596 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
3597
3598 if (que >= ha->max_req_queues || !ha->req_q_map[que])
3599 goto fatal;
3600
3601 req = ha->req_q_map[que];
3602
3603 if (pkt->entry_status & RF_BUSY)
res = DID_BUS_BUSY << 16;

/* Target-mode skip handles carry no SRB; nothing to complete. */
if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
3607 return 0;
3608
3609 switch (pkt->entry_type) {
3610 case NOTIFY_ACK_TYPE:
3611 case STATUS_TYPE:
3612 case STATUS_CONT_TYPE:
3613 case LOGINOUT_PORT_IOCB_TYPE:
3614 case CT_IOCB_TYPE:
3615 case ELS_IOCB_TYPE:
3616 case ABORT_IOCB_TYPE:
3617 case MBX_IOCB_TYPE:
3618 default:
3619 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3620 if (sp) {
3621 qla_put_iocbs(sp->qpair, &sp->iores);
3622 sp->done(sp, res);
3623 return 0;
3624 }
3625 break;
3626
3627 case SA_UPDATE_IOCB_TYPE:
3628 case ABTS_RESP_24XX:
3629 case CTIO_TYPE7:
3630 case CTIO_CRC2:
3631 return 1;
3632 }
3633 fatal:
3634 ql_log(ql_log_warn, vha, 0x5030,
3635 "Error entry - invalid handle/queue (%04x).\n", que);
3636 return 0;
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
3644 static void
3645 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3646 {
3647 uint16_t cnt;
3648 uint32_t mboxes;
3649 __le16 __iomem *wptr;
3650 struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

/* Read all mbox registers? */
WARN_ON_ONCE(ha->mbx_count > 32);
3655 mboxes = (1ULL << ha->mbx_count) - 1;
3656 if (!ha->mcp)
3657 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3658 else
mboxes = ha->mcp->in_mb;

/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
mboxes >>= 1;
wptr = &reg->mailbox1;
3666
3667 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3668 if (mboxes & BIT_0)
3669 ha->mailbox_out[cnt] = rd_reg_word(wptr);
3670
3671 mboxes >>= 1;
3672 wptr++;
3673 }
3674 }
3675
3676 static void
3677 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3678 struct abort_entry_24xx *pkt)
3679 {
3680 const char func[] = "ABT_IOCB";
3681 srb_t *sp;
3682 srb_t *orig_sp = NULL;
3683 struct srb_iocb *abt;
3684
3685 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3686 if (!sp)
3687 return;
3688
3689 abt = &sp->u.iocb_cmd;
3690 abt->u.abt.comp_status = pkt->comp_status;
3691 orig_sp = sp->cmd_sp;
3692
3693 if (orig_sp)
3694 qla_nvme_abort_process_comp_status(pkt, orig_sp);
3695
3696 sp->done(sp, 0);
3697 }
3698
3699 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3700 struct pt_ls4_request *pkt, struct req_que *req)
3701 {
3702 srb_t *sp;
3703 const char func[] = "LS4_IOCB";
3704 uint16_t comp_status;
3705
3706 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3707 if (!sp)
3708 return;
3709
3710 comp_status = le16_to_cpu(pkt->status);
3711 sp->done(sp, comp_status);
}

/**
 * qla_chk_cont_iocb_avail - check that all continuation IOCBs for a
 *   multi-entry packet have arrived before processing starts.
 * @vha: host adapter pointer
 * @rsp: response queue
 * @pkt: head IOCB describing how many continuation IOCBs follow
 * @rsp_q_in: response queue in pointer
 *
 * Return: 0 if all IOCBs have arrived, -EIO otherwise.
 */
3722 static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
3723 struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
3724 {
3725 int start_pkt_ring_index;
3726 u32 iocb_cnt = 0;
3727 int rc = 0;
3728
3729 if (pkt->entry_count == 1)
return rc;

/* ring_index was pre-incremented; point back at the head packet. */
if (rsp->ring_index == 0)
3734 start_pkt_ring_index = rsp->length - 1;
3735 else
3736 start_pkt_ring_index = rsp->ring_index - 1;
3737
if (rsp_q_in < start_pkt_ring_index)
/* The in-pointer wrapped past the end of the ring. */
iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
3741 else
3742 iocb_cnt = rsp_q_in - start_pkt_ring_index;
3743
3744 if (iocb_cnt < pkt->entry_count)
3745 rc = -EIO;
3746
3747 ql_dbg(ql_dbg_init, vha, 0x5091,
3748 "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
3749 __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);
3750
3751 return rc;
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
3759 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
3760 struct rsp_que *rsp)
3761 {
3762 struct sts_entry_24xx *pkt;
3763 struct qla_hw_data *ha = vha->hw;
3764 struct purex_entry_24xx *purex_entry;
3765 struct purex_item *pure_item;
3766 u16 rsp_in = 0, cur_ring_index;
3767 int follow_inptr, is_shadow_hba;
3768
3769 if (!ha->flags.fw_started)
3770 return;
3771
3772 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
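/* Steer this queue's work to the CPU currently taking the interrupt. */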
3773 rsp->qpair->rcv_intr = 1;
3774 qla_cpu_update(rsp->qpair, smp_processor_id());
3775 }
3776
3777 #define __update_rsp_in(_update, _is_shadow_hba, _rsp, _rsp_in) \
3778 do { \
3779 if (_update) { \
3780 _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \
3781 rd_reg_dword_relaxed((_rsp)->rsp_q_in); \
3782 } \
3783 } while (0)
3784
3785 is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
3786 follow_inptr = is_shadow_hba ? ql2xrspq_follow_inptr :
3787 ql2xrspq_follow_inptr_legacy;
3788
3789 __update_rsp_in(follow_inptr, is_shadow_hba, rsp, rsp_in);
3790
3791 while ((likely(follow_inptr &&
3792 rsp->ring_index != rsp_in &&
3793 rsp->ring_ptr->signature != RESPONSE_PROCESSED)) ||
3794 (!follow_inptr &&
3795 rsp->ring_ptr->signature != RESPONSE_PROCESSED)) {
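/* Remember the head of this (possibly multi-entry) packet. */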
3796 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3797 cur_ring_index = rsp->ring_index;
3798
3799 rsp->ring_index++;
3800 if (rsp->ring_index == rsp->length) {
3801 rsp->ring_index = 0;
3802 rsp->ring_ptr = rsp->ring;
3803 } else {
3804 rsp->ring_ptr++;
3805 }
3806
3807 if (pkt->entry_status != 0) {
3808 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3809 goto process_err;
3810
3811 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3812 wmb();
3813 continue;
3814 }
3815 process_err:
3816
3817 switch (pkt->entry_type) {
3818 case STATUS_TYPE:
3819 qla2x00_status_entry(vha, rsp, pkt);
3820 break;
3821 case STATUS_CONT_TYPE:
3822 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3823 break;
3824 case VP_RPT_ID_IOCB_TYPE:
3825 qla24xx_report_id_acquisition(vha,
3826 (struct vp_rpt_id_entry_24xx *)pkt);
3827 break;
3828 case LOGINOUT_PORT_IOCB_TYPE:
3829 qla24xx_logio_entry(vha, rsp->req,
3830 (struct logio_entry_24xx *)pkt);
3831 break;
3832 case CT_IOCB_TYPE:
3833 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3834 break;
3835 case ELS_IOCB_TYPE:
3836 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3837 break;
3838 case ABTS_RECV_24XX:
3839 if (qla_ini_mode_enabled(vha)) {
3840 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3841 if (!pure_item)
3842 break;
3843 qla24xx_queue_purex_item(vha, pure_item,
3844 qla24xx_process_abts);
3845 break;
3846 }
3847 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
IS_QLA28XX(ha)) {
/* ensure that the ATIO queue is empty */
qlt_handle_abts_recv(vha, rsp,
3851 (response_t *)pkt);
3852 break;
3853 } else {
3854 qlt_24xx_process_atio_queue(vha, 1);
3855 }
3856 fallthrough;
3857 case ABTS_RESP_24XX:
3858 case CTIO_TYPE7:
3859 case CTIO_CRC2:
3860 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3861 break;
3862 case PT_LS4_REQUEST:
3863 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3864 rsp->req);
3865 break;
3866 case NOTIFY_ACK_TYPE:
3867 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3868 qlt_response_pkt_all_vps(vha, rsp,
3869 (response_t *)pkt);
3870 else
3871 qla24xxx_nack_iocb_entry(vha, rsp->req,
3872 (struct nack_to_isp *)pkt);
3873 break;
case MARKER_TYPE:
/*
 * Nothing to do: this case only keeps markers out of the
 * default (unknown type) path.
 */
break;
3879 case ABORT_IOCB_TYPE:
3880 qla24xx_abort_iocb_entry(vha, rsp->req,
3881 (struct abort_entry_24xx *)pkt);
3882 break;
3883 case MBX_IOCB_TYPE:
3884 qla24xx_mbx_iocb_entry(vha, rsp->req,
3885 (struct mbx_24xx_entry *)pkt);
3886 break;
3887 case VP_CTRL_IOCB_TYPE:
3888 qla_ctrlvp_completed(vha, rsp->req,
3889 (struct vp_ctrl_entry_24xx *)pkt);
3890 break;
3891 case PUREX_IOCB_TYPE:
3892 purex_entry = (void *)pkt;
3893 switch (purex_entry->els_frame_payload[3]) {
3894 case ELS_RDP:
3895 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3896 if (!pure_item)
3897 break;
3898 qla24xx_queue_purex_item(vha, pure_item,
3899 qla24xx_process_purex_rdp);
3900 break;
3901 case ELS_FPIN:
3902 if (!vha->hw->flags.scm_enabled) {
3903 ql_log(ql_log_warn, vha, 0x5094,
3904 "SCM not active for this port\n");
3905 break;
3906 }
3907 pure_item = qla27xx_copy_fpin_pkt(vha,
3908 (void **)&pkt, &rsp);
3909 __update_rsp_in(follow_inptr, is_shadow_hba,
3910 rsp, rsp_in);
3911 if (!pure_item)
3912 break;
3913 qla24xx_queue_purex_item(vha, pure_item,
3914 qla27xx_process_purex_fpin);
3915 break;
3916
3917 case ELS_AUTH_ELS:
if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
/*
 * ring_ptr and ring_index were pre-incremented above;
 * reset them back to the current packet. The remaining
 * continuation IOCBs can arrive with the next poll or
 * interrupt.
 */
3926 rsp->ring_ptr = (response_t *)pkt;
3927 rsp->ring_index = cur_ring_index;
3928
3929 ql_dbg(ql_dbg_init, vha, 0x5091,
3930 "Defer processing ELS opcode %#x...\n",
3931 purex_entry->els_frame_payload[3]);
3932 return;
3933 }
3934 qla24xx_auth_els(vha, (void **)&pkt, &rsp);
3935 break;
3936 default:
3937 ql_log(ql_log_warn, vha, 0x509c,
3938 "Discarding ELS Request opcode 0x%x\n",
3939 purex_entry->els_frame_payload[3]);
3940 }
3941 break;
3942 case SA_UPDATE_IOCB_TYPE:
3943 qla28xx_sa_update_iocb_entry(vha, rsp->req,
3944 (struct sa_update_28xx *)pkt);
3945 break;
3946
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
3950 "Received unknown response pkt type 0x%x entry status=%x.\n",
3951 pkt->entry_type, pkt->entry_status);
3952 break;
3953 }
3954 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3955 wmb();
}

/* Adjust ring index */
if (IS_P3P_TYPE(ha)) {
3960 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3961
wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
3963 } else {
3964 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
3965 }
3966 }
3967
3968 static void
3969 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3970 {
3971 int rval;
3972 uint32_t cnt;
3973 struct qla_hw_data *ha = vha->hw;
3974 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3975
3976 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3977 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3978 return;
3979
rval = QLA_SUCCESS;
wrt_reg_dword(&reg->iobase_addr, 0x7C00);
rd_reg_dword(&reg->iobase_addr);
wrt_reg_dword(&reg->iobase_window, 0x0001);
for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt) {
wrt_reg_dword(&reg->iobase_window, 0x0001);
udelay(10);
} else {
rval = QLA_FUNCTION_TIMEOUT;
}
}
if (rval == QLA_SUCCESS)
goto next_test;

rval = QLA_SUCCESS;
wrt_reg_dword(&reg->iobase_window, 0x0003);
for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
rval == QLA_SUCCESS; cnt--) {
if (cnt) {
wrt_reg_dword(&reg->iobase_window, 0x0003);
udelay(10);
} else {
rval = QLA_FUNCTION_TIMEOUT;
}
}
if (rval != QLA_SUCCESS)
goto done;

next_test:
if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
ql_log(ql_log_info, vha, 0x504c,
"Additional code -- 0x55AA.\n");

done:
wrt_reg_dword(&reg->iobase_window, 0x0000);
rd_reg_dword(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by the system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
4027 irqreturn_t
4028 qla24xx_intr_handler(int irq, void *dev_id)
4029 {
4030 scsi_qla_host_t *vha;
4031 struct qla_hw_data *ha;
4032 struct device_reg_24xx __iomem *reg;
4033 int status;
4034 unsigned long iter;
4035 uint32_t stat;
4036 uint32_t hccr;
4037 uint16_t mb[8];
4038 struct rsp_que *rsp;
4039 unsigned long flags;
4040 bool process_atio = false;
4041
4042 rsp = (struct rsp_que *) dev_id;
4043 if (!rsp) {
4044 ql_log(ql_log_info, NULL, 0x5059,
4045 "%s: NULL response queue pointer.\n", __func__);
4046 return IRQ_NONE;
4047 }
4048
4049 ha = rsp->hw;
4050 reg = &ha->iobase->isp24;
4051 status = 0;
4052
4053 if (unlikely(pci_channel_offline(ha->pdev)))
4054 return IRQ_HANDLED;
4055
4056 spin_lock_irqsave(&ha->hardware_lock, flags);
4057 vha = pci_get_drvdata(ha->pdev);
4058 for (iter = 50; iter--; ) {
stat = rd_reg_dword(&reg->host_status);
4060 if (qla2x00_check_reg32_for_disconnect(vha, stat))
4061 break;
4062 if (stat & HSRX_RISC_PAUSED) {
4063 if (unlikely(pci_channel_offline(ha->pdev)))
4064 break;
4065
hccr = rd_reg_dword(&reg->hccr);
4067
4068 ql_log(ql_log_warn, vha, 0x504b,
4069 "RISC paused -- HCCR=%x, Dumping firmware.\n",
4070 hccr);
4071
4072 qla2xxx_check_risc_status(vha);
4073
4074 ha->isp_ops->fw_dump(vha);
4075 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4076 break;
4077 } else if ((stat & HSRX_RISC_INT) == 0)
4078 break;
4079
4080 switch (stat & 0xff) {
4081 case INTR_ROM_MB_SUCCESS:
4082 case INTR_ROM_MB_FAILED:
4083 case INTR_MB_SUCCESS:
4084 case INTR_MB_FAILED:
4085 qla24xx_mbx_completion(vha, MSW(stat));
4086 status |= MBX_INTERRUPT;
4087
4088 break;
4089 case INTR_ASYNC_EVENT:
4090 mb[0] = MSW(stat);
mb[1] = rd_reg_word(&reg->mailbox1);
mb[2] = rd_reg_word(&reg->mailbox2);
mb[3] = rd_reg_word(&reg->mailbox3);
4094 qla2x00_async_event(vha, rsp, mb);
4095 break;
4096 case INTR_RSP_QUE_UPDATE:
4097 case INTR_RSP_QUE_UPDATE_83XX:
4098 qla24xx_process_response_queue(vha, rsp);
4099 break;
4100 case INTR_ATIO_QUE_UPDATE_27XX:
4101 case INTR_ATIO_QUE_UPDATE:
4102 process_atio = true;
4103 break;
4104 case INTR_ATIO_RSP_QUE_UPDATE:
4105 process_atio = true;
4106 qla24xx_process_response_queue(vha, rsp);
4107 break;
4108 default:
4109 ql_dbg(ql_dbg_async, vha, 0x504f,
4110 "Unrecognized interrupt type (%d).\n", stat * 0xff);
4111 break;
4112 }
wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
rd_reg_dword_relaxed(&reg->hccr);
4115 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
4116 ndelay(3500);
4117 }
4118 qla2x00_handle_mbx_completion(ha, status);
4119 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4120
4121 if (process_atio) {
4122 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4123 qlt_24xx_process_atio_queue(vha, 0);
4124 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4125 }
4126
4127 return IRQ_HANDLED;
4128 }
4129
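/* MSI-X vector handler dedicated to response queue processing. */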
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
		rd_reg_dword_relaxed(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

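/*
 * Default MSI-X vector handler: services mailbox completions, async
 * events, and response/ATIO queue updates, like the INTx handler but
 * for a single pass.
 */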
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int status;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	unsigned long flags;
	bool process_atio = false;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = rd_reg_dword(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_dword(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);
			vha->hw_err_cnt++;

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = rd_reg_word(&reg->mailbox1);
			mb[2] = rd_reg_word(&reg->mailbox2);
			mb[3] = rd_reg_word(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE_27XX:
		case INTR_ATIO_QUE_UPDATE:
			process_atio = true;
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			process_atio = true;
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (process_atio) {
		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
		qlt_24xx_process_atio_queue(vha, 0);
		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
	}

	return IRQ_HANDLED;
}

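/*
 * Per-queue-pair MSI-X handler; defers all processing to the queue
 * pair's work item on the local CPU.
 */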
irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

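/*
 * Handshake variant of the queue pair handler: explicitly clears the
 * RISC interrupt before queueing the work item.
 */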
irqreturn_t
qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	reg = &ha->iobase->isp24;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

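/* Table mapping MSI-X vector names to their interrupt handlers. */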
struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static const struct qla_init_msix_entry msix_entries[] = {
	{ "default", qla24xx_msix_default },
	{ "rsp_q", qla24xx_msix_rsp_q },
	{ "atio_q", qla83xx_msix_atio_q },
	{ "qpair_multiq", qla2xxx_msix_rsp_q },
	{ "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
};

static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

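/*
 * qla24xx_enable_msix() - Allocate MSI-X vectors (the base vectors,
 * plus one for the ATIO queue when target mode is enabled) and
 * register their handlers.
 */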
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int min_vecs = QLA_BASE_VECTORS;
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		desc.pre_vectors++;
		min_vecs++;
	}

	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
		/* user wants to control IRQ setting for target mode */
		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
		    PCI_IRQ_MSIX);
	} else
		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
		    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
		    &desc);

	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_info, vha, 0x00c6,
		    "MSI-X: Using %d vectors\n", ret);
		ha->msix_count = ret;

		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			/* Queue pairs is the max value minus the base queue pair */
			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted max number of queue pairs: %d.\n", ha->max_qpairs);
		}
	}
	vha->irq_offset = desc.pre_vectors;
	ha->msix_entries = kcalloc(ha->msix_count,
	    sizeof(struct qla_msix_entry),
	    GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto free_irqs;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->entry = i;
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, qentry->name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
		rsp->msix = qentry;
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no,
		    msix_entries[QLA_ATIO_VECTOR].name);
		qentry->in_use = 1;
		ret = request_irq(qentry->vector,
		    msix_entries[QLA_ATIO_VECTOR].handler,
		    0, qentry->name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla2x00_free_irqs(vha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_MQUE_CAPABLE(ha) &&
	    (ha->msixbase && ha->mqiobase && ha->max_qpairs))
		ha->mqenable = 1;
	else
		ha->mqenable = 0;

	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	return ret;

free_irqs:
	pci_free_irq_vectors(ha->pdev);
	goto msix_out;
}

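/*
 * qla2x00_request_irqs() - Set up interrupt delivery for the adapter.
 * Tries MSI-X first, falls back to MSI, and finally to INTa (legacy
 * pin-based interrupts) when neither is supported or enabled.
 */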
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
		goto skip_msi;

	if (ql2xenablemsix == 2)
		goto skip_msix;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}
	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- ret=%d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (ret > 0) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- ret=%d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d -- already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
		/* Set max_qpairs to 0, as MSI-X and MSI are not enabled */
		ha->max_qpairs = 0;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	wrt_reg_word(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

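/*
 * qla2x00_free_irqs() - Release all interrupt resources owned by @vha,
 * whether they were set up as MSI-X vectors or a single MSI/INTa line.
 */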
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;
	struct qla_msix_entry *qentry;
	int i;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context that can occur after we dealloc the
	 * irqs already.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		goto free_irqs;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled) {
		for (i = 0; i < ha->msix_count; i++) {
			qentry = &ha->msix_entries[i];
			if (qentry->have_irq) {
				irq_set_affinity_notifier(qentry->vector, NULL);
				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
			}
		}
		kfree(ha->msix_entries);
		ha->msix_entries = NULL;
		ha->flags.msix_enabled = 0;
		ql_dbg(ql_dbg_init, vha, 0x0042,
		    "Disabled MSI-X.\n");
	} else {
		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
	}

free_irqs:
	pci_free_irq_vectors(ha->pdev);
}

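/*
 * qla25xx_request_irq() - Register the MSI-X handler selected by
 * @vector_type (an index into msix_entries[]) for the given queue pair.
 */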
int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
	struct qla_msix_entry *msix, int vector_type)
{
	const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	scnprintf(msix->name, sizeof(msix->name),
	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->handle = qpair;
	return ret;
}