/*
 * be_cmds.c - BMBX (bootstrap mailbox) and MCC command helpers for the
 * be2iscsi driver: WRB/tag management, command posting, completion
 * processing, and queue create/destroy commands.
 */
#include <scsi/iscsi_proto.h>

#include "be_main.h"
#include "be.h"
#include "be_mgmt.h"

/* descriptions of the bits in the UE_STATUS_LOW register */
static const char * const desc_ue_status_low[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* descriptions of the bits in the UE_STATUS_HIGH register */
static const char * const desc_ue_status_hi[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
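
/**
 * alloc_mcc_wrb - allocate a WRB and tag from the MCC queue
 * @phba: device priv structure instance
 * @ref_tag: tag allocated for this command, returned to the caller
 *
 * Takes mcc_lock, reserves the next free tag and queue slot, and
 * returns a zeroed WRB with tag0 set. Returns NULL if the queue is
 * full or no tag is available.
 */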
struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
				 unsigned int *ref_tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb = NULL;
	unsigned int tag;

	spin_lock(&phba->ctrl.mcc_lock);
	if (mccq->used == mccq->len) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
			    mccq->used, phba->ctrl.mcc_tag_available);
		goto alloc_failed;
	}

	if (!phba->ctrl.mcc_tag_available)
		goto alloc_failed;

	tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
	if (!tag) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
			    phba->ctrl.mcc_tag_available,
			    phba->ctrl.mcc_alloc_index);
		goto alloc_failed;
	}

	/* return this tag for further reference */
	*ref_tag = tag;
	phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
	phba->ctrl.mcc_tag_status[tag] = 0;
	phba->ctrl.ptag_state[tag].tag_state = 0;
	phba->ctrl.ptag_state[tag].cbfn = NULL;
	phba->ctrl.mcc_tag_available--;
	if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
		phba->ctrl.mcc_alloc_index = 0;
	else
		phba->ctrl.mcc_alloc_index++;

	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = tag;
	wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
	queue_head_inc(mccq);
	mccq->used++;

alloc_failed:
	spin_unlock(&phba->ctrl.mcc_lock);
	return wrb;
}
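
/**
 * free_mcc_wrb - return a tag and its WRB slot to the MCC pool
 * @ctrl: function-specific MBX data structure
 * @tag: tag being released
 */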
void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
{
	struct be_queue_info *mccq = &ctrl->mcc_obj.q;

	spin_lock(&ctrl->mcc_lock);
	tag = tag & MCC_Q_CMD_TAG_MASK;
	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
		ctrl->mcc_free_index = 0;
	else
		ctrl->mcc_free_index++;
	ctrl->mcc_tag_available++;
	mccq->used--;
	spin_unlock(&ctrl->mcc_lock);
}
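
/*
 * Typical MCC command flow, as a minimal sketch (compare with
 * be_cmd_set_vlan() later in this file for a real instance):
 *
 *	wrb = alloc_mcc_wrb(phba, &tag);	// reserve a WRB and tag
 *	req = embedded_payload(wrb);		// then fill the request
 *	be_mcc_notify(phba, tag);		// ring the MCCQ doorbell
 *	rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
 */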
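
/**
 * __beiscsi_mcc_compl_status - decode the completion status for a tag
 * @phba: device priv structure instance
 * @tag: tag of the completed command
 * @wrb: if non-NULL, returns the WRB used for an embedded command
 * @mbx_cmd_mem: DMA memory of a non-embedded command, if any
 *
 * Returns 0 on success, -EAGAIN if FW reported an insufficient
 * response buffer, or -EIO for any other completion error.
 */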
int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
			       unsigned int tag,
			       struct be_mcc_wrb **wrb,
			       struct be_dma_mem *mbx_cmd_mem)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	uint16_t status = 0, addl_status = 0, wrb_num = 0;
	struct be_cmd_resp_hdr *mbx_resp_hdr;
	struct be_cmd_req_hdr *mbx_hdr;
	struct be_mcc_wrb *temp_wrb;
	uint32_t mcc_tag_status;
	int rc = 0;

	mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
	status = (mcc_tag_status & CQE_STATUS_MASK);
	addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
			CQE_STATUS_ADDL_SHIFT);

	if (mbx_cmd_mem) {
		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
	} else {
		wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
			  CQE_STATUS_WRB_SHIFT;
		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
		mbx_hdr = embedded_payload(temp_wrb);

		if (wrb)
			*wrb = temp_wrb;
	}

	if (status || addl_status) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
			    mbx_hdr->subsystem, mbx_hdr->opcode,
			    status, addl_status);
		rc = -EIO;
		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
				    BEISCSI_LOG_CONFIG,
				    "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
				    mbx_resp_hdr->response_length,
				    mbx_resp_hdr->actual_resp_len);
			rc = -EAGAIN;
		}
	}

	return rc;
}
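
/**
 * beiscsi_mccq_compl_wait - wait for and process an MCC completion
 * @phba: device priv structure instance
 * @tag: tag of the posted command
 * @wrb: if non-NULL, returns the WRB used for the command
 * @mbx_cmd_mem: DMA memory of a non-embedded command, if any
 *
 * Sleeps until the completion for @tag arrives or the timeout expires.
 * Returns 0 on success, -EINVAL for a bad tag, -EIO if the HBA is in
 * error or the port went offline, -EBUSY on timeout, or the decoded
 * completion error.
 */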
int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
			    unsigned int tag,
			    struct be_mcc_wrb **wrb,
			    struct be_dma_mem *mbx_cmd_mem)
{
	int rc = 0;

	if (!tag || tag > MAX_MCC_CMD) {
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : invalid tag %u\n", tag);
		return -EINVAL;
	}

	if (beiscsi_hba_in_error(phba)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/* wait for the mccq completion */
	rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
				phba->ctrl.mcc_tag_status[tag],
				msecs_to_jiffies(BEISCSI_HOST_MBX_TIMEOUT));

	/*
	 * Return EIO if port is being disabled. Associated DMA memory,
	 * if any, is freed by the caller. When the port goes offline,
	 * the MCCQ is cleaned up, and so is the WRB.
	 */
	if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/*
	 * If the MBX cmd timeout expired, the tag and resources allocated
	 * for the cmd are not freed until FW returns the completion.
	 */
	if (rc <= 0) {
		struct be_dma_mem *tag_mem;

		/*
		 * PCI/DMA memory allocated and posted in non-embedded mode
		 * will have mbx_cmd_mem != NULL.
		 * Save virtual and bus addresses for the command so that it
		 * can be freed later.
		 */
		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
		if (mbx_cmd_mem) {
			tag_mem->size = mbx_cmd_mem->size;
			tag_mem->va = mbx_cmd_mem->va;
			tag_mem->dma = mbx_cmd_mem->dma;
		} else
			tag_mem->size = 0;

		/* first make tag_mem_state visible to all */
		wmb();
		set_bit(MCC_TAG_STATE_TIMEOUT,
			&phba->ctrl.ptag_state[tag].tag_state);

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Completion timed out\n");
		return -EBUSY;
	}

	rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);

	free_mcc_wrb(&phba->ctrl, tag);
	return rc;
}
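
/**
 * beiscsi_process_mbox_compl - check completion of a BMBX command
 * @ctrl: function-specific MBX data structure
 * @compl: completion in the bootstrap mailbox
 *
 * Returns 0 on success, -EBUSY if no completion is pending, or the
 * non-zero completion status reported by FW.
 */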
static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
				      struct be_mcc_compl *compl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
	u16 compl_status, extd_status;

	/*
	 * To check if the valid bit is set, check the entire word, as we
	 * don't know the endianness of the data (an old entry is host
	 * endian while a new entry is little endian).
	 */
	if (!compl->flags) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : BMBX busy, no completion\n");
		return -EBUSY;
	}
	compl->flags = le32_to_cpu(compl->flags);
	WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);

	/*
	 * Swap the status to host endian and extract the completion and
	 * extended status fields.
	 */
	be_dws_le_to_cpu(compl, 4);
	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
			CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
			CQE_STATUS_EXTD_MASK;
	/* clear the flags for the next completion */
	compl->flags = 0;

	if (compl_status == MCC_STATUS_SUCCESS)
		return 0;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
		    hdr->subsystem, hdr->opcode, compl_status, extd_status);
	return compl_status;
}
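
/**
 * beiscsi_process_async_link - handle a link state change event
 * @phba: device priv structure instance
 * @compl: async event completion carrying the link state
 *
 * Updates the port speed and BEISCSI_HBA_LINK_UP state; on link down,
 * all active sessions on the host are failed.
 */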
static void beiscsi_process_async_link(struct beiscsi_hba *phba,
				       struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt;

	evt = (struct be_async_event_link_state *)compl;

	phba->port_speed = evt->port_speed;

	/* update the link state; on link down, fail all sessions */
	if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
		set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Up on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
	} else {
		clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Down on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
		iscsi_host_for_each_session(phba->shost,
					    beiscsi_session_fail);
	}
}

static char *beiscsi_port_misconf_event_msg[] = {
	"Physical Link is functional.",
	"Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
	"Optics of two types installed - Remove one optic or install matching pair of optics.",
	"Incompatible optics - Replace with compatible optics for card to function.",
	"Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics - Replace with Avago Certified optics to enable link operation."
};
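
/**
 * beiscsi_process_async_sli - handle an SLI async event
 * @phba: device priv structure instance
 * @compl: async event completion
 *
 * Only the port-misconfigured event type is processed: the optic state
 * for this physical port is decoded and logged when it changes.
 */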
static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
				      struct be_mcc_compl *compl)
{
	struct be_async_event_sli *async_sli;
	u8 evt_type, state, old_state, le;
	char *sev = KERN_WARNING;
	char *msg = NULL;

	evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
	evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;

	/* processing only the MISCONFIGURED physical port event */
	if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
		return;

	async_sli = (struct be_async_event_sli *)compl;
	state = async_sli->event_data1 >>
		(phba->fw_config.phys_port * 8) & 0xff;
	le = async_sli->event_data2 >>
	     (phba->fw_config.phys_port * 8) & 0xff;

	old_state = phba->optic_state;
	phba->optic_state = state;

	if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
		/* fw reported a state this driver does not know about */
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
			      phba->port_name, async_sli->event_data1);
		return;
	}

	if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
		/* log the link effect for unqualified/uncertified optics */
		if (state > 3)
			msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
				" Link is non-operational." :
				" Link is operational.";
		/* 1 - info */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
			sev = KERN_INFO;
		/* 2 - error */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
			sev = KERN_ERR;
	}

	if (old_state != phba->optic_state)
		__beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
			      phba->port_name,
			      beiscsi_port_misconf_event_msg[state],
			      !msg ? "" : msg);
}
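
/**
 * beiscsi_process_async_event - dispatch an async MCC completion
 * @phba: device priv structure instance
 * @compl: async event completion
 *
 * Decodes the event code from the completion flags and routes link
 * state, iSCSI, and SLI events to their handlers.
 */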
void beiscsi_process_async_event(struct beiscsi_hba *phba,
				 struct be_mcc_compl *compl)
{
	char *sev = KERN_INFO;
	u8 evt_code;

	/* interpret the event code from the async trailer in flags */
	evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
	evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;
	switch (evt_code) {
	case ASYNC_EVENT_CODE_LINK_STATE:
		beiscsi_process_async_link(phba, compl);
		break;
	case ASYNC_EVENT_CODE_ISCSI:
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		sev = KERN_ERR;
		break;
	case ASYNC_EVENT_CODE_SLI:
		beiscsi_process_async_sli(phba, compl);
		break;
	default:
		/* event not registered with the driver */
		sev = KERN_ERR;
	}

	beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
		    evt_code, compl->status, compl->flags);
}
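
/**
 * beiscsi_process_mcc_compl - process a completion from the MCC CQ
 * @ctrl: function-specific MBX data structure
 * @compl: completion to process
 *
 * Records the completion status for the tag, releases resources for
 * commands that already timed out, runs the callback for async-mode
 * tags, and wakes up the waiter for synchronous commands.
 */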
int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
			      struct be_mcc_compl *compl)
{
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u16 compl_status, extd_status;
	struct be_dma_mem *tag_mem;
	unsigned int tag, wrb_idx;

	be_dws_le_to_cpu(compl, 4);
	tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
	wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;

	if (!test_bit(MCC_TAG_STATE_RUNNING,
		      &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX cmd completed but not posted\n");
		return 0;
	}

	/* end MCC with this tag */
	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);

	if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Completion for timeout Command from FW\n");
		/*
		 * Only for non-embedded commands did the waiter save DMA
		 * memory; check the size and free it now that FW has
		 * finally completed the command.
		 */
		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
		if (tag_mem->size) {
			dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
					  tag_mem->va, tag_mem->dma);
			tag_mem->size = 0;
		}
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
			CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
			CQE_STATUS_EXTD_MASK;

	/*
	 * Record the valid bit, WRB index, extended status, and completion
	 * status in mcc_tag_status[tag] for the waiter to decode.
	 */
	ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
	ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
	ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
				     CQE_STATUS_ADDL_MASK;
	ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);

	if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
		if (ctrl->ptag_state[tag].cbfn)
			ctrl->ptag_state[tag].cbfn(phba, tag);
		else
			__beiscsi_log(phba, KERN_ERR,
				      "BC_%d : MBX ASYNC command with no callback\n");
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
		/* just check the completion status and free the wrb */
		__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}
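
/**
 * be_mcc_notify - post a prepared WRB to the adapter
 * @phba: device priv structure instance
 * @tag: tag of the command being posted
 *
 * Marks the tag RUNNING and rings the MCCQ doorbell. Callers in this
 * file hold ctrl->mbox_lock across WRB setup and notify.
 */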
void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	/* make the WRB contents visible before ringing the doorbell */
	wmb();
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}
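
/**
 * be_mbox_db_ready_poll - wait for the BMBX doorbell ready bit
 * @ctrl: function-specific MBX data structure
 *
 * Polls until FW signals it can accept the next bootstrap mailbox
 * write. Returns 0 when ready, -EIO if the HBA is in error or has
 * been removed, -EBUSY on timeout.
 */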
static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
{
	/* poll the ready bit for up to 30 seconds */
#define BEISCSI_MBX_RDY_BIT_TIMEOUT	30000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	unsigned long timeout;
	u32 ready;

	/*
	 * Poll the ready bit, bailing out early if the HBA is in error
	 * or has been removed (register reads as all ones).
	 */
	timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
	do {
		if (beiscsi_hba_in_error(phba))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -EIO;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			return 0;

		if (time_after(jiffies, timeout))
			break;

		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
	} while (!ready);

	beiscsi_log(phba, KERN_ERR,
		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : FW Timed Out\n");
	set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
	return -EBUSY;
}
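
/**
 * be_mbox_notify - post a BMBX command and wait for its completion
 * @ctrl: function-specific MBX data structure
 *
 * Writes the upper then lower halves of the mailbox DMA address to the
 * doorbell, then processes the completion left in the mailbox memory.
 * Returns 0 on success, non-zero otherwise.
 */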
static int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* RDY is set; small delay before the completion is read */
	udelay(1);

	status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
	return status;
}

void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len,
			bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
					   MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}

void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, u32 cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE	651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;

		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* round the multiplier to the closest value */
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}
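
/*
 * Worked example: eq_delay = 64us gives interrupt_rate = 1000000 / 64
 * = 15625/s, so multiplier = round((651042 - 15625) / 15625) =
 * round(40.67) = 41.
 */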

struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_cq_context, coalescwm,
			      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
			      PCI_FUNC(ctrl->pdev->devfn));
	} else {
		req->hdr.version = MBX_CMD_VER2;
		req->page_size = 1;
		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
			      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	} else
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : In be_cmd_cq_create, status=0x%08x\n",
			    status);

	mutex_unlock(&ctrl->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
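
/*
 * Example: q_len = 1024 gives fls(1024) = 11, so the ring size is
 * encoded as 11; fls() of 16 (q_len = 32768) is encoded as 0, which
 * appears to denote the largest ring size the adapter accepts.
 */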

int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			    struct be_queue_info *mccq,
			    struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create_ext *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	mutex_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&phba->ctrl.mbox_lock);

	return status;
}

int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 subsys = 0, opcode = 0;
	int status;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BC_%d : In beiscsi_cmd_q_destroy queue_type : %d\n",
		    queue_type);

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		mutex_unlock(&ctrl->mbox_lock);
		BUG();
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
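
/**
 * be_cmd_create_default_pdu_queue - create a default PDU ring for a ULP
 * @ctrl: function-specific MBX data structure
 * @cq: CQ that the new ring posts received work to
 * @dq: queue info for the ring being created
 * @length: ring length in bytes
 * @entry_size: default buffer size programmed for the ring
 * @is_header: non-zero for the header ring, zero for the data ring
 * @ulp_num: ULP the ring is bound to
 *
 * Issues ISCSI_DEFQ_CREATE and records the returned ring id, ULP
 * number, and doorbell offset in the driver's ring bookkeeping.
 */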
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size, uint8_t is_header,
				    uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	void *ctxt = &req->context;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
					       sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      cq_id_recv, ctxt, cq->id);
	} else {
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
					       sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      cq_id_recv, ctxt, cq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_ring *defq_ring;
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
		if (is_header)
			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
		else
			defq_ring = &phba->phwi_ctrlr->default_pdu_data[ulp_num];

		defq_ring->id = dq->id;

		if (!phba->fw_config.dual_ulp_aware) {
			defq_ring->ulp_num = BEISCSI_ULP0;
			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
		} else {
			defq_ring->ulp_num = resp->ulp_num;
			defq_ring->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);

	return status;
}
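
/**
 * be_cmd_wrbq_create - create a WRB queue
 * @ctrl: function-specific MBX data structure
 * @q_mem: memory details for the queue
 * @wrbq: queue info for the WRB queue being created
 * @pwrb_context: WRB context to record the CID and doorbell offset in
 * @ulp_num: ULP for which the WRBQ is created
 */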
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
		       struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq,
		       struct hwi_wrb_context *pwrb_context,
		       uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;

		pwrb_context->cid = wrbq->id;
		if (!phba->fw_config.dual_ulp_aware) {
			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
			pwrb_context->ulp_num = BEISCSI_ULP0;
		} else {
			pwrb_context->ulp_num = resp->ulp_num;
			pwrb_context->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
				   struct be_dma_mem *q_mem)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_remove_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	/* num_pages of 0xff is passed through to FW unchanged below */
	if (num_pages == 0xff)
		num_pages = 1;

	mutex_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BC_%d : FW CMD to map iscsi frags failed.\n");

			goto error;
		}
	} while (num_pages > 0);
error:
	mutex_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}
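
/**
 * be_cmd_set_vlan - set or disable the VLAN tag on the adapter
 * @phba: device priv structure instance
 * @vlan_tag: tag to be set
 *
 * Returns the MBX command tag on success, 0 on failure. The caller
 * waits for the completion using the returned tag.
 */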
int be_cmd_set_vlan(struct beiscsi_hba *phba,
		    uint16_t vlan_tag)
{
	unsigned int tag;
	struct be_mcc_wrb *wrb;
	struct be_cmd_set_vlan_req *req;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	if (mutex_lock_interruptible(&ctrl->mbox_lock))
		return 0;
	wrb = alloc_mcc_wrb(phba, &tag);
	if (!wrb) {
		mutex_unlock(&ctrl->mbox_lock);
		return 0;
	}

	req = embedded_payload(wrb);
	be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
			   sizeof(*req));

	req->interface_hndl = phba->interface_handle;
	req->vlan_priority = vlan_tag;

	be_mcc_notify(phba, tag);
	mutex_unlock(&ctrl->mbox_lock);

	return tag;
}

int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
			       struct beiscsi_hba *phba)
{
	struct be_dma_mem nonemb_cmd;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_mgmt_controller_attributes *req;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status = 0;

	nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
				sizeof(struct be_mgmt_controller_attributes),
				&nonemb_cmd.dma, GFP_KERNEL);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : dma_alloc_coherent failed in %s\n",
			    __func__);
		return -ENOMEM;
	}
	nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
	req = nonemb_cmd.va;
	memset(req, 0, sizeof(*req));
	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd.size);
	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;

		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : Firmware Version of CMD : %s\n"
			    "Firmware Version is : %s\n"
			    "Developer Build, not performing version check...\n",
			    resp->params.hba_attribs.flashrom_version_string,
			    resp->params.hba_attribs.firmware_version_string);

		phba->fw_config.iscsi_features =
				resp->params.hba_attribs.iscsi_features;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : phba->fw_config.iscsi_features = %d\n",
			    phba->fw_config.iscsi_features);
		memcpy(phba->fw_ver_str,
		       resp->params.hba_attribs.firmware_version_string,
		       BEISCSI_VER_STRLEN);
	} else
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : Failed in beiscsi_check_supported_fw\n");
	mutex_unlock(&ctrl->mbox_lock);
	if (nonemb_cmd.va)
		dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
				  nonemb_cmd.va, nonemb_cmd.dma);

	return status;
}
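
/**
 * beiscsi_get_fw_config - query FW configuration for this function
 * @ctrl: function-specific MBX data structure
 * @phba: device priv structure instance
 *
 * Reads and validates the physical port, EQ/CQ counts, and per-ULP
 * CID/ICD/chain ranges; the driver sizes its resources from these.
 * Returns 0 on success, -EINVAL if any reported value is out of range.
 */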
int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
			  struct beiscsi_hba *phba)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
	uint32_t cid_count, icd_count;
	int status = -EINVAL;
	uint8_t ulp_num = 0;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);

	be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);

	if (be_mbox_notify(ctrl)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : Failed in beiscsi_get_fw_config\n");
		goto fail_init;
	}

	/* validate the physical port id reported by FW */
	phba->fw_config.phys_port = pfw_cfg->phys_port;
	if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : invalid physical port id %d\n",
			    phba->fw_config.phys_port);
		goto fail_init;
	}

	/* EQ/CQ id counts are reported only by chips newer than BE2/BE3-R */
	if (!is_chip_be2_be3r(phba)) {
		phba->fw_config.eqid_count = pfw_cfg->eqid_count;
		phba->fw_config.cqid_count = pfw_cfg->cqid_count;
		if (phba->fw_config.eqid_count == 0 ||
		    phba->fw_config.eqid_count > 2048) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid EQ count %d\n",
				    phba->fw_config.eqid_count);
			goto fail_init;
		}
		if (phba->fw_config.cqid_count == 0 ||
		    phba->fw_config.cqid_count > 4096) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid CQ count %d\n",
				    phba->fw_config.cqid_count);
			goto fail_init;
		}
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : EQ_Count : %d CQ_Count : %d\n",
			    phba->fw_config.eqid_count,
			    phba->fw_config.cqid_count);
	}

	/*
	 * Check on which ULPs iSCSI initiator mode is loaded and record
	 * the CID, ICD and chain ranges that FW reports for each of them.
	 */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (pfw_cfg->ulp[ulp_num].ulp_mode &
		    BEISCSI_ULP_ISCSI_INI_MODE) {
			set_bit(ulp_num, &phba->fw_config.ulp_supported);

			/* Get the CID, ICD and chain count for the ULP */
			phba->fw_config.iscsi_cid_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_base;
			phba->fw_config.iscsi_cid_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_count;

			phba->fw_config.iscsi_icd_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_base;
			phba->fw_config.iscsi_icd_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_count;

			phba->fw_config.iscsi_chain_start[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_base;
			phba->fw_config.iscsi_chain_count[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_count;

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BG_%d : Function loaded on ULP : %d\n"
				    "\tiscsi_cid_count : %d\n"
				    "\tiscsi_cid_start : %d\n"
				    "\tiscsi_icd_count : %d\n"
				    "\tiscsi_icd_start : %d\n",
				    ulp_num,
				    phba->fw_config.iscsi_cid_count[ulp_num],
				    phba->fw_config.iscsi_cid_start[ulp_num],
				    phba->fw_config.iscsi_icd_count[ulp_num],
				    phba->fw_config.iscsi_icd_start[ulp_num]);
		}
	}

	if (phba->fw_config.ulp_supported == 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
			    pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
			    pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
		goto fail_init;
	}

	/*
	 * ICD is shared among ULPs, so the count from any one loaded ULP
	 * suffices for the sanity check.
	 */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			break;
	icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	if (icd_count == 0 || icd_count > 65536) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid ICD count %d\n", icd_count);
		goto fail_init;
	}

	cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
		    BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
	if (cid_count == 0 || cid_count > 4096) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid CID count %d\n", cid_count);
		goto fail_init;
	}

	/* check if FW is dual-ULP aware, i.e. can serve either ULP */
	phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
					  BEISCSI_FUNC_DUA_MODE);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BG_%d : DUA Mode : 0x%x\n",
		    phba->fw_config.dual_ulp_aware);

	/* all set, mark success */
	status = 0;
fail_init:
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
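
/**
 * beiscsi_get_port_name - read the port name letter for this function
 * @ctrl: function-specific MBX data structure
 * @phba: device priv structure instance
 *
 * Stores the port name in phba->port_name, or '?' if it cannot be read.
 */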
int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
{
	int ret = 0;
	struct be_mcc_wrb *wrb;
	struct be_cmd_get_port_name *ioctl;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_GET_PORT_NAME,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ret = be_mbox_notify(ctrl);
	phba->port_name = 0;
	if (!ret) {
		phba->port_name = ioctl->p.resp.port_names >>
				  (phba->fw_config.phys_port * 8) & 0xff;
	} else {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
			    ret, ioctl->h.resp_hdr.status);
	}

	if (phba->port_name == 0)
		phba->port_name = '?';

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}

int beiscsi_set_host_data(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_cmd_set_host_data *ioctl;
	struct be_mcc_wrb *wrb;
	int ret = 0;

	if (is_chip_be2_be3r(phba))
		return ret;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_SET_HOST_DATA,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ioctl->param.req.param_id = BE_CMD_SET_HOST_PARAM_ID;
	ioctl->param.req.param_len =
		snprintf((char *)ioctl->param.req.param_data,
			 sizeof(ioctl->param.req.param_data),
			 "Linux iSCSI v%s", BUILD_STR);
	ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4);
	if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
		ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
	ret = be_mbox_notify(ctrl);
	if (!ret) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : HBA set host driver version\n");
	} else {
		/* older FW versions return this error for SET_HOST_DATA */
		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
		    ret == MCC_STATUS_INVALID_LENGTH)
			__beiscsi_log(phba, KERN_INFO,
				      "BG_%d : HBA failed to set host driver version\n");
	}

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}

int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_cmd_set_features *ioctl;
	struct be_mcc_wrb *wrb;
	int ret = 0;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_SET_FEATURES,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ioctl->feature = BE_CMD_SET_FEATURE_UER;
	ioctl->param_len = sizeof(ioctl->param.req);
	ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
	ret = be_mbox_notify(ctrl);
	if (!ret) {
		phba->ue2rp = ioctl->param.resp.ue2rp;
		set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : HBA error recovery supported\n");
	} else {
		/* older FW versions return this error for SET_FEATURES */
		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
		    ret == MCC_STATUS_INVALID_LENGTH)
			__beiscsi_log(phba, KERN_INFO,
				      "BG_%d : HBA error recovery not supported\n");
	}

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}

static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
{
	u32 sem;

	if (is_chip_be2_be3r(phba))
		sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(phba->pcidev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
	return sem;
}

int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
{
	u32 loop, post, rdy = 0;

	loop = 1000;
	while (loop--) {
		post = beiscsi_get_post_stage(phba);
		if (post & POST_ERROR_BIT)
			break;
		if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
			rdy = 1;
			break;
		}
		msleep(60);
	}

	if (!rdy) {
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : FW not ready 0x%x\n", post);
	}

	return rdy;
}

int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req;
	int status;

	mutex_lock(&ctrl->mbox_lock);

	req = embedded_payload(wrb);
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
	status = be_mbox_notify(ctrl);

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 *endian_check;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	if (load) {
		/* to start communicating */
		*endian_check++ = 0xFF;
		*endian_check++ = 0x12;
		*endian_check++ = 0x34;
		*endian_check++ = 0xFF;
		*endian_check++ = 0xFF;
		*endian_check++ = 0x56;
		*endian_check++ = 0x78;
		*endian_check++ = 0xFF;
	} else {
		/* to stop communicating */
		*endian_check++ = 0xFF;
		*endian_check++ = 0xAA;
		*endian_check++ = 0xBB;
		*endian_check++ = 0xFF;
		*endian_check++ = 0xFF;
		*endian_check++ = 0xCC;
		*endian_check++ = 0xDD;
		*endian_check = 0xFF;
	}
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BC_%d : special WRB message failed\n");
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}

int beiscsi_init_sliport(struct beiscsi_hba *phba)
{
	int status;

	/* check POST stage before talking to FW */
	status = beiscsi_check_fw_rdy(phba);
	if (!status)
		return -EIO;

	/* clear all error states after checking FW rdy */
	phba->state &= ~BEISCSI_HBA_IN_ERR;

	/* check again UER support */
	phba->state &= ~BEISCSI_HBA_UER_SUPP;

	/*
	 * SLI COMMON_FUNCTION_RESET completion is indicated by the BMBX
	 * RDY bit. It should clean up any stale info in FW for this fn.
	 */
	status = beiscsi_cmd_function_reset(phba);
	if (status) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : SLI Function Reset failed\n");
		return status;
	}

	/* indicate driver is loading */
	return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
}
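
/**
 * beiscsi_cmd_iscsi_cleanup - ask FW to clean up iSCSI data structures
 * @phba: device priv structure instance
 * @ulp: ULP whose chute and default PDU rings are cleaned up
 */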
int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct iscsi_cleanup_req_v1 *req_v1;
	struct iscsi_cleanup_req *req;
	u16 hdr_ring_id, data_ring_id;
	struct be_mcc_wrb *wrb;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);

	hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
	data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
	if (is_chip_be2_be3r(phba)) {
		req = embedded_payload(wrb);
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
		req->chute = (1 << ulp);
		/* BE2/BE3 FW takes the ring ids without byte swapping */
		req->hdr_ring_id = hdr_ring_id;
		req->data_ring_id = data_ring_id;
	} else {
		req_v1 = embedded_payload(wrb);
		be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0);
		be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CLEANUP,
				   sizeof(*req_v1));
		req_v1->hdr.version = 1;
		req_v1->chute = (1 << ulp);
		req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id);
		req_v1->data_ring_id = cpu_to_le16(data_ring_id);
	}

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BG_%d : %s failed %d\n", __func__, ulp);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
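
/**
 * beiscsi_detect_ue - detect an unrecoverable error on the adapter
 * @phba: device priv structure instance
 *
 * Reads the UE status registers, masks out ignored bits, and logs any
 * set bits by name. Returns 1 and sets BEISCSI_HBA_IN_UE if a UE is
 * detected, 0 otherwise.
 */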
int beiscsi_detect_ue(struct beiscsi_hba *phba)
{
	uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
	uint32_t ue_hi = 0, ue_lo = 0;
	uint8_t i = 0;
	int ret = 0;

	pci_read_config_dword(phba->pcidev,
			      PCICFG_UE_STATUS_LOW, &ue_lo);
	pci_read_config_dword(phba->pcidev,
			      PCICFG_UE_STATUS_MASK_LOW,
			      &ue_mask_lo);
	pci_read_config_dword(phba->pcidev,
			      PCICFG_UE_STATUS_HIGH,
			      &ue_hi);
	pci_read_config_dword(phba->pcidev,
			      PCICFG_UE_STATUS_MASK_HI,
			      &ue_mask_hi);

	ue_lo = (ue_lo & ~ue_mask_lo);
	ue_hi = (ue_hi & ~ue_mask_hi);

	if (ue_lo || ue_hi) {
		set_bit(BEISCSI_HBA_IN_UE, &phba->state);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : HBA error detected\n");
		ret = 1;
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				__beiscsi_log(phba, KERN_ERR,
					      "BC_%d : UE_LOW %s bit set\n",
					      desc_ue_status_low[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				__beiscsi_log(phba, KERN_ERR,
					      "BC_%d : UE_HIGH %s bit set\n",
					      desc_ue_status_hi[i]);
		}
	}
	return ret;
}
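
/**
 * beiscsi_detect_tpe - detect a transient parity error on the adapter
 * @phba: device priv structure instance
 *
 * Reads the POST stage from the SLIPORT semaphore register. Returns 1
 * and sets BEISCSI_HBA_IN_TPE if the error is recoverable, 0 if the
 * HBA is in an unrecoverable error state.
 */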
int beiscsi_detect_tpe(struct beiscsi_hba *phba)
{
	u32 post, status;
	int ret = 0;

	post = beiscsi_get_post_stage(phba);
	status = post & POST_STAGE_MASK;
	if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
	    POST_STAGE_RECOVERABLE_ERR) {
		set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
		__beiscsi_log(phba, KERN_INFO,
			      "BC_%d : HBA error recoverable: 0x%x\n", post);
		ret = 1;
	} else {
		__beiscsi_log(phba, KERN_INFO,
			      "BC_%d : HBA in UE: 0x%x\n", post);
	}

	return ret;
}