/*
 * QLogic Fibre Channel HBA Driver
 * ISP8044 low-level hardware support routines.
 */
0006
0007 #include <linux/vmalloc.h>
0008 #include <linux/delay.h>
0009
0010 #include "qla_def.h"
0011 #include "qla_gbl.h"
0012
0013 #define TIMEOUT_100_MS 100
0014
0015 static const uint32_t qla8044_reg_tbl[] = {
0016 QLA8044_PEG_HALT_STATUS1,
0017 QLA8044_PEG_HALT_STATUS2,
0018 QLA8044_PEG_ALIVE_COUNTER,
0019 QLA8044_CRB_DRV_ACTIVE,
0020 QLA8044_CRB_DEV_STATE,
0021 QLA8044_CRB_DRV_STATE,
0022 QLA8044_CRB_DRV_SCRATCH,
0023 QLA8044_CRB_DEV_PART_INFO1,
0024 QLA8044_CRB_IDC_VER_MAJOR,
0025 QLA8044_FW_VER_MAJOR,
0026 QLA8044_FW_VER_MINOR,
0027 QLA8044_FW_VER_SUB,
0028 QLA8044_CMDPEG_STATE,
0029 QLA8044_ASIC_TEMP,
0030 };
0031
0032
0033 uint32_t
0034 qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
0035 {
0036 return readl((void __iomem *) (ha->nx_pcibase + addr));
0037 }
0038
0039 void
0040 qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val)
0041 {
0042 writel(val, (void __iomem *)((ha)->nx_pcibase + addr));
0043 }
0044
0045 int
0046 qla8044_rd_direct(struct scsi_qla_host *vha,
0047 const uint32_t crb_reg)
0048 {
0049 struct qla_hw_data *ha = vha->hw;
0050
0051 if (crb_reg < CRB_REG_INDEX_MAX)
0052 return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]);
0053 else
0054 return QLA_FUNCTION_FAILED;
0055 }
0056
0057 void
0058 qla8044_wr_direct(struct scsi_qla_host *vha,
0059 const uint32_t crb_reg,
0060 const uint32_t value)
0061 {
0062 struct qla_hw_data *ha = vha->hw;
0063
0064 if (crb_reg < CRB_REG_INDEX_MAX)
0065 qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value);
0066 }
0067
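/*
 * qla8044_set_win_base - Program this function's CRB window register to
 * 'addr' and read it back to confirm the window was set.
 */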
0068 static int
0069 qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr)
0070 {
0071 uint32_t val;
0072 int ret_val = QLA_SUCCESS;
0073 struct qla_hw_data *ha = vha->hw;
0074
0075 qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr);
0076 val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum));
0077
0078 if (val != addr) {
0079 ql_log(ql_log_warn, vha, 0xb087,
0080 "%s: Failed to set register window : "
0081 "addr written 0x%x, read 0x%x!\n",
0082 __func__, addr, val);
0083 ret_val = QLA_FUNCTION_FAILED;
0084 }
0085 return ret_val;
0086 }
0087
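/*
 * qla8044_rd_reg_indirect - Read a CRB register by first selecting its
 * window with qla8044_set_win_base() and then reading through the
 * wildcard offset.
 */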
0088 static int
0089 qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
0090 {
0091 int ret_val = QLA_SUCCESS;
0092 struct qla_hw_data *ha = vha->hw;
0093
0094 ret_val = qla8044_set_win_base(vha, addr);
0095 if (!ret_val)
0096 *data = qla8044_rd_reg(ha, QLA8044_WILDCARD);
0097 else
0098 ql_log(ql_log_warn, vha, 0xb088,
0099 "%s: failed read of addr 0x%x!\n", __func__, addr);
0100 return ret_val;
0101 }
0102
0103 static int
0104 qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
0105 {
0106 int ret_val = QLA_SUCCESS;
0107 struct qla_hw_data *ha = vha->hw;
0108
0109 ret_val = qla8044_set_win_base(vha, addr);
0110 if (!ret_val)
0111 qla8044_wr_reg(ha, QLA8044_WILDCARD, data);
0112 else
0113 ql_log(ql_log_warn, vha, 0xb089,
0114 "%s: failed wrt to addr 0x%x, data 0x%x\n",
0115 __func__, addr, data);
0116 return ret_val;
0117 }
0126
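/*
 * qla8044_read_write_crb_reg - Read the CRB register at raddr and write
 * the value read to the CRB register at waddr.
 *
 * @vha : Pointer to adapter structure
 * @raddr : CRB address to read from
 * @waddr : CRB address to write to
 */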
0127 static void
0128 qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
0129 uint32_t raddr, uint32_t waddr)
0130 {
0131 uint32_t value;
0132
0133 qla8044_rd_reg_indirect(vha, raddr, &value);
0134 qla8044_wr_reg_indirect(vha, waddr, value);
0135 }
0136
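/*
 * qla8044_poll_wait_for_ready - Poll the register at addr1 until any
 * bit in 'mask' is set, or fail after TIMEOUT_100_MS milliseconds.
 */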
0137 static int
0138 qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
0139 uint32_t mask)
0140 {
0141 unsigned long timeout;
0142 uint32_t temp = 0;
0143
0144
0145 timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
0146 do {
0147 qla8044_rd_reg_indirect(vha, addr1, &temp);
0148 if ((temp & mask) != 0)
0149 break;
0150 if (time_after_eq(jiffies, timeout)) {
0151 ql_log(ql_log_warn, vha, 0xb151,
0152 "Error in processing rdmdio entry\n");
0153 return -1;
0154 }
0155 } while (1);
0156
0157 return 0;
0158 }
0159
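/*
 * qla8044_ipmdio_rd_reg - Issue an MDIO read command for register
 * 'addr' through the command register addr1 and return the value read
 * back from the data register addr3.
 */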
static uint32_t
qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
	uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
{
	uint32_t temp, data = 0;
	int ret = 0;

	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
	if (ret == -1)
		return -1;

	temp = (0x40000000 | addr);
	qla8044_wr_reg_indirect(vha, addr1, temp);

	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
	if (ret == -1)
		return 0;

	qla8044_rd_reg_indirect(vha, addr3, &data);

	return data;
}
0182
0183
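/*
 * qla8044_poll_wait_ipmdio_bus_idle - Poll the MDIO status register
 * (addr2, read via addr1/addr3) until its busy bit (bit 0) clears, or
 * fail after TIMEOUT_100_MS milliseconds.
 */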
0184 static int
0185 qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
0186 uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
0187 {
0188 unsigned long timeout;
0189 uint32_t temp;
0190
0191
0192 timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
0193 do {
0194 temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
0195 if ((temp & 0x1) != 1)
0196 break;
0197 if (time_after_eq(jiffies, timeout)) {
0198 ql_log(ql_log_warn, vha, 0xb152,
0199 "Error in processing mdiobus idle\n");
0200 return -1;
0201 }
0202 } while (1);
0203
0204 return 0;
0205 }
0206
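/*
 * qla8044_ipmdio_wr_reg - Wait for the MDIO interface to become ready,
 * write 'value' to the data register addr3 and the command 'addr' to
 * addr1, then wait for the operation to complete.
 */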
0207 static int
0208 qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
0209 uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
0210 {
0211 int ret = 0;
0212
0213 ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
0214 if (ret == -1)
0215 return -1;
0216
0217 qla8044_wr_reg_indirect(vha, addr3, value);
0218 qla8044_wr_reg_indirect(vha, addr1, addr);
0219
0220 ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
0221 if (ret == -1)
0222 return -1;
0223
0224 return 0;
0225 }
0235
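/*
 * qla8044_rmw_crb_reg - Read a value (from raddr, or from the reset
 * template's saved-value array when index_a is set), apply the
 * mask/shift/or/xor transform described by p_rmw_hdr, and write the
 * result to waddr.
 */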
0236 static void
0237 qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
0238 uint32_t raddr, uint32_t waddr, struct qla8044_rmw *p_rmw_hdr)
0239 {
0240 uint32_t value;
0241
0242 if (p_rmw_hdr->index_a)
0243 value = vha->reset_tmplt.array[p_rmw_hdr->index_a];
0244 else
0245 qla8044_rd_reg_indirect(vha, raddr, &value);
0246 value &= p_rmw_hdr->test_mask;
0247 value <<= p_rmw_hdr->shl;
0248 value >>= p_rmw_hdr->shr;
0249 value |= p_rmw_hdr->or_value;
0250 value ^= p_rmw_hdr->xor_value;
0251 qla8044_wr_reg_indirect(vha, waddr, value);
0252 return;
0253 }
0254
0255 static inline void
0256 qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
0257 {
0258 uint32_t qsnt_state;
0259 struct qla_hw_data *ha = vha->hw;
0260
0261 qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
0262 qsnt_state |= (1 << ha->portnum);
0263 qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
0264 ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n",
0265 __func__, vha->host_no, qsnt_state);
0266 }
0267
0268 void
0269 qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
0270 {
0271 uint32_t qsnt_state;
0272 struct qla_hw_data *ha = vha->hw;
0273
0274 qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
0275 qsnt_state &= ~(1 << ha->portnum);
0276 qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
0277 ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n",
0278 __func__, vha->host_no, qsnt_state);
0279 }
0305
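/*
 * qla8044_lock_recovery - Recover a dead IDC lock: advertise the intent
 * to recover, force-release the lock, and then try to re-acquire it for
 * this function.
 */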
0306 static int
0307 qla8044_lock_recovery(struct scsi_qla_host *vha)
0308 {
0309 uint32_t lock = 0, lockid;
0310 struct qla_hw_data *ha = vha->hw;
0311
0312 lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
0313
0314
0315 if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0)
0316 return QLA_FUNCTION_FAILED;
0317
0318
0319 qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
0320 (ha->portnum <<
0321 IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER);
0322 msleep(200);
0323
0324
0325 lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
0326 if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum <<
0327 IDC_LOCK_RECOVERY_STATE_SHIFT_BITS))
0328 return QLA_FUNCTION_FAILED;
0329
	ql_dbg(ql_dbg_p3p, vha, 0xb08b,
	    "%s:%d: IDC Lock recovery initiated\n",
	    __func__, ha->portnum);
0332
0333
0334 qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
0335 (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
0336 PROCEED_TO_RECOVER);
0337
0338
0339 qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF);
0340 qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
0341
0342
0343 qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0);
0344
0345
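	/* Reading DRV_LOCK acquires the semaphore when it returns nonzero;
	 * on success, bump the acquire count kept in the upper bytes of
	 * DRV_LOCK_ID and record this function as owner in the low byte.
	 */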
	lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
	if (lock) {
		lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum;
		qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid);
		return QLA_SUCCESS;
	}

	return QLA_FUNCTION_FAILED;
0354 }
0355
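/*
 * qla8044_idc_lock - Acquire the IDC hardware semaphore, attempting
 * lock recovery if the same owner holds it past the timeout.
 */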
0356 int
0357 qla8044_idc_lock(struct qla_hw_data *ha)
0358 {
0359 uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0;
0360 uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0;
0361 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
0362
0363 while (status == 0) {
0364
0365 status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
0366
0367 if (status) {
0368
0369
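			/* Lock acquired: bump the acquire count held in
			 * the upper bytes of DRV_LOCK_ID and record this
			 * function as owner in the low byte.
			 */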
0370 lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
0371 lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum;
0372 qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id);
0373 break;
0374 }
0375
0376 if (timeout == 0)
0377 first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
0378
0379 if (++timeout >=
0380 (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) {
0381 tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
0382 func_num = tmo_owner & 0xFF;
0383 lock_cnt = tmo_owner >> 8;
0384 ql_log(ql_log_warn, vha, 0xb114,
0385 "%s: Lock by func %d failed after 2s, lock held "
0386 "by func %d, lock count %d, first_owner %d\n",
0387 __func__, ha->portnum, func_num, lock_cnt,
0388 (first_owner & 0xFF));
0389 if (first_owner != tmo_owner) {
0390
0391
0392
0393
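				/* A different function now owns the lock (or
				 * the owner re-acquired it) while we were
				 * waiting; restart the timeout and keep
				 * retrying.
				 */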
0394 ql_dbg(ql_dbg_p3p, vha, 0xb115,
0395 "%s: %d: IDC lock failed\n",
0396 __func__, ha->portnum);
0397 timeout = 0;
			} else {
				/* The lock is still held by the same owner
				 * seen at the start of the wait; attempt
				 * IDC lock recovery.
				 */
				if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
					/* Recovered and acquired the lock. */
					ret_val = QLA_SUCCESS;
					ql_dbg(ql_dbg_p3p, vha, 0xb116,
					    "%s: IDC lock recovery by %d "
					    "successful...\n", __func__,
					    ha->portnum);
					break;
				}
				/* Recovery failed: another function still
				 * holds the lock, so restart the timeout
				 * and retry.
				 */
				ql_dbg(ql_dbg_p3p, vha, 0xb08a,
				    "%s: IDC lock Recovery by %d "
				    "failed, Retrying timeout\n", __func__,
				    ha->portnum);
				timeout = 0;
			}
0419 }
0420 msleep(QLA8044_DRV_LOCK_MSLEEP);
0421 }
0422 return ret_val;
0423 }
0424
0425 void
0426 qla8044_idc_unlock(struct qla_hw_data *ha)
0427 {
0428 int id;
0429 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
0430
0431 id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
0432
0433 if ((id & 0xFF) != ha->portnum) {
0434 ql_log(ql_log_warn, vha, 0xb118,
0435 "%s: IDC Unlock by %d failed, lock owner is %d!\n",
0436 __func__, ha->portnum, (id & 0xFF));
0437 return;
0438 }
0439
0440
0441 qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
0442 qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
0443 }
0444
0445
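/*
 * qla8044_flash_lock - Acquire the flash hardware semaphore and record
 * this function as the lock owner in FLASH_LOCK_ID.
 */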
0446 static int
0447 qla8044_flash_lock(scsi_qla_host_t *vha)
0448 {
0449 int lock_owner;
0450 int timeout = 0;
0451 uint32_t lock_status = 0;
0452 int ret_val = QLA_SUCCESS;
0453 struct qla_hw_data *ha = vha->hw;
0454
0455 while (lock_status == 0) {
0456 lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
0457 if (lock_status)
0458 break;
0459
0460 if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
0461 lock_owner = qla8044_rd_reg(ha,
0462 QLA8044_FLASH_LOCK_ID);
			ql_log(ql_log_warn, vha, 0xb113,
			    "%s: Simultaneous flash access: lock held by port %d, access attempted by port %d\n",
			    __func__, lock_owner, ha->portnum);
0466 ret_val = QLA_FUNCTION_FAILED;
0467 break;
0468 }
0469 msleep(20);
0470 }
0471 qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
0472 return ret_val;
0473 }
0474
0475 static void
0476 qla8044_flash_unlock(scsi_qla_host_t *vha)
0477 {
0478 struct qla_hw_data *ha = vha->hw;
0479
0480
0481 qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
0482 qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
0483 }
0484
0485
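/*
 * qla8044_flash_lock_recovery - Recover a stale flash lock: try to
 * acquire it and, whether or not that succeeds, force an unlock so a
 * subsequent qla8044_flash_lock() can proceed.
 */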
0486 static
0487 void qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
0488 {
0489
0490 if (qla8044_flash_lock(vha)) {
0491
0492 ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
0493 }
0499
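	/*
	 * Either we got the lock or someone else died while holding it;
	 * in either case, force an unlock so the next flash_lock attempt
	 * can succeed.
	 */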
0500 qla8044_flash_unlock(vha);
0501 }
0502
0503
0504
0505
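/*
 * qla8044_read_flash_data - Read u32_word_count dwords from flash
 * starting at flash_addr into p_data, holding the flash lock for the
 * duration of the transfer.
 */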
0506 static int
0507 qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data,
0508 uint32_t flash_addr, int u32_word_count)
0509 {
0510 int i, ret_val = QLA_SUCCESS;
0511 uint32_t u32_word;
0512
0513 if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
0514 ret_val = QLA_FUNCTION_FAILED;
0515 goto exit_lock_error;
0516 }
0517
0518 if (flash_addr & 0x03) {
0519 ql_log(ql_log_warn, vha, 0xb117,
0520 "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
0521 ret_val = QLA_FUNCTION_FAILED;
0522 goto exit_flash_read;
0523 }
0524
0525 for (i = 0; i < u32_word_count; i++) {
0526 if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
0527 (flash_addr & 0xFFFF0000))) {
			ql_log(ql_log_warn, vha, 0xb119,
			    "%s: failed to write addr 0x%x to "
			    "FLASH_DIRECT_WINDOW!\n",
			    __func__, flash_addr);
0532 ret_val = QLA_FUNCTION_FAILED;
0533 goto exit_flash_read;
0534 }
0535
0536 ret_val = qla8044_rd_reg_indirect(vha,
0537 QLA8044_FLASH_DIRECT_DATA(flash_addr),
0538 &u32_word);
0539 if (ret_val != QLA_SUCCESS) {
0540 ql_log(ql_log_warn, vha, 0xb08c,
0541 "%s: failed to read addr 0x%x!\n",
0542 __func__, flash_addr);
0543 goto exit_flash_read;
0544 }
0545
0546 *(uint32_t *)p_data = u32_word;
0547 p_data = p_data + 4;
0548 flash_addr = flash_addr + 4;
0549 }
0550
0551 exit_flash_read:
0552 qla8044_flash_unlock(vha);
0553
0554 exit_lock_error:
0555 return ret_val;
0556 }
0557
0558
0559
0560
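/*
 * qla8044_read_optrom_data - Read 'length' bytes of flash starting at
 * 'offset' into 'buf' with SCSI requests blocked on the host.
 */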
0561 void *
0562 qla8044_read_optrom_data(struct scsi_qla_host *vha, void *buf,
0563 uint32_t offset, uint32_t length)
0564 {
0565 scsi_block_requests(vha->host);
0566 if (qla8044_read_flash_data(vha, buf, offset, length / 4)
0567 != QLA_SUCCESS) {
0568 ql_log(ql_log_warn, vha, 0xb08d,
0569 "%s: Failed to read from flash\n",
0570 __func__);
0571 }
0572 scsi_unblock_requests(vha->host);
0573 return buf;
0574 }
0575
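/*
 * qla8044_need_reset - Return nonzero if this function must take part
 * in a pending reset: its drv_state ack bit is set, or EEH is busy
 * while other functions are still active.
 */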
0576 static inline int
0577 qla8044_need_reset(struct scsi_qla_host *vha)
0578 {
0579 uint32_t drv_state, drv_active;
0580 int rval;
0581 struct qla_hw_data *ha = vha->hw;
0582
0583 drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
0584 drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
0585
0586 rval = drv_state & (1 << ha->portnum);
0587
0588 if (ha->flags.eeh_busy && drv_active)
0589 rval = 1;
0590 return rval;
0591 }
0601
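/*
 * qla8044_write_list - Process a WRITE_LIST reset-template entry:
 * write each (address, value) pair, with an optional delay between
 * writes.
 */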
0602 static void
0603 qla8044_write_list(struct scsi_qla_host *vha,
0604 struct qla8044_reset_entry_hdr *p_hdr)
0605 {
0606 struct qla8044_entry *p_entry;
0607 uint32_t i;
0608
0609 p_entry = (struct qla8044_entry *)((char *)p_hdr +
0610 sizeof(struct qla8044_reset_entry_hdr));
0611
0612 for (i = 0; i < p_hdr->count; i++, p_entry++) {
0613 qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2);
0614 if (p_hdr->delay)
0615 udelay((uint32_t)(p_hdr->delay));
0616 }
0617 }
0627
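/*
 * qla8044_read_write_list - Process a READ_WRITE_LIST reset-template
 * entry: for each pair, read from arg1 and write the value read to
 * arg2, with an optional delay between operations.
 */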
0628 static void
0629 qla8044_read_write_list(struct scsi_qla_host *vha,
0630 struct qla8044_reset_entry_hdr *p_hdr)
0631 {
0632 struct qla8044_entry *p_entry;
0633 uint32_t i;
0634
0635 p_entry = (struct qla8044_entry *)((char *)p_hdr +
0636 sizeof(struct qla8044_reset_entry_hdr));
0637
0638 for (i = 0; i < p_hdr->count; i++, p_entry++) {
0639 qla8044_read_write_crb_reg(vha, p_entry->arg1,
0640 p_entry->arg2);
0641 if (p_hdr->delay)
0642 udelay((uint32_t)(p_hdr->delay));
0643 }
0644 }
0657
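/*
 * qla8044_poll_reg - Poll a CRB register until (value & test_mask)
 * equals test_result, retrying roughly every duration/10 milliseconds.
 * Returns 0 on success and 1 on timeout or read failure, bumping the
 * template's seq_error count on failure.
 */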
0658 static int
0659 qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
0660 int duration, uint32_t test_mask, uint32_t test_result)
0661 {
0662 uint32_t value = 0;
0663 int timeout_error;
0664 uint8_t retries;
0665 int ret_val = QLA_SUCCESS;
0666
0667 ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
0668 if (ret_val == QLA_FUNCTION_FAILED) {
0669 timeout_error = 1;
0670 goto exit_poll_reg;
0671 }
0672
0673
0674 retries = duration/10;
0675
0676 do {
0677 if ((value & test_mask) != test_result) {
0678 timeout_error = 1;
0679 msleep(duration/10);
0680 ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
0681 if (ret_val == QLA_FUNCTION_FAILED) {
0682 timeout_error = 1;
0683 goto exit_poll_reg;
0684 }
0685 } else {
0686 timeout_error = 0;
0687 break;
0688 }
0689 } while (retries--);
0690
0691 exit_poll_reg:
0692 if (timeout_error) {
0693 vha->reset_tmplt.seq_error++;
0694 ql_log(ql_log_fatal, vha, 0xb090,
0695 "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
0696 __func__, value, test_mask, test_result);
0697 }
0698
0699 return timeout_error;
0700 }
0710
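/*
 * qla8044_poll_list - Process a POLL_LIST reset-template entry: poll
 * each register for the test value; when a poll times out, the two
 * entry addresses are read back before moving on.
 */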
0711 static void
0712 qla8044_poll_list(struct scsi_qla_host *vha,
0713 struct qla8044_reset_entry_hdr *p_hdr)
0714 {
0715 long delay;
0716 struct qla8044_entry *p_entry;
0717 struct qla8044_poll *p_poll;
0718 uint32_t i;
0719 uint32_t value;
0720
0721 p_poll = (struct qla8044_poll *)
0722 ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
0723
0724
0725
0726
0727 p_entry = (struct qla8044_entry *)((char *)p_poll +
0728 sizeof(struct qla8044_poll));
0729
0730 delay = (long)p_hdr->delay;
0731
0732 if (!delay) {
0733 for (i = 0; i < p_hdr->count; i++, p_entry++)
0734 qla8044_poll_reg(vha, p_entry->arg1,
0735 delay, p_poll->test_mask, p_poll->test_value);
0736 } else {
0737 for (i = 0; i < p_hdr->count; i++, p_entry++) {
0738 if (delay) {
0739 if (qla8044_poll_reg(vha,
0740 p_entry->arg1, delay,
0741 p_poll->test_mask,
0742 p_poll->test_value)) {
0743
0744
0745
0746
0747
0748 qla8044_rd_reg_indirect(vha,
0749 p_entry->arg1, &value);
0750 qla8044_rd_reg_indirect(vha,
0751 p_entry->arg2, &value);
0752 }
0753 }
0754 }
0755 }
0756 }
0766
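/*
 * qla8044_poll_write_list - Process a POLL_WRITE_LIST reset-template
 * entry: write the data and address registers of each quad entry, then
 * poll the address register for the expected test value.
 */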
0767 static void
0768 qla8044_poll_write_list(struct scsi_qla_host *vha,
0769 struct qla8044_reset_entry_hdr *p_hdr)
0770 {
0771 long delay;
0772 struct qla8044_quad_entry *p_entry;
0773 struct qla8044_poll *p_poll;
0774 uint32_t i;
0775
0776 p_poll = (struct qla8044_poll *)((char *)p_hdr +
0777 sizeof(struct qla8044_reset_entry_hdr));
0778
0779 p_entry = (struct qla8044_quad_entry *)((char *)p_poll +
0780 sizeof(struct qla8044_poll));
0781
0782 delay = (long)p_hdr->delay;
0783
0784 for (i = 0; i < p_hdr->count; i++, p_entry++) {
0785 qla8044_wr_reg_indirect(vha,
0786 p_entry->dr_addr, p_entry->dr_value);
0787 qla8044_wr_reg_indirect(vha,
0788 p_entry->ar_addr, p_entry->ar_value);
0789 if (delay) {
0790 if (qla8044_poll_reg(vha,
0791 p_entry->ar_addr, delay,
0792 p_poll->test_mask,
0793 p_poll->test_value)) {
0794 ql_dbg(ql_dbg_p3p, vha, 0xb091,
0795 "%s: Timeout Error: poll list, ",
0796 __func__);
0797 ql_dbg(ql_dbg_p3p, vha, 0xb092,
0798 "item_num %d, entry_num %d\n", i,
0799 vha->reset_tmplt.seq_index);
0800 }
0801 }
0802 }
0803 }
0813
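/*
 * qla8044_read_modify_write - Process a READ_MODIFY_WRITE
 * reset-template entry: apply the RMW descriptor to each (raddr, waddr)
 * pair, with an optional delay between entries.
 */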
0814 static void
0815 qla8044_read_modify_write(struct scsi_qla_host *vha,
0816 struct qla8044_reset_entry_hdr *p_hdr)
0817 {
0818 struct qla8044_entry *p_entry;
0819 struct qla8044_rmw *p_rmw_hdr;
0820 uint32_t i;
0821
0822 p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr +
0823 sizeof(struct qla8044_reset_entry_hdr));
0824
0825 p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr +
0826 sizeof(struct qla8044_rmw));
0827
0828 for (i = 0; i < p_hdr->count; i++, p_entry++) {
0829 qla8044_rmw_crb_reg(vha, p_entry->arg1,
0830 p_entry->arg2, p_rmw_hdr);
0831 if (p_hdr->delay)
0832 udelay((uint32_t)(p_hdr->delay));
0833 }
0834 }
0843
0844 static
0845 void qla8044_pause(struct scsi_qla_host *vha,
0846 struct qla8044_reset_entry_hdr *p_hdr)
0847 {
0848 if (p_hdr->delay)
0849 mdelay((uint32_t)((long)p_hdr->delay));
0850 }
0858
0859 static void
0860 qla8044_template_end(struct scsi_qla_host *vha,
0861 struct qla8044_reset_entry_hdr *p_hdr)
0862 {
0863 vha->reset_tmplt.template_end = 1;
0864
0865 if (vha->reset_tmplt.seq_error == 0) {
0866 ql_dbg(ql_dbg_p3p, vha, 0xb093,
0867 "%s: Reset sequence completed SUCCESSFULLY.\n", __func__);
0868 } else {
0869 ql_log(ql_log_fatal, vha, 0xb094,
0870 "%s: Reset sequence completed with some timeout "
0871 "errors.\n", __func__);
0872 }
0873 }
0883
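/*
 * qla8044_poll_read_list - Process a POLL_READ_LIST reset-template
 * entry: write the address register, poll it for the test value, then
 * read the data register and store the result in the template's
 * saved-value array.
 */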
0884 static void
0885 qla8044_poll_read_list(struct scsi_qla_host *vha,
0886 struct qla8044_reset_entry_hdr *p_hdr)
0887 {
0888 long delay;
0889 int index;
0890 struct qla8044_quad_entry *p_entry;
0891 struct qla8044_poll *p_poll;
0892 uint32_t i;
0893 uint32_t value;
0894
0895 p_poll = (struct qla8044_poll *)
0896 ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
0897
0898 p_entry = (struct qla8044_quad_entry *)
0899 ((char *)p_poll + sizeof(struct qla8044_poll));
0900
0901 delay = (long)p_hdr->delay;
0902
0903 for (i = 0; i < p_hdr->count; i++, p_entry++) {
0904 qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
0905 p_entry->ar_value);
0906 if (delay) {
0907 if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
0908 p_poll->test_mask, p_poll->test_value)) {
0909 ql_dbg(ql_dbg_p3p, vha, 0xb095,
0910 "%s: Timeout Error: poll "
0911 "list, ", __func__);
0912 ql_dbg(ql_dbg_p3p, vha, 0xb096,
0913 "Item_num %d, "
0914 "entry_num %d\n", i,
0915 vha->reset_tmplt.seq_index);
0916 } else {
0917 index = vha->reset_tmplt.array_index;
0918 qla8044_rd_reg_indirect(vha,
0919 p_entry->dr_addr, &value);
0920 vha->reset_tmplt.array[index++] = value;
0921 if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
0922 vha->reset_tmplt.array_index = 1;
0923 }
0924 }
0925 }
0926 }
0938
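/*
 * qla8044_process_reset_template - Walk the reset-template entries
 * starting at p_buff and dispatch each opcode (write list, poll list,
 * read-modify-write, pause, etc.) until a SEQ_END/TMPL_END entry or the
 * end of the entry count is reached.
 */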
0939 static void
0940 qla8044_process_reset_template(struct scsi_qla_host *vha,
0941 char *p_buff)
0942 {
0943 int index, entries;
0944 struct qla8044_reset_entry_hdr *p_hdr;
0945 char *p_entry = p_buff;
0946
0947 vha->reset_tmplt.seq_end = 0;
0948 vha->reset_tmplt.template_end = 0;
0949 entries = vha->reset_tmplt.hdr->entries;
0950 index = vha->reset_tmplt.seq_index;
0951
0952 for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) {
0953 p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
0954 switch (p_hdr->cmd) {
0955 case OPCODE_NOP:
0956 break;
0957 case OPCODE_WRITE_LIST:
0958 qla8044_write_list(vha, p_hdr);
0959 break;
0960 case OPCODE_READ_WRITE_LIST:
0961 qla8044_read_write_list(vha, p_hdr);
0962 break;
0963 case OPCODE_POLL_LIST:
0964 qla8044_poll_list(vha, p_hdr);
0965 break;
0966 case OPCODE_POLL_WRITE_LIST:
0967 qla8044_poll_write_list(vha, p_hdr);
0968 break;
0969 case OPCODE_READ_MODIFY_WRITE:
0970 qla8044_read_modify_write(vha, p_hdr);
0971 break;
0972 case OPCODE_SEQ_PAUSE:
0973 qla8044_pause(vha, p_hdr);
0974 break;
0975 case OPCODE_SEQ_END:
0976 vha->reset_tmplt.seq_end = 1;
0977 break;
0978 case OPCODE_TMPL_END:
0979 qla8044_template_end(vha, p_hdr);
0980 break;
0981 case OPCODE_POLL_READ_LIST:
0982 qla8044_poll_read_list(vha, p_hdr);
0983 break;
0984 default:
0985 ql_log(ql_log_fatal, vha, 0xb097,
0986 "%s: Unknown command ==> 0x%04x on "
0987 "entry = %d\n", __func__, p_hdr->cmd, index);
0988 break;
0989 }
0990
0991
0992
0993 p_entry += p_hdr->size;
0994 }
0995 vha->reset_tmplt.seq_index = index;
0996 }
0997
0998 static void
0999 qla8044_process_init_seq(struct scsi_qla_host *vha)
1000 {
1001 qla8044_process_reset_template(vha,
1002 vha->reset_tmplt.init_offset);
1003 if (vha->reset_tmplt.seq_end != 1)
1004 ql_log(ql_log_fatal, vha, 0xb098,
1005 "%s: Abrupt INIT Sub-Sequence end.\n",
1006 __func__);
1007 }
1008
1009 static void
1010 qla8044_process_stop_seq(struct scsi_qla_host *vha)
1011 {
1012 vha->reset_tmplt.seq_index = 0;
1013 qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset);
1014 if (vha->reset_tmplt.seq_end != 1)
1015 ql_log(ql_log_fatal, vha, 0xb099,
1016 "%s: Abrupt STOP Sub-Sequence end.\n", __func__);
1017 }
1018
1019 static void
1020 qla8044_process_start_seq(struct scsi_qla_host *vha)
1021 {
1022 qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset);
1023 if (vha->reset_tmplt.template_end != 1)
1024 ql_log(ql_log_fatal, vha, 0xb09a,
1025 "%s: Abrupt START Sub-Sequence end.\n",
1026 __func__);
1027 }
1028
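/*
 * qla8044_lockless_flash_read_u32 - Read dwords from flash without
 * taking the flash lock, re-programming the direct-access window
 * whenever the read crosses a flash sector boundary.
 */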
1029 static int
1030 qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha,
1031 uint32_t flash_addr, uint8_t *p_data, int u32_word_count)
1032 {
1033 uint32_t i;
1034 uint32_t u32_word;
1035 uint32_t flash_offset;
1036 uint32_t addr = flash_addr;
1037 int ret_val = QLA_SUCCESS;
1038
1039 flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1);
1040
1041 if (addr & 0x3) {
1042 ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n",
1043 __func__, addr);
1044 ret_val = QLA_FUNCTION_FAILED;
1045 goto exit_lockless_read;
1046 }
1047
1048 ret_val = qla8044_wr_reg_indirect(vha,
1049 QLA8044_FLASH_DIRECT_WINDOW, (addr));
1050
1051 if (ret_val != QLA_SUCCESS) {
1052 ql_log(ql_log_fatal, vha, 0xb09c,
1053 "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
1054 __func__, addr);
1055 goto exit_lockless_read;
1056 }
1057
1058
1059 if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
1060 (QLA8044_FLASH_SECTOR_SIZE - 1)) {
1061
1062 for (i = 0; i < u32_word_count; i++) {
1063 ret_val = qla8044_rd_reg_indirect(vha,
1064 QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
1065 if (ret_val != QLA_SUCCESS) {
1066 ql_log(ql_log_fatal, vha, 0xb09d,
1067 "%s: failed to read addr 0x%x!\n",
1068 __func__, addr);
1069 goto exit_lockless_read;
1070 }
1071 *(uint32_t *)p_data = u32_word;
1072 p_data = p_data + 4;
1073 addr = addr + 4;
1074 flash_offset = flash_offset + 4;
1075 if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) {
1076
1077 ret_val = qla8044_wr_reg_indirect(vha,
1078 QLA8044_FLASH_DIRECT_WINDOW, (addr));
1079 if (ret_val != QLA_SUCCESS) {
1080 ql_log(ql_log_fatal, vha, 0xb09f,
1081 "%s: failed to write addr "
1082 "0x%x to FLASH_DIRECT_WINDOW!\n",
1083 __func__, addr);
1084 goto exit_lockless_read;
1085 }
1086 flash_offset = 0;
1087 }
1088 }
1089 } else {
1090
1091 for (i = 0; i < u32_word_count; i++) {
1092 ret_val = qla8044_rd_reg_indirect(vha,
1093 QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
1094 if (ret_val != QLA_SUCCESS) {
1095 ql_log(ql_log_fatal, vha, 0xb0a0,
1096 "%s: failed to read addr 0x%x!\n",
1097 __func__, addr);
1098 goto exit_lockless_read;
1099 }
1100 *(uint32_t *)p_data = u32_word;
1101 p_data = p_data + 4;
1102 addr = addr + 4;
1103 }
1104 }
1105
1106 exit_lockless_read:
1107 return ret_val;
1108 }
1119
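/*
 * qla8044_ms_mem_write_128b - Write 'count' 128-bit chunks to MS
 * (QDR/DDR network) memory through the MIU test agent.
 *
 * @vha : Pointer to adapter structure
 * @addr : 16-byte-aligned destination address
 * @data : Source buffer of dwords
 * @count : Number of 128-bit chunks to write
 */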
1120 static int
1121 qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
1122 uint64_t addr, uint32_t *data, uint32_t count)
1123 {
1124 int i, j, ret_val = QLA_SUCCESS;
1125 uint32_t agt_ctrl;
1126 unsigned long flags;
1127 struct qla_hw_data *ha = vha->hw;
1128
1129
1130 if (addr & 0xF) {
1131 ret_val = QLA_FUNCTION_FAILED;
1132 goto exit_ms_mem_write;
1133 }
1134 write_lock_irqsave(&ha->hw_lock, flags);
1135
1136
1137 ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0);
1138 if (ret_val == QLA_FUNCTION_FAILED) {
1139 ql_log(ql_log_fatal, vha, 0xb0a1,
1140 "%s: write to AGT_ADDR_HI failed!\n", __func__);
1141 goto exit_ms_mem_write_unlock;
1142 }
1143
1144 for (i = 0; i < count; i++, addr += 16) {
1145 if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET,
1146 QLA8044_ADDR_QDR_NET_MAX)) ||
1147 (addr_in_range(addr, QLA8044_ADDR_DDR_NET,
1148 QLA8044_ADDR_DDR_NET_MAX)))) {
1149 ret_val = QLA_FUNCTION_FAILED;
1150 goto exit_ms_mem_write_unlock;
1151 }
1152
1153 ret_val = qla8044_wr_reg_indirect(vha,
1154 MD_MIU_TEST_AGT_ADDR_LO, addr);
1155
1156
1157 ret_val += qla8044_wr_reg_indirect(vha,
1158 MD_MIU_TEST_AGT_WRDATA_LO, *data++);
1159 ret_val += qla8044_wr_reg_indirect(vha,
1160 MD_MIU_TEST_AGT_WRDATA_HI, *data++);
1161 ret_val += qla8044_wr_reg_indirect(vha,
1162 MD_MIU_TEST_AGT_WRDATA_ULO, *data++);
1163 ret_val += qla8044_wr_reg_indirect(vha,
1164 MD_MIU_TEST_AGT_WRDATA_UHI, *data++);
1165 if (ret_val == QLA_FUNCTION_FAILED) {
1166 ql_log(ql_log_fatal, vha, 0xb0a2,
1167 "%s: write to AGT_WRDATA failed!\n",
1168 __func__);
1169 goto exit_ms_mem_write_unlock;
1170 }
1171
1172
1173 ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
1174 MIU_TA_CTL_WRITE_ENABLE);
1175 ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
1176 MIU_TA_CTL_WRITE_START);
1177 if (ret_val == QLA_FUNCTION_FAILED) {
1178 ql_log(ql_log_fatal, vha, 0xb0a3,
1179 "%s: write to AGT_CTRL failed!\n", __func__);
1180 goto exit_ms_mem_write_unlock;
1181 }
1182
1183 for (j = 0; j < MAX_CTL_CHECK; j++) {
1184 ret_val = qla8044_rd_reg_indirect(vha,
1185 MD_MIU_TEST_AGT_CTRL, &agt_ctrl);
1186 if (ret_val == QLA_FUNCTION_FAILED) {
1187 ql_log(ql_log_fatal, vha, 0xb0a4,
1188 "%s: failed to read "
1189 "MD_MIU_TEST_AGT_CTRL!\n", __func__);
1190 goto exit_ms_mem_write_unlock;
1191 }
1192 if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
1193 break;
1194 }
1195
1196
1197 if (j >= MAX_CTL_CHECK) {
1198 ql_log(ql_log_fatal, vha, 0xb0a5,
1199 "%s: MS memory write failed!\n",
1200 __func__);
1201 ret_val = QLA_FUNCTION_FAILED;
1202 goto exit_ms_mem_write_unlock;
1203 }
1204 }
1205
1206 exit_ms_mem_write_unlock:
1207 write_unlock_irqrestore(&ha->hw_lock, flags);
1208
1209 exit_ms_mem_write:
1210 return ret_val;
1211 }
1212
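/*
 * qla8044_copy_bootloader - Read the bootloader image from flash and
 * write it into MS memory at the address and size advertised by the
 * BOOTLOADER_ADDR/BOOTLOADER_SIZE registers.
 */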
1213 static int
1214 qla8044_copy_bootloader(struct scsi_qla_host *vha)
1215 {
1216 uint8_t *p_cache;
1217 uint32_t src, count, size;
1218 uint64_t dest;
1219 int ret_val = QLA_SUCCESS;
1220 struct qla_hw_data *ha = vha->hw;
1221
1222 src = QLA8044_BOOTLOADER_FLASH_ADDR;
1223 dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR);
1224 size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE);
1225
1226
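	/* Round the size up to a 16-byte multiple for 128-bit MS writes. */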
1227 if (size & 0xF)
1228 size = (size + 16) & ~0xF;
1229
1230
1231 count = size/16;
1232
1233 p_cache = vmalloc(size);
1234 if (p_cache == NULL) {
1235 ql_log(ql_log_fatal, vha, 0xb0a6,
1236 "%s: Failed to allocate memory for "
1237 "boot loader cache\n", __func__);
1238 ret_val = QLA_FUNCTION_FAILED;
1239 goto exit_copy_bootloader;
1240 }
1241
1242 ret_val = qla8044_lockless_flash_read_u32(vha, src,
1243 p_cache, size/sizeof(uint32_t));
1244 if (ret_val == QLA_FUNCTION_FAILED) {
1245 ql_log(ql_log_fatal, vha, 0xb0a7,
1246 "%s: Error reading F/W from flash!!!\n", __func__);
1247 goto exit_copy_error;
1248 }
1249 ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n",
1250 __func__);
1251
1252
1253 ret_val = qla8044_ms_mem_write_128b(vha, dest,
1254 (uint32_t *)p_cache, count);
1255 if (ret_val == QLA_FUNCTION_FAILED) {
1256 ql_log(ql_log_fatal, vha, 0xb0a9,
1257 "%s: Error writing F/W to MS !!!\n", __func__);
1258 goto exit_copy_error;
1259 }
1260 ql_dbg(ql_dbg_p3p, vha, 0xb0aa,
1261 "%s: Wrote F/W (size %d) to MS !!!\n",
1262 __func__, size);
1263
1264 exit_copy_error:
1265 vfree(p_cache);
1266
1267 exit_copy_bootloader:
1268 return ret_val;
1269 }
1270
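/*
 * qla8044_restart - Run the stop sequence, capture a minidump if
 * enabled, run the init sequence, copy the bootloader into MS memory,
 * and kick off the start sequence to boot firmware from flash.
 */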
1271 static int
1272 qla8044_restart(struct scsi_qla_host *vha)
1273 {
1274 int ret_val = QLA_SUCCESS;
1275 struct qla_hw_data *ha = vha->hw;
1276
1277 qla8044_process_stop_seq(vha);
1278
1279
1280 if (ql2xmdenable)
1281 qla8044_get_minidump(vha);
1282 else
1283 ql_log(ql_log_fatal, vha, 0xb14c,
1284 "Minidump disabled.\n");
1285
1286 qla8044_process_init_seq(vha);
1287
1288 if (qla8044_copy_bootloader(vha)) {
1289 ql_log(ql_log_fatal, vha, 0xb0ab,
1290 "%s: Copy bootloader, firmware restart failed!\n",
1291 __func__);
1292 ret_val = QLA_FUNCTION_FAILED;
1293 goto exit_restart;
1294 }
1295
1296
1297
1298
1299 qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH);
1300
1301 qla8044_process_start_seq(vha);
1302
1303 exit_restart:
1304 return ret_val;
1305 }
1314
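/*
 * qla8044_check_cmd_peg_status - Poll CMDPEG_STATE until the command
 * PEG reports PHAN_INITIALIZE_COMPLETE or the retry count is exhausted.
 */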
1315 static int
1316 qla8044_check_cmd_peg_status(struct scsi_qla_host *vha)
1317 {
1318 uint32_t val, ret_val = QLA_FUNCTION_FAILED;
1319 int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
1320 struct qla_hw_data *ha = vha->hw;
1321
1322 do {
1323 val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE);
1324 if (val == PHAN_INITIALIZE_COMPLETE) {
1325 ql_dbg(ql_dbg_p3p, vha, 0xb0ac,
1326 "%s: Command Peg initialization "
1327 "complete! state=0x%x\n", __func__, val);
1328 ret_val = QLA_SUCCESS;
1329 break;
1330 }
1331 msleep(CRB_CMDPEG_CHECK_DELAY);
1332 } while (--retries);
1333
1334 return ret_val;
1335 }
1336
1337 static int
1338 qla8044_start_firmware(struct scsi_qla_host *vha)
1339 {
1340 int ret_val = QLA_SUCCESS;
1341
1342 if (qla8044_restart(vha)) {
1343 ql_log(ql_log_fatal, vha, 0xb0ad,
1344 "%s: Restart Error!!!, Need Reset!!!\n",
1345 __func__);
1346 ret_val = QLA_FUNCTION_FAILED;
1347 goto exit_start_fw;
1348 } else
1349 ql_dbg(ql_dbg_p3p, vha, 0xb0af,
1350 "%s: Restart done!\n", __func__);
1351
1352 ret_val = qla8044_check_cmd_peg_status(vha);
1353 if (ret_val) {
1354 ql_log(ql_log_fatal, vha, 0xb0b0,
1355 "%s: Peg not initialized!\n", __func__);
1356 ret_val = QLA_FUNCTION_FAILED;
1357 }
1358
1359 exit_start_fw:
1360 return ret_val;
1361 }
1362
1363 void
1364 qla8044_clear_drv_active(struct qla_hw_data *ha)
1365 {
1366 uint32_t drv_active;
1367 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
1368
1369 drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1370 drv_active &= ~(1 << (ha->portnum));
1371
1372 ql_log(ql_log_info, vha, 0xb0b1,
1373 "%s(%ld): drv_active: 0x%08x\n",
1374 __func__, vha->host_no, drv_active);
1375
1376 qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
1377 }
1378
1379
1380
1381
1382
1383
1384
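/*
 * qla8044_device_bootstrap - Bring the device to the READY state: if
 * the firmware heartbeat is already ticking, just mark the device
 * READY; otherwise recover the flash lock if needed, mark the device
 * INITIALIZING, and restart the firmware.
 */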
1385 static int
1386 qla8044_device_bootstrap(struct scsi_qla_host *vha)
1387 {
1388 int rval = QLA_FUNCTION_FAILED;
1389 int i;
1390 uint32_t old_count = 0, count = 0;
1391 int need_reset = 0;
1392 uint32_t idc_ctrl;
1393 struct qla_hw_data *ha = vha->hw;
1394
1395 need_reset = qla8044_need_reset(vha);
1396
1397 if (!need_reset) {
1398 old_count = qla8044_rd_direct(vha,
1399 QLA8044_PEG_ALIVE_COUNTER_INDEX);
1400
1401 for (i = 0; i < 10; i++) {
1402 msleep(200);
1403
1404 count = qla8044_rd_direct(vha,
1405 QLA8044_PEG_ALIVE_COUNTER_INDEX);
1406 if (count != old_count) {
1407 rval = QLA_SUCCESS;
1408 goto dev_ready;
1409 }
1410 }
1411 qla8044_flash_lock_recovery(vha);
1412 } else {
1413
1414 if (ha->flags.isp82xx_fw_hung)
1415 qla8044_flash_lock_recovery(vha);
1416 }
1417
1418
1419 ql_log(ql_log_info, vha, 0xb0b2,
1420 "%s: HW State: INITIALIZING\n", __func__);
1421 qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1422 QLA8XXX_DEV_INITIALIZING);
1423
1424 qla8044_idc_unlock(ha);
1425 rval = qla8044_start_firmware(vha);
1426 qla8044_idc_lock(ha);
1427
1428 if (rval != QLA_SUCCESS) {
1429 ql_log(ql_log_info, vha, 0xb0b3,
1430 "%s: HW State: FAILED\n", __func__);
1431 qla8044_clear_drv_active(ha);
1432 qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1433 QLA8XXX_DEV_FAILED);
1434 return rval;
1435 }
1436
1437
1438
1439 idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1440 if (idc_ctrl & GRACEFUL_RESET_BIT1) {
1441 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
1442 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
1443 ha->fw_dumped = false;
1444 }
1445
1446 dev_ready:
1447 ql_log(ql_log_info, vha, 0xb0b4,
1448 "%s: HW State: READY\n", __func__);
1449 qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);
1450
1451 return rval;
1452 }
1453
1454
1455 static void
1456 qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
1457 {
1458 u8 *phdr;
1459
1460 if (!vha->reset_tmplt.buff) {
1461 ql_log(ql_log_fatal, vha, 0xb0b5,
1462 "%s: Error Invalid reset_seq_template\n", __func__);
1463 return;
1464 }
1465
1466 phdr = vha->reset_tmplt.buff;
1467 ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
1468 "Reset Template :\n\t0x%X 0x%X 0x%X 0x%X"
1469 "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
1470 "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
1471 *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
1472 *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
1473 *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
1474 *(phdr+13), *(phdr+14), *(phdr+15));
1475 }
1483
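/*
 * qla8044_reset_seq_checksum_test - Validate the reset template by
 * summing it as 16-bit words with end-around carry.
 */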
1484 static int
1485 qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
1486 {
1487 uint32_t sum = 0;
1488 uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
1489 int u16_count = vha->reset_tmplt.hdr->size / sizeof(uint16_t);
1490
1491 while (u16_count-- > 0)
1492 sum += *buff++;
1493
1494 while (sum >> 16)
1495 sum = (sum & 0xFFFF) + (sum >> 16);
1496
1497
1498 if (~sum) {
1499 return QLA_SUCCESS;
1500 } else {
1501 ql_log(ql_log_fatal, vha, 0xb0b7,
1502 "%s: Reset seq checksum failed\n", __func__);
1503 return QLA_FUNCTION_FAILED;
1504 }
1505 }
1506
1507
1508
1509
1510
1511
1512
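/*
 * qla8044_read_reset_template - Read the reset-template header and body
 * from flash, validate the checksum, and record the stop, start and
 * init sequence offsets.
 */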
1513 void
1514 qla8044_read_reset_template(struct scsi_qla_host *vha)
1515 {
1516 uint8_t *p_buff;
1517 uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
1518
1519 vha->reset_tmplt.seq_error = 0;
1520 vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
1521 if (vha->reset_tmplt.buff == NULL) {
1522 ql_log(ql_log_fatal, vha, 0xb0b8,
1523 "%s: Failed to allocate reset template resources\n",
1524 __func__);
1525 goto exit_read_reset_template;
1526 }
1527
1528 p_buff = vha->reset_tmplt.buff;
1529 addr = QLA8044_RESET_TEMPLATE_ADDR;
1530
1531 tmplt_hdr_def_size =
1532 sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
1533
1534 ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
1535 "%s: Read template hdr size %d from Flash\n",
1536 __func__, tmplt_hdr_def_size);
1537
1538
1539 if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
1540 ql_log(ql_log_fatal, vha, 0xb0ba,
1541 "%s: Failed to read reset template\n", __func__);
1542 goto exit_read_template_error;
1543 }
1544
1545 vha->reset_tmplt.hdr =
1546 (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;
1547
1548
1549 tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
1550 if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
1551 (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
1552 ql_log(ql_log_fatal, vha, 0xb0bb,
1553 "%s: Template Header size invalid %d "
1554 "tmplt_hdr_def_size %d!!!\n", __func__,
1555 tmplt_hdr_size, tmplt_hdr_def_size);
1556 goto exit_read_template_error;
1557 }
1558
1559 addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
1560 p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
1561 tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
1562 vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
1563
1564 ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
1565 "%s: Read rest of the template size %d\n",
1566 __func__, vha->reset_tmplt.hdr->size);
1567
1568
1569 if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
1570 ql_log(ql_log_fatal, vha, 0xb0bd,
1571 "%s: Failed to read reset template\n", __func__);
1572 goto exit_read_template_error;
1573 }
1574
1575
1576 if (qla8044_reset_seq_checksum_test(vha)) {
1577 ql_log(ql_log_fatal, vha, 0xb0be,
1578 "%s: Reset Seq checksum failed!\n", __func__);
1579 goto exit_read_template_error;
1580 }
1581
1582 ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
1583 "%s: Reset Seq checksum passed! Get stop, "
1584 "start and init seq offsets\n", __func__);
1585
1586
1587 vha->reset_tmplt.init_offset = vha->reset_tmplt.buff +
1588 vha->reset_tmplt.hdr->init_seq_offset;
1589
1590 vha->reset_tmplt.start_offset = vha->reset_tmplt.buff +
1591 vha->reset_tmplt.hdr->start_seq_offset;
1592
1593 vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff +
1594 vha->reset_tmplt.hdr->hdr_size;
1595
1596 qla8044_dump_reset_seq_hdr(vha);
1597
1598 goto exit_read_reset_template;
1599
1600 exit_read_template_error:
1601 vfree(vha->reset_tmplt.buff);
1602
1603 exit_read_reset_template:
1604 return;
1605 }
1606
1607 void
1608 qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
1609 {
1610 uint32_t idc_ctrl;
1611 struct qla_hw_data *ha = vha->hw;
1612
1613 idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1614 idc_ctrl |= DONTRESET_BIT0;
1615 ql_dbg(ql_dbg_p3p, vha, 0xb0c0,
1616 "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
1617 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
1618 }
1619
1620 static inline void
1621 qla8044_set_rst_ready(struct scsi_qla_host *vha)
1622 {
1623 uint32_t drv_state;
1624 struct qla_hw_data *ha = vha->hw;
1625
1626 drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
1627
1628
1629
1630 drv_state |= (1 << ha->portnum);
1631
1632 ql_log(ql_log_info, vha, 0xb0c1,
1633 "%s(%ld): drv_state: 0x%08x\n",
1634 __func__, vha->host_no, drv_state);
1635 qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
1636 }
1637
1638
1639
1640
1641
1642
1643
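/*
 * qla8044_need_reset_handler - Handle the NEED_RESET device state:
 * clean up outstanding commands, set this function's reset-ack bit,
 * wait for the other active functions to ack, and bootstrap the device
 * if this function should perform the reset.
 */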
1644 static void
1645 qla8044_need_reset_handler(struct scsi_qla_host *vha)
1646 {
1647 uint32_t dev_state = 0, drv_state, drv_active;
1648 unsigned long reset_timeout;
1649 struct qla_hw_data *ha = vha->hw;
1650
1651 ql_log(ql_log_fatal, vha, 0xb0c2,
1652 "%s: Performing ISP error recovery\n", __func__);
1653
1654 if (vha->flags.online) {
1655 qla8044_idc_unlock(ha);
1656 qla2x00_abort_isp_cleanup(vha);
1657 ha->isp_ops->get_flash_version(vha, vha->req->ring);
1658 ha->isp_ops->nvram_config(vha);
1659 qla8044_idc_lock(ha);
1660 }
1661
1662 dev_state = qla8044_rd_direct(vha,
1663 QLA8044_CRB_DEV_STATE_INDEX);
1664 drv_state = qla8044_rd_direct(vha,
1665 QLA8044_CRB_DRV_STATE_INDEX);
1666 drv_active = qla8044_rd_direct(vha,
1667 QLA8044_CRB_DRV_ACTIVE_INDEX);
1668
1669 ql_log(ql_log_info, vha, 0xb0c5,
1670 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n",
1671 __func__, vha->host_no, drv_state, drv_active, dev_state);
1672
1673 qla8044_set_rst_ready(vha);
1674
1675
1676 reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
1677
1678 do {
1679 if (time_after_eq(jiffies, reset_timeout)) {
1680 ql_log(ql_log_info, vha, 0xb0c4,
1681 "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n",
1682 __func__, ha->portnum, drv_state, drv_active);
1683 break;
1684 }
1685
1686 qla8044_idc_unlock(ha);
1687 msleep(1000);
1688 qla8044_idc_lock(ha);
1689
1690 dev_state = qla8044_rd_direct(vha,
1691 QLA8044_CRB_DEV_STATE_INDEX);
1692 drv_state = qla8044_rd_direct(vha,
1693 QLA8044_CRB_DRV_STATE_INDEX);
1694 drv_active = qla8044_rd_direct(vha,
1695 QLA8044_CRB_DRV_ACTIVE_INDEX);
1696 } while (((drv_state & drv_active) != drv_active) &&
1697 (dev_state == QLA8XXX_DEV_NEED_RESET));
1698
1699
1700 if (drv_state != drv_active) {
1701 ql_log(ql_log_info, vha, 0xb0c7,
1702 "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n",
1703 __func__, vha->host_no, ha->portnum,
1704 (drv_active ^ drv_state));
1705 drv_active = drv_active & drv_state;
1706 qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
1707 drv_active);
1708 } else {
1709
1710
1711
1712
1713 if ((ha->flags.nic_core_reset_owner) &&
1714 (dev_state == QLA8XXX_DEV_NEED_RESET)) {
1715 ha->flags.nic_core_reset_owner = 0;
1716 qla8044_device_bootstrap(vha);
1717 return;
1718 }
1719 }
1720
1721
1722 if (!(drv_active & (1 << ha->portnum))) {
1723 ha->flags.nic_core_reset_owner = 0;
1724 return;
1725 }
1726
1727
1728
1729
1730
1731 if (ha->flags.nic_core_reset_owner ||
1732 ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
1733 ha->flags.nic_core_reset_owner = 0;
1734 qla8044_device_bootstrap(vha);
1735 }
1736 }
1737
1738 static void
1739 qla8044_set_drv_active(struct scsi_qla_host *vha)
1740 {
1741 uint32_t drv_active;
1742 struct qla_hw_data *ha = vha->hw;
1743
1744 drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1745
1746
1747
1748 drv_active |= (1 << ha->portnum);
1749
1750 ql_log(ql_log_info, vha, 0xb0c8,
1751 "%s(%ld): drv_active: 0x%08x\n",
1752 __func__, vha->host_no, drv_active);
1753 qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
1754 }
1755
1756 static int
1757 qla8044_check_drv_active(struct scsi_qla_host *vha)
1758 {
1759 uint32_t drv_active;
1760 struct qla_hw_data *ha = vha->hw;
1761
1762 drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1763 if (drv_active & (1 << ha->portnum))
1764 return QLA_SUCCESS;
1765 else
1766 return QLA_TEST_FAILED;
1767 }
1768
1769 static void
1770 qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
1771 {
1772 uint32_t idc_ctrl;
1773 struct qla_hw_data *ha = vha->hw;
1774
1775 idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
1776 idc_ctrl &= ~DONTRESET_BIT0;
1777 ql_log(ql_log_info, vha, 0xb0c9,
1778 "%s: idc_ctrl = %d\n", __func__,
1779 idc_ctrl);
1780 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
1781 }
1782
1783 static int
1784 qla8044_set_idc_ver(struct scsi_qla_host *vha)
1785 {
1786 int idc_ver;
1787 uint32_t drv_active;
1788 int rval = QLA_SUCCESS;
1789 struct qla_hw_data *ha = vha->hw;
1790
1791 drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1792 if (drv_active == (1 << ha->portnum)) {
1793 idc_ver = qla8044_rd_direct(vha,
1794 QLA8044_CRB_DRV_IDC_VERSION_INDEX);
1795 idc_ver &= (~0xFF);
1796 idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
1797 qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
1798 idc_ver);
1799 ql_log(ql_log_info, vha, 0xb0ca,
1800 "%s: IDC version updated to %d\n",
1801 __func__, idc_ver);
1802 } else {
1803 idc_ver = qla8044_rd_direct(vha,
1804 QLA8044_CRB_DRV_IDC_VERSION_INDEX);
1805 idc_ver &= 0xFF;
1806 if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
			ql_log(ql_log_info, vha, 0xb0cb,
			    "%s: qla2xxx driver IDC version %d "
			    "is not compatible with IDC version %d "
			    "of other drivers!\n",
			    __func__, QLA8044_IDC_VER_MAJ_VALUE,
			    idc_ver);
1813 rval = QLA_FUNCTION_FAILED;
1814 goto exit_set_idc_ver;
1815 }
1816 }
1817
1818
1819 idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
1820 idc_ver &= ~(0x03 << (ha->portnum * 2));
1821 idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
1822 qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);
1823
1824 exit_set_idc_ver:
1825 return rval;
1826 }
1827
1828 static int
1829 qla8044_update_idc_reg(struct scsi_qla_host *vha)
1830 {
1831 uint32_t drv_active;
1832 int rval = QLA_SUCCESS;
1833 struct qla_hw_data *ha = vha->hw;
1834
1835 if (vha->flags.init_done)
1836 goto exit_update_idc_reg;
1837
1838 qla8044_idc_lock(ha);
1839 qla8044_set_drv_active(vha);
1840
1841 drv_active = qla8044_rd_direct(vha,
1842 QLA8044_CRB_DRV_ACTIVE_INDEX);
1843
1844
1845
1846 if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba)
1847 qla8044_clear_idc_dontreset(vha);
1848
1849 rval = qla8044_set_idc_ver(vha);
1850 if (rval == QLA_FUNCTION_FAILED)
1851 qla8044_clear_drv_active(ha);
1852 qla8044_idc_unlock(ha);
1853
1854 exit_update_idc_reg:
1855 return rval;
1856 }
1857
1858
1859
1860
1861
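/*
 * qla8044_need_qsnt_handler - Handle the NEED_QUIESCENT state: quiesce
 * I/O, set this function's quiescent-ready bit, and wait for all active
 * functions to ack before moving the device to QUIESCENT.
 */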
1862 static void
1863 qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
1864 {
1865 unsigned long qsnt_timeout;
1866 uint32_t drv_state, drv_active, dev_state;
1867 struct qla_hw_data *ha = vha->hw;
1868
1869 if (vha->flags.online)
1870 qla2x00_quiesce_io(vha);
1871 else
1872 return;
1873
1874 qla8044_set_qsnt_ready(vha);
1875
1876
1877 qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ);
1878 drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
1879 drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
1880
1881
1882
1883 drv_active = drv_active << 1;
1884
1885 while (drv_state != drv_active) {
1886 if (time_after_eq(jiffies, qsnt_timeout)) {
1887
1888
1889
1890 clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1891 qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1892 QLA8XXX_DEV_READY);
1893 qla8044_clear_qsnt_ready(vha);
1894 ql_log(ql_log_info, vha, 0xb0cc,
1895 "Timeout waiting for quiescent ack!!!\n");
1896 return;
1897 }
1898 qla8044_idc_unlock(ha);
1899 msleep(1000);
1900 qla8044_idc_lock(ha);
1901
1902 drv_state = qla8044_rd_direct(vha,
1903 QLA8044_CRB_DRV_STATE_INDEX);
1904 drv_active = qla8044_rd_direct(vha,
1905 QLA8044_CRB_DRV_ACTIVE_INDEX);
1906 drv_active = drv_active << 1;
1907 }
1908
1909
1910 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1911
1912 if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
1913 qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
1914 QLA8XXX_DEV_QUIESCENT);
1915 ql_log(ql_log_info, vha, 0xb0cd,
1916 "%s: HW State: QUIESCENT\n", __func__);
1917 }
1918 }
1919
1920
1921
1922
1923
1924
1925
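/*
 * qla8044_device_state_handler - IDC state machine: loop on the device
 * state (COLD, INITIALIZING, NEED_RESET, NEED_QUIESCENT, QUIESCENT,
 * READY, FAILED) until the device becomes ready or fails.
 */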
1926 int
1927 qla8044_device_state_handler(struct scsi_qla_host *vha)
1928 {
1929 uint32_t dev_state;
1930 int rval = QLA_SUCCESS;
1931 unsigned long dev_init_timeout;
1932 struct qla_hw_data *ha = vha->hw;
1933
1934 rval = qla8044_update_idc_reg(vha);
1935 if (rval == QLA_FUNCTION_FAILED)
1936 goto exit_error;
1937
1938 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1939 ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
1940 "Device state is 0x%x = %s\n",
1941 dev_state, qdev_state(dev_state));
1942
1943
1944 dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
1945
1946 qla8044_idc_lock(ha);
1947
1948 while (1) {
1949 if (time_after_eq(jiffies, dev_init_timeout)) {
1950 if (qla8044_check_drv_active(vha) == QLA_SUCCESS) {
1951 ql_log(ql_log_warn, vha, 0xb0cf,
1952 "%s: Device Init Failed 0x%x = %s\n",
1953 QLA2XXX_DRIVER_NAME, dev_state,
1954 qdev_state(dev_state));
1955 qla8044_wr_direct(vha,
1956 QLA8044_CRB_DEV_STATE_INDEX,
1957 QLA8XXX_DEV_FAILED);
1958 }
1959 }
1960
1961 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
1962 ql_log(ql_log_info, vha, 0xb0d0,
1963 "Device state is 0x%x = %s\n",
1964 dev_state, qdev_state(dev_state));
1965
1966
1967 switch (dev_state) {
1968 case QLA8XXX_DEV_READY:
1969 ha->flags.nic_core_reset_owner = 0;
1970 goto exit;
1971 case QLA8XXX_DEV_COLD:
1972 rval = qla8044_device_bootstrap(vha);
1973 break;
1974 case QLA8XXX_DEV_INITIALIZING:
1975 qla8044_idc_unlock(ha);
1976 msleep(1000);
1977 qla8044_idc_lock(ha);
1978 break;
1979 case QLA8XXX_DEV_NEED_RESET:
1980
1981
1982
1983 qla8044_need_reset_handler(vha);
1984 break;
1985 case QLA8XXX_DEV_NEED_QUIESCENT:
1986
1987 qla8044_need_qsnt_handler(vha);
1988
1989
1990 dev_init_timeout = jiffies +
1991 (ha->fcoe_reset_timeout * HZ);
1992 break;
1993 case QLA8XXX_DEV_QUIESCENT:
1994 ql_log(ql_log_info, vha, 0xb0d1,
1995 "HW State: QUIESCENT\n");
1996
1997 qla8044_idc_unlock(ha);
1998 msleep(1000);
1999 qla8044_idc_lock(ha);
2000
2001
2002 dev_init_timeout = jiffies +
2003 (ha->fcoe_reset_timeout * HZ);
2004 break;
2005 case QLA8XXX_DEV_FAILED:
2006 ha->flags.nic_core_reset_owner = 0;
2007 qla8044_idc_unlock(ha);
2008 qla8xxx_dev_failed_handler(vha);
2009 rval = QLA_FUNCTION_FAILED;
2010 qla8044_idc_lock(ha);
2011 goto exit;
2012 default:
2013 qla8044_idc_unlock(ha);
2014 qla8xxx_dev_failed_handler(vha);
2015 rval = QLA_FUNCTION_FAILED;
2016 qla8044_idc_lock(ha);
2017 goto exit;
2018 }
2019 }
2020 exit:
2021 qla8044_idc_unlock(ha);
2022
2023 exit_error:
2024 return rval;
2025 }
2026
2027
2028
2029
2030
2031
2032
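/*
 * qla8044_check_temp - Read the device temperature state and fail on a
 * panic-level reading or warn when it exceeds the operating range.
 */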
2033 static int
2034 qla8044_check_temp(struct scsi_qla_host *vha)
2035 {
2036 uint32_t temp, temp_state, temp_val;
2037 int status = QLA_SUCCESS;
2038
2039 temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
2040 temp_state = qla82xx_get_temp_state(temp);
2041 temp_val = qla82xx_get_temp_val(temp);
2042
2043 if (temp_state == QLA82XX_TEMP_PANIC) {
2044 ql_log(ql_log_warn, vha, 0xb0d2,
2045 "Device temperature %d degrees C"
2046 " exceeds maximum allowed. Hardware has been shut"
2047 " down\n", temp_val);
2048 status = QLA_FUNCTION_FAILED;
2049 return status;
2050 } else if (temp_state == QLA82XX_TEMP_WARN) {
2051 ql_log(ql_log_warn, vha, 0xb0d3,
2052 "Device temperature %d"
2053 " degrees C exceeds operating range."
2054 " Immediate action needed.\n", temp_val);
2055 }
2056 return 0;
2057 }
2058
2059 int qla8044_read_temperature(scsi_qla_host_t *vha)
2060 {
2061 uint32_t temp;
2062
2063 temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
2064 return qla82xx_get_temp_val(temp);
2065 }
2066
2067
2068
2069
2070
2071
2072
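/*
 * qla8044_check_fw_alive - Detect hung firmware by watching the PEG
 * alive counter; after two timer ticks with no change, dump the PEG
 * halt status registers and report failure.
 */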
2073 int
2074 qla8044_check_fw_alive(struct scsi_qla_host *vha)
2075 {
2076 uint32_t fw_heartbeat_counter;
2077 uint32_t halt_status1, halt_status2;
2078 int status = QLA_SUCCESS;
2079
2080 fw_heartbeat_counter = qla8044_rd_direct(vha,
2081 QLA8044_PEG_ALIVE_COUNTER_INDEX);
2082
2083
2084 if (fw_heartbeat_counter == 0xffffffff) {
2085 ql_dbg(ql_dbg_p3p, vha, 0xb0d4,
2086 "scsi%ld: %s: Device in frozen "
2087 "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
2088 vha->host_no, __func__);
2089 return status;
2090 }
2091
2092 if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
2093 vha->seconds_since_last_heartbeat++;
2094
2095 if (vha->seconds_since_last_heartbeat == 2) {
2096 vha->seconds_since_last_heartbeat = 0;
2097 halt_status1 = qla8044_rd_direct(vha,
2098 QLA8044_PEG_HALT_STATUS1_INDEX);
2099 halt_status2 = qla8044_rd_direct(vha,
2100 QLA8044_PEG_HALT_STATUS2_INDEX);
2101
2102 ql_log(ql_log_info, vha, 0xb0d5,
2103 "scsi(%ld): %s, ISP8044 "
2104 "Dumping hw/fw registers:\n"
2105 " PEG_HALT_STATUS1: 0x%x, "
2106 "PEG_HALT_STATUS2: 0x%x,\n",
2107 vha->host_no, __func__, halt_status1,
2108 halt_status2);
2109 status = QLA_FUNCTION_FAILED;
2110 }
2111 } else
2112 vha->seconds_since_last_heartbeat = 0;
2113
2114 vha->fw_heartbeat_counter = fw_heartbeat_counter;
2115 return status;
2116 }
2117
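/*
 * qla8044_watchdog - Periodic health check: detect hung firmware,
 * over-temperature conditions and NEED_RESET/NEED_QUIESCENT device
 * states, and wake the DPC thread to handle them.
 */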
2118 void
2119 qla8044_watchdog(struct scsi_qla_host *vha)
2120 {
2121 uint32_t dev_state, halt_status;
2122 int halt_status_unrecoverable = 0;
2123 struct qla_hw_data *ha = vha->hw;
2124
2125
2126 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2127 test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
2128 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
2129
2130 if (qla8044_check_fw_alive(vha)) {
2131 ha->flags.isp82xx_fw_hung = 1;
2132 ql_log(ql_log_warn, vha, 0xb10a,
2133 "Firmware hung.\n");
2134 qla82xx_clear_pending_mbx(vha);
2135 }
2136
2137 if (qla8044_check_temp(vha)) {
2138 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
2139 ha->flags.isp82xx_fw_hung = 1;
2140 qla2xxx_wake_dpc(vha);
2141 } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
2142 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
2143 ql_log(ql_log_info, vha, 0xb0d6,
2144 "%s: HW State: NEED RESET!\n",
2145 __func__);
2146 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2147 qla2xxx_wake_dpc(vha);
2148 } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
2149 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
2150 ql_log(ql_log_info, vha, 0xb0d7,
2151 "%s: HW State: NEED QUIES detected!\n",
2152 __func__);
2153 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
2154 qla2xxx_wake_dpc(vha);
2155 } else {
2156
2157 if (ha->flags.isp82xx_fw_hung) {
2158 halt_status = qla8044_rd_direct(vha,
2159 QLA8044_PEG_HALT_STATUS1_INDEX);
2160 if (halt_status &
2161 QLA8044_HALT_STATUS_FW_RESET) {
2162 ql_log(ql_log_fatal, vha,
2163 0xb0d8, "%s: Firmware "
2164 "error detected device "
2165 "is being reset\n",
2166 __func__);
2167 } else if (halt_status &
2168 QLA8044_HALT_STATUS_UNRECOVERABLE) {
2169 halt_status_unrecoverable = 1;
2170 }
2171
2172
2173
2174
2175 if (halt_status_unrecoverable) {
2176 set_bit(ISP_UNRECOVERABLE,
2177 &vha->dpc_flags);
2178 } else {
2179 if (dev_state ==
2180 QLA8XXX_DEV_QUIESCENT) {
2181 set_bit(FCOE_CTX_RESET_NEEDED,
2182 &vha->dpc_flags);
2183 ql_log(ql_log_info, vha, 0xb0d9,
2184 "%s: FW CONTEXT Reset "
2185 "needed!\n", __func__);
2186 } else {
2187 ql_log(ql_log_info, vha,
2188 0xb0da, "%s: "
2189 "detect abort needed\n",
2190 __func__);
2191 set_bit(ISP_ABORT_NEEDED,
2192 &vha->dpc_flags);
2193 }
2194 }
2195 qla2xxx_wake_dpc(vha);
2196 }
2197 }
2198
2199 }
2200 }
2201
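/*
 * qla8044_minidump_process_control - Execute a minidump CRB control
 * entry: a small opcode program (write, read/write, and/or, poll, and
 * saved-state read/write/modify) applied across a strided CRB address
 * range.
 */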
2202 static int
2203 qla8044_minidump_process_control(struct scsi_qla_host *vha,
2204 struct qla8044_minidump_entry_hdr *entry_hdr)
2205 {
2206 struct qla8044_minidump_entry_crb *crb_entry;
2207 uint32_t read_value, opcode, poll_time, addr, index;
2208 uint32_t crb_addr, rval = QLA_SUCCESS;
2209 unsigned long wtime;
2210 struct qla8044_minidump_template_hdr *tmplt_hdr;
2211 int i;
2212 struct qla_hw_data *ha = vha->hw;
2213
2214 ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__);
2215 tmplt_hdr = (struct qla8044_minidump_template_hdr *)
2216 ha->md_tmplt_hdr;
2217 crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr;
2218
2219 crb_addr = crb_entry->addr;
2220 for (i = 0; i < crb_entry->op_count; i++) {
2221 opcode = crb_entry->crb_ctrl.opcode;
2222
2223 if (opcode & QLA82XX_DBG_OPCODE_WR) {
2224 qla8044_wr_reg_indirect(vha, crb_addr,
2225 crb_entry->value_1);
2226 }
2227
2228 if (opcode & QLA82XX_DBG_OPCODE_RW) {
2229 qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2230 qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2231 }
2232
2233 if (opcode & QLA82XX_DBG_OPCODE_AND) {
2234 qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2235 read_value &= crb_entry->value_2;
2236 if (opcode & QLA82XX_DBG_OPCODE_OR) {
2237 read_value |= crb_entry->value_3;
2238 opcode &= ~QLA82XX_DBG_OPCODE_OR;
2239 }
2240 qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2241 }
2242 if (opcode & QLA82XX_DBG_OPCODE_OR) {
2243 qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2244 read_value |= crb_entry->value_3;
2245 qla8044_wr_reg_indirect(vha, crb_addr, read_value);
2246 }
2247 if (opcode & QLA82XX_DBG_OPCODE_POLL) {
2248 poll_time = crb_entry->crb_strd.poll_timeout;
2249 wtime = jiffies + poll_time;
2250 qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
2251
2252 do {
2253 if ((read_value & crb_entry->value_2) ==
2254 crb_entry->value_1) {
2255 break;
2256 } else if (time_after_eq(jiffies, wtime)) {
2257
2258 rval = QLA_FUNCTION_FAILED;
2259 break;
2260 } else {
2261 qla8044_rd_reg_indirect(vha,
2262 crb_addr, &read_value);
2263 }
2264 } while (1);
2265 }
2266
2267 if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
2268 if (crb_entry->crb_strd.state_index_a) {
2269 index = crb_entry->crb_strd.state_index_a;
2270 addr = tmplt_hdr->saved_state_array[index];
2271 } else {
2272 addr = crb_addr;
2273 }
2274
2275 qla8044_rd_reg_indirect(vha, addr, &read_value);
2276 index = crb_entry->crb_ctrl.state_index_v;
2277 tmplt_hdr->saved_state_array[index] = read_value;
2278 }
2279
2280 if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
2281 if (crb_entry->crb_strd.state_index_a) {
2282 index = crb_entry->crb_strd.state_index_a;
2283 addr = tmplt_hdr->saved_state_array[index];
2284 } else {
2285 addr = crb_addr;
2286 }
2287
2288 if (crb_entry->crb_ctrl.state_index_v) {
2289 index = crb_entry->crb_ctrl.state_index_v;
2290 read_value =
2291 tmplt_hdr->saved_state_array[index];
2292 } else {
2293 read_value = crb_entry->value_1;
2294 }
2295
2296 qla8044_wr_reg_indirect(vha, addr, read_value);
2297 }
2298
2299 if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
2300 index = crb_entry->crb_ctrl.state_index_v;
2301 read_value = tmplt_hdr->saved_state_array[index];
2302 read_value <<= crb_entry->crb_ctrl.shl;
2303 read_value >>= crb_entry->crb_ctrl.shr;
2304 if (crb_entry->value_2)
2305 read_value &= crb_entry->value_2;
2306 read_value |= crb_entry->value_3;
2307 read_value += crb_entry->value_1;
2308 tmplt_hdr->saved_state_array[index] = read_value;
2309 }
2310 crb_addr += crb_entry->crb_strd.addr_stride;
2311 }
2312 return rval;
2313 }
2314
2315 static void
2316 qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha,
2317 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2318 {
2319 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
2320 struct qla8044_minidump_entry_crb *crb_hdr;
2321 uint32_t *data_ptr = *d_ptr;
2322
2323 ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__);
2324 crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr;
2325 r_addr = crb_hdr->addr;
2326 r_stride = crb_hdr->crb_strd.addr_stride;
2327 loop_cnt = crb_hdr->op_count;
2328
2329 for (i = 0; i < loop_cnt; i++) {
2330 qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2331 *data_ptr++ = r_addr;
2332 *data_ptr++ = r_value;
2333 r_addr += r_stride;
2334 }
2335 *d_ptr = data_ptr;
2336 }
2337
2338 static int
2339 qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
2340 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2341 {
2342 uint32_t r_addr, r_value, r_data;
2343 uint32_t i, j, loop_cnt;
2344 struct qla8044_minidump_entry_rdmem *m_hdr;
2345 unsigned long flags;
2346 uint32_t *data_ptr = *d_ptr;
2347 struct qla_hw_data *ha = vha->hw;
2348
2349 ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__);
2350 m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr;
2351 r_addr = m_hdr->read_addr;
2352 loop_cnt = m_hdr->read_data_size/16;
2353
2354 ql_dbg(ql_dbg_p3p, vha, 0xb0f0,
2355 "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
2356 __func__, r_addr, m_hdr->read_data_size);
2357
2358 if (r_addr & 0xf) {
2359 ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
2360 "[%s]: Read addr 0x%x not 16 bytes aligned\n",
2361 __func__, r_addr);
2362 return QLA_FUNCTION_FAILED;
2363 }
2364
2365 if (m_hdr->read_data_size % 16) {
2366 ql_dbg(ql_dbg_p3p, vha, 0xb0f2,
2367 "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
2368 __func__, m_hdr->read_data_size);
2369 return QLA_FUNCTION_FAILED;
2370 }
2371
2372 ql_dbg(ql_dbg_p3p, vha, 0xb0f3,
2373 "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
2374 __func__, r_addr, m_hdr->read_data_size, loop_cnt);
2375
2376 write_lock_irqsave(&ha->hw_lock, flags);
2377 for (i = 0; i < loop_cnt; i++) {
2378 qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr);
2379 r_value = 0;
2380 qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value);
2381 r_value = MIU_TA_CTL_ENABLE;
2382 qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
2383 r_value = MIU_TA_CTL_START_ENABLE;
2384 qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
2385
2386 for (j = 0; j < MAX_CTL_CHECK; j++) {
2387 qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
2388 &r_value);
2389 if ((r_value & MIU_TA_CTL_BUSY) == 0)
2390 break;
2391 }
2392
2393 if (j >= MAX_CTL_CHECK) {
2394 write_unlock_irqrestore(&ha->hw_lock, flags);
2395 return QLA_SUCCESS;
2396 }
2397
2398 for (j = 0; j < 4; j++) {
2399 qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j],
2400 &r_data);
2401 *data_ptr++ = r_data;
2402 }
2403
2404 r_addr += 16;
2405 }
2406 write_unlock_irqrestore(&ha->hw_lock, flags);
2407
2408 ql_dbg(ql_dbg_p3p, vha, 0xb0f4,
2409 "Leaving fn: %s datacount: 0x%x\n",
2410 __func__, (loop_cnt * 16));
2411
2412 *d_ptr = data_ptr;
2413 return QLA_SUCCESS;
2414 }
2415
2416
2417 static uint32_t
2418 qla8044_minidump_process_rdrom(struct scsi_qla_host *vha,
2419 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2420 {
2421 uint32_t fl_addr, u32_count, rval;
2422 struct qla8044_minidump_entry_rdrom *rom_hdr;
2423 uint32_t *data_ptr = *d_ptr;
2424
2425 rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr;
2426 fl_addr = rom_hdr->read_addr;
2427 u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t);
2428
2429 ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
2430 __func__, fl_addr, u32_count);
2431
2432 rval = qla8044_lockless_flash_read_u32(vha, fl_addr,
2433 (u8 *)(data_ptr), u32_count);
2434
2435 if (rval != QLA_SUCCESS) {
2436 ql_log(ql_log_fatal, vha, 0xb0f6,
2437 "%s: Flash Read Error,Count=%d\n", __func__, u32_count);
2438 return QLA_FUNCTION_FAILED;
2439 } else {
2440 data_ptr += u32_count;
2441 *d_ptr = data_ptr;
2442 return QLA_SUCCESS;
2443 }
2444 }
2445
2446 static void
2447 qla8044_mark_entry_skipped(struct scsi_qla_host *vha,
2448 struct qla8044_minidump_entry_hdr *entry_hdr, int index)
2449 {
2450 entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
2451
2452 ql_log(ql_log_info, vha, 0xb0f7,
2453 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
2454 vha->host_no, index, entry_hdr->entry_type,
2455 entry_hdr->d_ctrl.entry_capture_mask);
2456 }
2457
2458 static int
2459 qla8044_minidump_process_l2tag(struct scsi_qla_host *vha,
2460 struct qla8044_minidump_entry_hdr *entry_hdr,
2461 uint32_t **d_ptr)
2462 {
2463 uint32_t addr, r_addr, c_addr, t_r_addr;
2464 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
2465 unsigned long p_wait, w_time, p_mask;
2466 uint32_t c_value_w, c_value_r;
2467 struct qla8044_minidump_entry_cache *cache_hdr;
2468 int rval = QLA_FUNCTION_FAILED;
2469 uint32_t *data_ptr = *d_ptr;
2470
2471 ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__);
2472 cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
2473
2474 loop_count = cache_hdr->op_count;
2475 r_addr = cache_hdr->read_addr;
2476 c_addr = cache_hdr->control_addr;
2477 c_value_w = cache_hdr->cache_ctrl.write_value;
2478
2479 t_r_addr = cache_hdr->tag_reg_addr;
2480 t_value = cache_hdr->addr_ctrl.init_tag_value;
2481 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
2482 p_wait = cache_hdr->cache_ctrl.poll_wait;
2483 p_mask = cache_hdr->cache_ctrl.poll_mask;
2484
2485 for (i = 0; i < loop_count; i++) {
2486 qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
2487 if (c_value_w)
2488 qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
2489
2490 if (p_mask) {
2491 w_time = jiffies + p_wait;
2492 do {
2493 qla8044_rd_reg_indirect(vha, c_addr,
2494 &c_value_r);
2495 if ((c_value_r & p_mask) == 0) {
2496 break;
2497 } else if (time_after_eq(jiffies, w_time)) {
2498
2499 return rval;
2500 }
2501 } while (1);
2502 }
2503
2504 addr = r_addr;
2505 for (k = 0; k < r_cnt; k++) {
2506 qla8044_rd_reg_indirect(vha, addr, &r_value);
2507 *data_ptr++ = r_value;
2508 addr += cache_hdr->read_ctrl.read_addr_stride;
2509 }
2510 t_value += cache_hdr->addr_ctrl.tag_value_stride;
2511 }
2512 *d_ptr = data_ptr;
2513 return QLA_SUCCESS;
2514 }
2515
2516 static void
2517 qla8044_minidump_process_l1cache(struct scsi_qla_host *vha,
2518 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2519 {
2520 uint32_t addr, r_addr, c_addr, t_r_addr;
2521 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
2522 uint32_t c_value_w;
2523 struct qla8044_minidump_entry_cache *cache_hdr;
2524 uint32_t *data_ptr = *d_ptr;
2525
2526 cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
2527 loop_count = cache_hdr->op_count;
2528 r_addr = cache_hdr->read_addr;
2529 c_addr = cache_hdr->control_addr;
2530 c_value_w = cache_hdr->cache_ctrl.write_value;
2531
2532 t_r_addr = cache_hdr->tag_reg_addr;
2533 t_value = cache_hdr->addr_ctrl.init_tag_value;
2534 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
2535
2536 for (i = 0; i < loop_count; i++) {
2537 qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
2538 qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
2539 addr = r_addr;
2540 for (k = 0; k < r_cnt; k++) {
2541 qla8044_rd_reg_indirect(vha, addr, &r_value);
2542 *data_ptr++ = r_value;
2543 addr += cache_hdr->read_ctrl.read_addr_stride;
2544 }
2545 t_value += cache_hdr->addr_ctrl.tag_value_stride;
2546 }
2547 *d_ptr = data_ptr;
2548 }
2549
2550 static void
2551 qla8044_minidump_process_rdocm(struct scsi_qla_host *vha,
2552 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2553 {
2554 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
2555 struct qla8044_minidump_entry_rdocm *ocm_hdr;
2556 uint32_t *data_ptr = *d_ptr;
2557 struct qla_hw_data *ha = vha->hw;
2558
2559 ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__);
2560
2561 ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr;
2562 r_addr = ocm_hdr->read_addr;
2563 r_stride = ocm_hdr->read_addr_stride;
2564 loop_cnt = ocm_hdr->op_count;
2565
2566 ql_dbg(ql_dbg_p3p, vha, 0xb0fa,
2567 "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
2568 __func__, r_addr, r_stride, loop_cnt);
2569
2570 for (i = 0; i < loop_cnt; i++) {
2571 r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
2572 *data_ptr++ = r_value;
2573 r_addr += r_stride;
2574 }
2575 ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n",
2576 __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t)));
2577
2578 *d_ptr = data_ptr;
2579 }
2580
2581 static void
2582 qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
2583 struct qla8044_minidump_entry_hdr *entry_hdr,
2584 uint32_t **d_ptr)
2585 {
2586 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value = 0;
2587 struct qla8044_minidump_entry_mux *mux_hdr;
2588 uint32_t *data_ptr = *d_ptr;
2589
2590 ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__);
2591
2592 mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr;
2593 r_addr = mux_hdr->read_addr;
2594 s_addr = mux_hdr->select_addr;
2595 s_stride = mux_hdr->select_value_stride;
2596 s_value = mux_hdr->select_value;
2597 loop_cnt = mux_hdr->op_count;
2598
2599 for (i = 0; i < loop_cnt; i++) {
2600 qla8044_wr_reg_indirect(vha, s_addr, s_value);
2601 qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2602 *data_ptr++ = s_value;
2603 *data_ptr++ = r_value;
2604 s_value += s_stride;
2605 }
2606 *d_ptr = data_ptr;
2607 }
2608
2609 static void
2610 qla8044_minidump_process_queue(struct scsi_qla_host *vha,
2611 struct qla8044_minidump_entry_hdr *entry_hdr,
2612 uint32_t **d_ptr)
2613 {
2614 uint32_t s_addr, r_addr;
2615 uint32_t r_stride, r_value, r_cnt, qid = 0;
2616 uint32_t i, k, loop_cnt;
2617 struct qla8044_minidump_entry_queue *q_hdr;
2618 uint32_t *data_ptr = *d_ptr;
2619
2620 ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__);
2621 q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr;
2622 s_addr = q_hdr->select_addr;
2623 r_cnt = q_hdr->rd_strd.read_addr_cnt;
2624 r_stride = q_hdr->rd_strd.read_addr_stride;
2625 loop_cnt = q_hdr->op_count;
2626
2627 for (i = 0; i < loop_cnt; i++) {
2628 qla8044_wr_reg_indirect(vha, s_addr, qid);
2629 r_addr = q_hdr->read_addr;
2630 for (k = 0; k < r_cnt; k++) {
2631 qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2632 *data_ptr++ = r_value;
2633 r_addr += r_stride;
2634 }
2635 qid += q_hdr->q_strd.queue_id_stride;
2636 }
2637 *d_ptr = data_ptr;
2638 }
2639
2640
2641 static uint32_t
2642 qla8044_minidump_process_pollrd(struct scsi_qla_host *vha,
2643 struct qla8044_minidump_entry_hdr *entry_hdr,
2644 uint32_t **d_ptr)
2645 {
2646 uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
2647 uint16_t s_stride, i;
2648 struct qla8044_minidump_entry_pollrd *pollrd_hdr;
2649 uint32_t *data_ptr = *d_ptr;
2650
2651 pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr;
2652 s_addr = pollrd_hdr->select_addr;
2653 r_addr = pollrd_hdr->read_addr;
2654 s_value = pollrd_hdr->select_value;
2655 s_stride = pollrd_hdr->select_value_stride;
2656
2657 poll_wait = pollrd_hdr->poll_wait;
2658 poll_mask = pollrd_hdr->poll_mask;
2659
2660 for (i = 0; i < pollrd_hdr->op_count; i++) {
2661 qla8044_wr_reg_indirect(vha, s_addr, s_value);
2662 poll_wait = pollrd_hdr->poll_wait;
2663 while (1) {
2664 qla8044_rd_reg_indirect(vha, s_addr, &r_value);
2665 if ((r_value & poll_mask) != 0) {
2666 break;
2667 } else {
2668 usleep_range(1000, 1100);
2669 if (--poll_wait == 0) {
2670 ql_log(ql_log_fatal, vha, 0xb0fe,
2671 "%s: TIMEOUT\n", __func__);
2672 goto error;
2673 }
2674 }
2675 }
2676 qla8044_rd_reg_indirect(vha, r_addr, &r_value);
2677 *data_ptr++ = s_value;
2678 *data_ptr++ = r_value;
2679
2680 s_value += s_stride;
2681 }
2682 *d_ptr = data_ptr;
2683 return QLA_SUCCESS;
2684
2685 error:
2686 return QLA_FUNCTION_FAILED;
2687 }
2688
2689 static void
2690 qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha,
2691 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2692 {
2693 uint32_t sel_val1, sel_val2, t_sel_val, data, i;
2694 uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
2695 struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr;
2696 uint32_t *data_ptr = *d_ptr;
2697
2698 rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr;
2699 sel_val1 = rdmux2_hdr->select_value_1;
2700 sel_val2 = rdmux2_hdr->select_value_2;
2701 sel_addr1 = rdmux2_hdr->select_addr_1;
2702 sel_addr2 = rdmux2_hdr->select_addr_2;
2703 sel_val_mask = rdmux2_hdr->select_value_mask;
2704 read_addr = rdmux2_hdr->read_addr;
2705
2706 for (i = 0; i < rdmux2_hdr->op_count; i++) {
2707 qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1);
2708 t_sel_val = sel_val1 & sel_val_mask;
2709 *data_ptr++ = t_sel_val;
2710
2711 qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
2712 qla8044_rd_reg_indirect(vha, read_addr, &data);
2713
2714 *data_ptr++ = data;
2715
2716 qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2);
2717 t_sel_val = sel_val2 & sel_val_mask;
2718 *data_ptr++ = t_sel_val;
2719
2720 qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
2721 qla8044_rd_reg_indirect(vha, read_addr, &data);
2722
2723 *data_ptr++ = data;
2724
2725 sel_val1 += rdmux2_hdr->select_value_stride;
2726 sel_val2 += rdmux2_hdr->select_value_stride;
2727 }
2728
2729 *d_ptr = data_ptr;
2730 }
2731
2732 static uint32_t
2733 qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha,
2734 struct qla8044_minidump_entry_hdr *entry_hdr,
2735 uint32_t **d_ptr)
2736 {
2737 uint32_t poll_wait, poll_mask, r_value, data;
2738 uint32_t addr_1, addr_2, value_1, value_2;
2739 struct qla8044_minidump_entry_pollrdmwr *poll_hdr;
2740 uint32_t *data_ptr = *d_ptr;
2741
2742 poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr;
2743 addr_1 = poll_hdr->addr_1;
2744 addr_2 = poll_hdr->addr_2;
2745 value_1 = poll_hdr->value_1;
2746 value_2 = poll_hdr->value_2;
2747 poll_mask = poll_hdr->poll_mask;
2748
2749 qla8044_wr_reg_indirect(vha, addr_1, value_1);
2750
2751 poll_wait = poll_hdr->poll_wait;
2752 while (1) {
2753 qla8044_rd_reg_indirect(vha, addr_1, &r_value);
2754
2755 if ((r_value & poll_mask) != 0) {
2756 break;
2757 } else {
2758 usleep_range(1000, 1100);
2759 if (--poll_wait == 0) {
2760 ql_log(ql_log_fatal, vha, 0xb0ff,
2761 "%s: TIMEOUT\n", __func__);
2762 goto error;
2763 }
2764 }
2765 }
2766
2767 qla8044_rd_reg_indirect(vha, addr_2, &data);
2768 data &= poll_hdr->modify_mask;
2769 qla8044_wr_reg_indirect(vha, addr_2, data);
2770 qla8044_wr_reg_indirect(vha, addr_1, value_2);
2771
2772 poll_wait = poll_hdr->poll_wait;
2773 while (1) {
2774 qla8044_rd_reg_indirect(vha, addr_1, &r_value);
2775
2776 if ((r_value & poll_mask) != 0) {
2777 break;
2778 } else {
2779 usleep_range(1000, 1100);
2780 if (--poll_wait == 0) {
2781 ql_log(ql_log_fatal, vha, 0xb100,
2782 "%s: TIMEOUT2\n", __func__);
2783 goto error;
2784 }
2785 }
2786 }
2787
2788 *data_ptr++ = addr_2;
2789 *data_ptr++ = data;
2790
2791 *d_ptr = data_ptr;
2792
2793 return QLA_SUCCESS;
2794
2795 error:
2796 return QLA_FUNCTION_FAILED;
2797 }
2798
2799 #define ISP8044_PEX_DMA_ENGINE_INDEX 8
2800 #define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000
2801 #define ISP8044_PEX_DMA_NUM_OFFSET 0x10000UL
2802 #define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0
2803 #define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04
2804 #define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08
2805
2806 #define ISP8044_PEX_DMA_READ_SIZE (16 * 1024)
2807 #define ISP8044_PEX_DMA_MAX_WAIT (100 * 100)
2808
2809 static int
2810 qla8044_check_dma_engine_state(struct scsi_qla_host *vha)
2811 {
2812 struct qla_hw_data *ha = vha->hw;
2813 int rval = QLA_SUCCESS;
2814 uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
2815 uint64_t dma_base_addr = 0;
2816 struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
2817
2818 tmplt_hdr = ha->md_tmplt_hdr;
2819 dma_eng_num =
2820 tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
2821 dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
2822 (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
2823
2824
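/* Read the pex-dma engine's command/status-and-control register. */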
2825 rval = qla8044_rd_reg_indirect(vha,
2826 (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
2827 &cmd_sts_and_cntrl);
2828 if (rval)
2829 return QLA_FUNCTION_FAILED;
2830
2831
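/* BIT_31 set indicates the requested pex-dma engine is available. */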
2832 if (cmd_sts_and_cntrl & BIT_31)
2833 return QLA_SUCCESS;
2834
2835 return QLA_FUNCTION_FAILED;
2836 }
2837
2838 static int
2839 qla8044_start_pex_dma(struct scsi_qla_host *vha,
2840 struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr)
2841 {
2842 struct qla_hw_data *ha = vha->hw;
2843 int rval = QLA_SUCCESS, wait = 0;
2844 uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
2845 uint64_t dma_base_addr = 0;
2846 struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
2847
2848 tmplt_hdr = ha->md_tmplt_hdr;
2849 dma_eng_num =
2850 tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
2851 dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
2852 (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
2853
2854 rval = qla8044_wr_reg_indirect(vha,
2855 dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW,
2856 m_hdr->desc_card_addr);
2857 if (rval)
2858 goto error_exit;
2859
2860 rval = qla8044_wr_reg_indirect(vha,
2861 dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0);
2862 if (rval)
2863 goto error_exit;
2864
2865 rval = qla8044_wr_reg_indirect(vha,
2866 dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL,
2867 m_hdr->start_dma_cmd);
2868 if (rval)
2869 goto error_exit;
2870
2871
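/* Poll until the DMA busy bit (BIT_1) clears or the wait limit is reached. */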
2872 for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) {
2873 rval = qla8044_rd_reg_indirect(vha,
2874 (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
2875 &cmd_sts_and_cntrl);
2876 if (rval)
2877 goto error_exit;
2878
2879 if ((cmd_sts_and_cntrl & BIT_1) == 0)
2880 break;
2881
2882 udelay(10);
2883 }
2884
2885
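/* The DMA engine did not go idle within the wait limit; fail the request. */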
2886 if (wait >= ISP8044_PEX_DMA_MAX_WAIT) {
2887 rval = QLA_FUNCTION_FAILED;
2888 goto error_exit;
2889 }
2890
2891 error_exit:
2892 return rval;
2893 }
2894
2895 static int
2896 qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
2897 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2898 {
2899 struct qla_hw_data *ha = vha->hw;
2900 int rval = QLA_SUCCESS;
2901 struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
2902 uint32_t chunk_size, read_size;
2903 uint8_t *data_ptr = (uint8_t *)*d_ptr;
2904 void *rdmem_buffer = NULL;
2905 dma_addr_t rdmem_dma;
2906 struct qla8044_pex_dma_descriptor dma_desc;
2907
2908 rval = qla8044_check_dma_engine_state(vha);
2909 if (rval != QLA_SUCCESS) {
2910 ql_dbg(ql_dbg_p3p, vha, 0xb147,
2911 "DMA engine not available. Fallback to rdmem-read.\n");
2912 return QLA_FUNCTION_FAILED;
2913 }
2914
2915 m_hdr = (void *)entry_hdr;
2916
2917 rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
2918 ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL);
2919 if (!rdmem_buffer) {
2920 ql_dbg(ql_dbg_p3p, vha, 0xb148,
2921 "Unable to allocate rdmem dma buffer\n");
2922 return QLA_FUNCTION_FAILED;
2923 }
2924
2925
2926
2927
2928
2929
2930
2931
2932
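/*
 * Prepare the pex-dma descriptor: keep the template's command bits
 * but patch this PCI function number into bits 4-7.
 */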
2933 dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
2934 dma_desc.cmd.dma_desc_cmd |=
2935 ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
2936
2937 dma_desc.dma_bus_addr = rdmem_dma;
2938 dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE;
2939 read_size = 0;
2940
2941
2942
2943
2944
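/*
 * Read the region in ISP8044_PEX_DMA_READ_SIZE chunks; the final
 * chunk is trimmed to the number of bytes remaining.
 */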
2945 while (read_size < m_hdr->read_data_size) {
2946 if (m_hdr->read_data_size - read_size <
2947 ISP8044_PEX_DMA_READ_SIZE) {
2948 chunk_size = (m_hdr->read_data_size - read_size);
2949 dma_desc.cmd.read_data_size = chunk_size;
2950 }
2951
2952 dma_desc.src_addr = m_hdr->read_addr + read_size;
2953
2954
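/* Write the DMA descriptor to MS memory before starting the transfer. */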
2955 rval = qla8044_ms_mem_write_128b(vha,
2956 m_hdr->desc_card_addr, (uint32_t *)&dma_desc,
2957 (sizeof(struct qla8044_pex_dma_descriptor)/16));
2958 if (rval) {
2959 ql_log(ql_log_warn, vha, 0xb14a,
2960 "%s: Error writing rdmem-dma-init to MS !!!\n",
2961 __func__);
2962 goto error_exit;
2963 }
2964 ql_dbg(ql_dbg_p3p, vha, 0xb14b,
2965 "%s: Dma-descriptor: Instruct for rdmem dma "
2966 "(chunk_size 0x%x).\n", __func__, chunk_size);
2967
2968
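/* Start the pex-dma engine and wait for this chunk to complete. */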
2969 rval = qla8044_start_pex_dma(vha, m_hdr);
2970 if (rval)
2971 goto error_exit;
2972
2973 memcpy(data_ptr, rdmem_buffer, chunk_size);
2974 data_ptr += chunk_size;
2975 read_size += chunk_size;
2976 }
2977
2978 *d_ptr = (uint32_t *)data_ptr;
2979
2980 error_exit:
2981 if (rdmem_buffer)
2982 dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE,
2983 rdmem_buffer, rdmem_dma);
2984
2985 return rval;
2986 }
2987
2988 static uint32_t
2989 qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
2990 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
2991 {
2992 int loop_cnt;
2993 uint32_t addr1, addr2, value, data, temp, wrVal;
2994 uint8_t stride, stride2;
2995 uint16_t count;
2996 uint32_t poll, mask, modify_mask;
2997 uint32_t wait_count = 0;
2998 uint32_t *data_ptr = *d_ptr;
2999 struct qla8044_minidump_entry_rddfe *rddfe;
3000
3001 rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
3002
3003 addr1 = rddfe->addr_1;
3004 value = rddfe->value;
3005 stride = rddfe->stride;
3006 stride2 = rddfe->stride2;
3007 count = rddfe->count;
3008
3009 poll = rddfe->poll;
3010 mask = rddfe->mask;
3011 modify_mask = rddfe->modify_mask;
3012
3013 addr2 = addr1 + stride;
3014
3015 for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
3016 qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value));
3017
3018 wait_count = 0;
3019 while (wait_count < poll) {
3020 qla8044_rd_reg_indirect(vha, addr1, &temp);
3021 if ((temp & mask) != 0)
3022 break;
3023 wait_count++;
3024 }
3025
3026 if (wait_count == poll) {
3027 ql_log(ql_log_warn, vha, 0xb153,
3028 "%s: TIMEOUT\n", __func__);
3029 goto error;
3030 } else {
3031 qla8044_rd_reg_indirect(vha, addr2, &temp);
3032 temp = temp & modify_mask;
3033 temp = (temp | ((loop_cnt << 16) | loop_cnt));
3034 wrVal = ((temp << 16) | temp);
3035
3036 qla8044_wr_reg_indirect(vha, addr2, wrVal);
3037 qla8044_wr_reg_indirect(vha, addr1, value);
3038
3039 wait_count = 0;
3040 while (wait_count < poll) {
3041 qla8044_rd_reg_indirect(vha, addr1, &temp);
3042 if ((temp & mask) != 0)
3043 break;
3044 wait_count++;
3045 }
3046 if (wait_count == poll) {
3047 ql_log(ql_log_warn, vha, 0xb154,
3048 "%s: TIMEOUT\n", __func__);
3049 goto error;
3050 }
3051
3052 qla8044_wr_reg_indirect(vha, addr1,
3053 ((0x40000000 | value) + stride2));
3054 wait_count = 0;
3055 while (wait_count < poll) {
3056 qla8044_rd_reg_indirect(vha, addr1, &temp);
3057 if ((temp & mask) != 0)
3058 break;
3059 wait_count++;
3060 }
3061
3062 if (wait_count == poll) {
3063 ql_log(ql_log_warn, vha, 0xb155,
3064 "%s: TIMEOUT\n", __func__);
3065 goto error;
3066 }
3067
3068 qla8044_rd_reg_indirect(vha, addr2, &data);
3069
3070 *data_ptr++ = wrVal;
3071 *data_ptr++ = data;
3072 }
3073
3074 }
3075
3076 *d_ptr = data_ptr;
3077 return QLA_SUCCESS;
3078
3079 error:
3080 return -1;
3081
3082 }
3083
3084 static uint32_t
3085 qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
3086 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3087 {
3088 int ret = 0;
3089 uint32_t addr1, addr2, value1, value2, data, selVal;
3090 uint8_t stride1, stride2;
3091 uint32_t addr3, addr4, addr5, addr6, addr7;
3092 uint16_t count, loop_cnt;
3093 uint32_t mask;
3094 uint32_t *data_ptr = *d_ptr;
3095
3096 struct qla8044_minidump_entry_rdmdio *rdmdio;
3097
3098 rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr;
3099
3100 addr1 = rdmdio->addr_1;
3101 addr2 = rdmdio->addr_2;
3102 value1 = rdmdio->value_1;
3103 stride1 = rdmdio->stride_1;
3104 stride2 = rdmdio->stride_2;
3105 count = rdmdio->count;
3106
3107 mask = rdmdio->mask;
3108 value2 = rdmdio->value_2;
3109
3110 addr3 = addr1 + stride1;
3111
3112 for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
3113 ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
3114 addr3, mask);
3115 if (ret == -1)
3116 goto error;
3117
3118 addr4 = addr2 - stride1;
3119 ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4,
3120 value2);
3121 if (ret == -1)
3122 goto error;
3123
3124 addr5 = addr2 - (2 * stride1);
3125 ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5,
3126 value1);
3127 if (ret == -1)
3128 goto error;
3129
3130 addr6 = addr2 - (3 * stride1);
3131 ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask,
3132 addr6, 0x2);
3133 if (ret == -1)
3134 goto error;
3135
3136 ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
3137 addr3, mask);
3138 if (ret == -1)
3139 goto error;
3140
3141 addr7 = addr2 - (4 * stride1);
3142 data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr7);
3143 if (data == -1)
3144 goto error;
3145
3146 selVal = (value2 << 18) | (value1 << 2) | 2;
3147
3148 stride2 = rdmdio->stride_2;
3149 *data_ptr++ = selVal;
3150 *data_ptr++ = data;
3151
3152 value1 = value1 + stride2;
3153 *d_ptr = data_ptr;
3154 }
3155
3156 return 0;
3157
3158 error:
3159 return -1;
3160 }
3161
3162 static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
3163 struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
3164 {
3165 uint32_t addr1, addr2, value1, value2, poll, r_value;
3166 uint32_t wait_count = 0;
3167 struct qla8044_minidump_entry_pollwr *pollwr_hdr;
3168
3169 pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
3170 addr1 = pollwr_hdr->addr_1;
3171 addr2 = pollwr_hdr->addr_2;
3172 value1 = pollwr_hdr->value_1;
3173 value2 = pollwr_hdr->value_2;
3174
3175 poll = pollwr_hdr->poll;
3176
3177 while (wait_count < poll) {
3178 qla8044_rd_reg_indirect(vha, addr1, &r_value);
3179
3180 if ((r_value & poll) != 0)
3181 break;
3182 wait_count++;
3183 }
3184
3185 if (wait_count == poll) {
3186 ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__);
3187 goto error;
3188 }
3189
3190 qla8044_wr_reg_indirect(vha, addr2, value2);
3191 qla8044_wr_reg_indirect(vha, addr1, value1);
3192
3193 wait_count = 0;
3194 while (wait_count < poll) {
3195 qla8044_rd_reg_indirect(vha, addr1, &r_value);
3196
3197 if ((r_value & poll) != 0)
3198 break;
3199 wait_count++;
3200 }
3201
3202 return QLA_SUCCESS;
3203
3204 error:
3205 return -1;
3206 }
3207
3208
3209
3210
3211
3212
3213 int
3214 qla8044_collect_md_data(struct scsi_qla_host *vha)
3215 {
3216 int num_entry_hdr = 0;
3217 struct qla8044_minidump_entry_hdr *entry_hdr;
3218 struct qla8044_minidump_template_hdr *tmplt_hdr;
3219 uint32_t *data_ptr;
3220 uint32_t data_collected = 0, f_capture_mask;
3221 int i, rval = QLA_FUNCTION_FAILED;
3222 uint64_t now;
3223 uint32_t timestamp, idc_control;
3224 struct qla_hw_data *ha = vha->hw;
3225
3226 if (!ha->md_dump) {
3227 ql_log(ql_log_info, vha, 0xb101,
3228 "%s(%ld) No buffer to dump\n",
3229 __func__, vha->host_no);
3230 return rval;
3231 }
3232
3233 if (ha->fw_dumped) {
3234 ql_log(ql_log_warn, vha, 0xb10d,
3235 "Firmware has been previously dumped (%p) "
3236 "-- ignoring request.\n", ha->fw_dump);
3237 goto md_failed;
3238 }
3239
3240 ha->fw_dumped = false;
3241
3242 if (!ha->md_tmplt_hdr || !ha->md_dump) {
3243 ql_log(ql_log_warn, vha, 0xb10e,
3244 "Memory not allocated for minidump capture\n");
3245 goto md_failed;
3246 }
3247
3248 qla8044_idc_lock(ha);
3249 idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
3250 if (idc_control & GRACEFUL_RESET_BIT1) {
3251 ql_log(ql_log_warn, vha, 0xb112,
3252 "Forced reset from application, "
3253 "ignore minidump capture\n");
3254 qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
3255 (idc_control & ~GRACEFUL_RESET_BIT1));
3256 qla8044_idc_unlock(ha);
3257
3258 goto md_failed;
3259 }
3260 qla8044_idc_unlock(ha);
3261
3262 if (qla82xx_validate_template_chksum(vha)) {
3263 ql_log(ql_log_info, vha, 0xb109,
3264 "Template checksum validation error\n");
3265 goto md_failed;
3266 }
3267
3268 tmplt_hdr = (struct qla8044_minidump_template_hdr *)
3269 ha->md_tmplt_hdr;
3270 data_ptr = (uint32_t *)((uint8_t *)ha->md_dump);
3271 num_entry_hdr = tmplt_hdr->num_of_entries;
3272
3273 ql_dbg(ql_dbg_p3p, vha, 0xb11a,
3274 "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
3275
3276 f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
3277
3278
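/* Warn if the minimum required capture level is not set. */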
3279 if ((f_capture_mask & 0x3) != 0x3) {
3280 ql_log(ql_log_warn, vha, 0xb10f,
3281 "Minimum required capture mask[0x%x] level not set\n",
3282 f_capture_mask);
3283
3284 }
3285 tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
3286 ql_log(ql_log_info, vha, 0xb102,
3287 "[%s]: starting data ptr: %p\n",
3288 __func__, data_ptr);
3289 ql_log(ql_log_info, vha, 0xb10b,
3290 "[%s]: no of entry headers in Template: 0x%x\n",
3291 __func__, num_entry_hdr);
3292 ql_log(ql_log_info, vha, 0xb10c,
3293 "[%s]: Total_data_size 0x%x, %d obtained\n",
3294 __func__, ha->md_dump_size, ha->md_dump_size);
3295
3296
3297 now = get_jiffies_64();
3298 timestamp = (u32)(jiffies_to_msecs(now) / 1000);
3299 tmplt_hdr->driver_timestamp = timestamp;
3300
3301 entry_hdr = (struct qla8044_minidump_entry_hdr *)
3302 (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
3303 tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] =
3304 tmplt_hdr->ocm_window_reg[ha->portnum];
3305
3306
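/* Walk the template entry headers and capture each supported entry. */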
3307 for (i = 0; i < num_entry_hdr; i++) {
3308 if (data_collected > ha->md_dump_size) {
3309 ql_log(ql_log_info, vha, 0xb103,
3310 "Data collected: [0x%x], "
3311 "Total Dump size: [0x%x]\n",
3312 data_collected, ha->md_dump_size);
3313 return rval;
3314 }
3315
3316 if (!(entry_hdr->d_ctrl.entry_capture_mask &
3317 ql2xmdcapmask)) {
3318 entry_hdr->d_ctrl.driver_flags |=
3319 QLA82XX_DBG_SKIPPED_FLAG;
3320 goto skip_nxt_entry;
3321 }
3322
3323 ql_dbg(ql_dbg_p3p, vha, 0xb104,
3324 "Data collected: [0x%x], Dump size left:[0x%x]\n",
3325 data_collected,
3326 (ha->md_dump_size - data_collected));
3327
3328
3329
3330
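/* Decode the entry type and dispatch to the matching capture routine. */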
3331 switch (entry_hdr->entry_type) {
3332 case QLA82XX_RDEND:
3333 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3334 break;
3335 case QLA82XX_CNTRL:
3336 rval = qla8044_minidump_process_control(vha,
3337 entry_hdr);
3338 if (rval != QLA_SUCCESS) {
3339 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3340 goto md_failed;
3341 }
3342 break;
3343 case QLA82XX_RDCRB:
3344 qla8044_minidump_process_rdcrb(vha,
3345 entry_hdr, &data_ptr);
3346 break;
3347 case QLA82XX_RDMEM:
3348 rval = qla8044_minidump_pex_dma_read(vha,
3349 entry_hdr, &data_ptr);
3350 if (rval != QLA_SUCCESS) {
3351 rval = qla8044_minidump_process_rdmem(vha,
3352 entry_hdr, &data_ptr);
3353 if (rval != QLA_SUCCESS) {
3354 qla8044_mark_entry_skipped(vha,
3355 entry_hdr, i);
3356 goto md_failed;
3357 }
3358 }
3359 break;
3360 case QLA82XX_BOARD:
3361 case QLA82XX_RDROM:
3362 rval = qla8044_minidump_process_rdrom(vha,
3363 entry_hdr, &data_ptr);
3364 if (rval != QLA_SUCCESS) {
3365 qla8044_mark_entry_skipped(vha,
3366 entry_hdr, i);
3367 }
3368 break;
3369 case QLA82XX_L2DTG:
3370 case QLA82XX_L2ITG:
3371 case QLA82XX_L2DAT:
3372 case QLA82XX_L2INS:
3373 rval = qla8044_minidump_process_l2tag(vha,
3374 entry_hdr, &data_ptr);
3375 if (rval != QLA_SUCCESS) {
3376 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3377 goto md_failed;
3378 }
3379 break;
3380 case QLA8044_L1DTG:
3381 case QLA8044_L1ITG:
3382 case QLA82XX_L1DAT:
3383 case QLA82XX_L1INS:
3384 qla8044_minidump_process_l1cache(vha,
3385 entry_hdr, &data_ptr);
3386 break;
3387 case QLA82XX_RDOCM:
3388 qla8044_minidump_process_rdocm(vha,
3389 entry_hdr, &data_ptr);
3390 break;
3391 case QLA82XX_RDMUX:
3392 qla8044_minidump_process_rdmux(vha,
3393 entry_hdr, &data_ptr);
3394 break;
3395 case QLA82XX_QUEUE:
3396 qla8044_minidump_process_queue(vha,
3397 entry_hdr, &data_ptr);
3398 break;
3399 case QLA8044_POLLRD:
3400 rval = qla8044_minidump_process_pollrd(vha,
3401 entry_hdr, &data_ptr);
3402 if (rval != QLA_SUCCESS)
3403 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3404 break;
3405 case QLA8044_RDMUX2:
3406 qla8044_minidump_process_rdmux2(vha,
3407 entry_hdr, &data_ptr);
3408 break;
3409 case QLA8044_POLLRDMWR:
3410 rval = qla8044_minidump_process_pollrdmwr(vha,
3411 entry_hdr, &data_ptr);
3412 if (rval != QLA_SUCCESS)
3413 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3414 break;
3415 case QLA8044_RDDFE:
3416 rval = qla8044_minidump_process_rddfe(vha, entry_hdr,
3417 &data_ptr);
3418 if (rval != QLA_SUCCESS)
3419 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3420 break;
3421 case QLA8044_RDMDIO:
3422 rval = qla8044_minidump_process_rdmdio(vha, entry_hdr,
3423 &data_ptr);
3424 if (rval != QLA_SUCCESS)
3425 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3426 break;
3427 case QLA8044_POLLWR:
3428 rval = qla8044_minidump_process_pollwr(vha, entry_hdr,
3429 &data_ptr);
3430 if (rval != QLA_SUCCESS)
3431 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3432 break;
3433 case QLA82XX_RDNOP:
3434 default:
3435 qla8044_mark_entry_skipped(vha, entry_hdr, i);
3436 break;
3437 }
3438
3439 data_collected = (uint8_t *)data_ptr -
3440 (uint8_t *)((uint8_t *)ha->md_dump);
3441 skip_nxt_entry:
3442
3443
3444
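/* Advance to the next entry header in the template. */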
3445 entry_hdr = (struct qla8044_minidump_entry_hdr *)
3446 (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
3447 }
3448
3449 if (data_collected != ha->md_dump_size) {
3450 ql_log(ql_log_info, vha, 0xb105,
3451 "Dump data mismatch: Data collected: "
3452 "[0x%x], total_data_size:[0x%x]\n",
3453 data_collected, ha->md_dump_size);
3454 rval = QLA_FUNCTION_FAILED;
3455 goto md_failed;
3456 }
3457
3458 ql_log(ql_log_info, vha, 0xb110,
3459 "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
3460 vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
3461 ha->fw_dumped = true;
3462 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
3463
3464
3465 ql_log(ql_log_info, vha, 0xb106,
3466 "Leaving fn: %s Last entry: 0x%x\n",
3467 __func__, i);
3468 md_failed:
3469 return rval;
3470 }
3471
3472 void
3473 qla8044_get_minidump(struct scsi_qla_host *vha)
3474 {
3475 struct qla_hw_data *ha = vha->hw;
3476
3477 if (!qla8044_collect_md_data(vha)) {
3478 ha->fw_dumped = true;
3479 ha->prev_minidump_failed = 0;
3480 } else {
3481 ql_log(ql_log_fatal, vha, 0xb0db,
3482 "%s: Unable to collect minidump\n",
3483 __func__);
3484 ha->prev_minidump_failed = 1;
3485 }
3486 }
3487
3488 static int
3489 qla8044_poll_flash_status_reg(struct scsi_qla_host *vha)
3490 {
3491 uint32_t flash_status;
3492 int retries = QLA8044_FLASH_READ_RETRY_COUNT;
3493 int ret_val = QLA_SUCCESS;
3494
3495 while (retries--) {
3496 ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS,
3497 &flash_status);
3498 if (ret_val) {
3499 ql_log(ql_log_warn, vha, 0xb13c,
3500 "%s: Failed to read FLASH_STATUS reg.\n",
3501 __func__);
3502 break;
3503 }
3504 if ((flash_status & QLA8044_FLASH_STATUS_READY) ==
3505 QLA8044_FLASH_STATUS_READY)
3506 break;
3507 msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY);
3508 }
3509
3510 if (!retries)
3511 ret_val = QLA_FUNCTION_FAILED;
3512
3513 return ret_val;
3514 }
3515
3516 static int
3517 qla8044_write_flash_status_reg(struct scsi_qla_host *vha,
3518 uint32_t data)
3519 {
3520 int ret_val = QLA_SUCCESS;
3521 uint32_t cmd;
3522
3523 cmd = vha->hw->fdt_wrt_sts_reg_cmd;
3524
3525 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3526 QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd);
3527 if (ret_val) {
3528 ql_log(ql_log_warn, vha, 0xb125,
3529 "%s: Failed to write to FLASH_ADDR.\n", __func__);
3530 goto exit_func;
3531 }
3532
3533 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data);
3534 if (ret_val) {
3535 ql_log(ql_log_warn, vha, 0xb126,
3536 "%s: Failed to write to FLASH_WRDATA.\n", __func__);
3537 goto exit_func;
3538 }
3539
3540 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3541 QLA8044_FLASH_SECOND_ERASE_MS_VAL);
3542 if (ret_val) {
3543 ql_log(ql_log_warn, vha, 0xb127,
3544 "%s: Failed to write to FLASH_CONTROL.\n", __func__);
3545 goto exit_func;
3546 }
3547
3548 ret_val = qla8044_poll_flash_status_reg(vha);
3549 if (ret_val)
3550 ql_log(ql_log_warn, vha, 0xb128,
3551 "%s: Error polling flash status reg.\n", __func__);
3552
3553 exit_func:
3554 return ret_val;
3555 }
3556
3557
3558
3559
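/*
 * qla8044_unprotect_flash() - Enable flash writes by writing the
 * write-enable value to the flash status register.
 */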
3560 static int
3561 qla8044_unprotect_flash(scsi_qla_host_t *vha)
3562 {
3563 int ret_val;
3564 struct qla_hw_data *ha = vha->hw;
3565
3566 ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable);
3567 if (ret_val)
3568 ql_log(ql_log_warn, vha, 0xb139,
3569 "%s: Write flash status failed.\n", __func__);
3570
3571 return ret_val;
3572 }
3573
3574
3575
3576
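/*
 * qla8044_protect_flash() - Re-enable flash write protection by writing
 * the write-disable value to the flash status register.
 */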
3577 static int
3578 qla8044_protect_flash(scsi_qla_host_t *vha)
3579 {
3580 int ret_val;
3581 struct qla_hw_data *ha = vha->hw;
3582
3583 ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable);
3584 if (ret_val)
3585 ql_log(ql_log_warn, vha, 0xb13b,
3586 "%s: Write flash status failed.\n", __func__);
3587
3588 return ret_val;
3589 }
3590
3591
3592 static int
3593 qla8044_erase_flash_sector(struct scsi_qla_host *vha,
3594 uint32_t sector_start_addr)
3595 {
3596 uint32_t reversed_addr;
3597 int ret_val = QLA_SUCCESS;
3598
3599 ret_val = qla8044_poll_flash_status_reg(vha);
3600 if (ret_val) {
3601 ql_log(ql_log_warn, vha, 0xb12e,
3602 "%s: Poll flash status after erase failed..\n", __func__);
3603 }
3604
3605 reversed_addr = (((sector_start_addr & 0xFF) << 16) |
3606 (sector_start_addr & 0xFF00) |
3607 ((sector_start_addr & 0xFF0000) >> 16));
3608
3609 ret_val = qla8044_wr_reg_indirect(vha,
3610 QLA8044_FLASH_WRDATA, reversed_addr);
3611 if (ret_val) {
3612 ql_log(ql_log_warn, vha, 0xb12f,
3613 "%s: Failed to write to FLASH_WRDATA.\n", __func__);
3614 }
3615 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3616 QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd);
3617 if (ret_val) {
3618 ql_log(ql_log_warn, vha, 0xb130,
3619 "%s: Failed to write to FLASH_ADDR.\n", __func__);
3620 }
3621 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3622 QLA8044_FLASH_LAST_ERASE_MS_VAL);
3623 if (ret_val) {
3624 ql_log(ql_log_warn, vha, 0xb131,
3625 "%s: Failed write to FLASH_CONTROL.\n", __func__);
3626 }
3627 ret_val = qla8044_poll_flash_status_reg(vha);
3628 if (ret_val) {
3629 ql_log(ql_log_warn, vha, 0xb132,
3630 "%s: Poll flash status failed.\n", __func__);
3631 }
3632
3633
3634 return ret_val;
3635 }
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
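/*
 * qla8044_flash_write_u32() - Write a single dword to flash by
 * programming FLASH_ADDR, FLASH_WRDATA and FLASH_CONTROL, then
 * polling the flash status register for completion.
 */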
3648 static int
3649 qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr,
3650 uint32_t *p_data)
3651 {
3652 int ret_val = QLA_SUCCESS;
3653
3654 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3655 0x00800000 | (addr >> 2));
3656 if (ret_val) {
3657 ql_log(ql_log_warn, vha, 0xb134,
3658 "%s: Failed write to FLASH_ADDR.\n", __func__);
3659 goto exit_func;
3660 }
3661 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data);
3662 if (ret_val) {
3663 ql_log(ql_log_warn, vha, 0xb135,
3664 "%s: Failed write to FLASH_WRDATA.\n", __func__);
3665 goto exit_func;
3666 }
3667 ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D);
3668 if (ret_val) {
3669 ql_log(ql_log_warn, vha, 0xb136,
3670 "%s: Failed write to FLASH_CONTROL.\n", __func__);
3671 goto exit_func;
3672 }
3673 ret_val = qla8044_poll_flash_status_reg(vha);
3674 if (ret_val) {
3675 ql_log(ql_log_warn, vha, 0xb137,
3676 "%s: Poll flash status failed.\n", __func__);
3677 }
3678
3679 exit_func:
3680 return ret_val;
3681 }
3682
3683 static int
3684 qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
3685 uint32_t faddr, uint32_t dwords)
3686 {
3687 int ret = QLA_FUNCTION_FAILED;
3688 uint32_t spi_val;
3689
3690 if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS ||
3691 dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) {
3692 ql_dbg(ql_dbg_user, vha, 0xb123,
3693 "Got unsupported dwords = 0x%x.\n",
3694 dwords);
3695 return QLA_FUNCTION_FAILED;
3696 }
3697
3698 qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val);
3699 qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3700 spi_val | QLA8044_FLASH_SPI_CTL);
3701 qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3702 QLA8044_FLASH_FIRST_TEMP_VAL);
3703
3704
3705 ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA,
3706 *dwptr++);
3707 qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3708 QLA8044_FLASH_FIRST_MS_PATTERN);
3709
3710 ret = qla8044_poll_flash_status_reg(vha);
3711 if (ret) {
3712 ql_log(ql_log_warn, vha, 0xb124,
3713 "%s: Failed.\n", __func__);
3714 goto exit_func;
3715 }
3716
3717 dwords--;
3718
3719 qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3720 QLA8044_FLASH_SECOND_TEMP_VAL);
3721
3722
3723
3724 while (dwords != 1) {
3725 qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
3726 qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3727 QLA8044_FLASH_SECOND_MS_PATTERN);
3728 ret = qla8044_poll_flash_status_reg(vha);
3729 if (ret) {
3730 ql_log(ql_log_warn, vha, 0xb129,
3731 "%s: Failed.\n", __func__);
3732 goto exit_func;
3733 }
3734 dwords--;
3735 }
3736
3737 qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
3738 QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2));
3739
3740
3741 qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
3742 qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
3743 QLA8044_FLASH_LAST_MS_PATTERN);
3744 ret = qla8044_poll_flash_status_reg(vha);
3745 if (ret) {
3746 ql_log(ql_log_warn, vha, 0xb12a,
3747 "%s: Failed.\n", __func__);
3748 goto exit_func;
3749 }
3750 qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val);
3751
3752 if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) {
3753 ql_log(ql_log_warn, vha, 0xb12b,
3754 "%s: Failed.\n", __func__);
3755 spi_val = 0;
3756
3757 qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3758 &spi_val);
3759 qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
3760 spi_val | QLA8044_FLASH_SPI_CTL);
3761 }
3762 exit_func:
3763 return ret;
3764 }
3765
3766 static int
3767 qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
3768 uint32_t faddr, uint32_t dwords)
3769 {
3770 int ret = QLA_FUNCTION_FAILED;
3771 uint32_t liter;
3772
3773 for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
3774 ret = qla8044_flash_write_u32(vha, faddr, dwptr);
3775 if (ret) {
3776 ql_dbg(ql_dbg_p3p, vha, 0xb141,
3777 "%s: flash address=%x data=%x.\n", __func__,
3778 faddr, *dwptr);
3779 break;
3780 }
3781 }
3782
3783 return ret;
3784 }
3785
3786 int
3787 qla8044_write_optrom_data(struct scsi_qla_host *vha, void *buf,
3788 uint32_t offset, uint32_t length)
3789 {
3790 int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
3791 int dword_count, erase_sec_count;
3792 uint32_t erase_offset;
3793 uint8_t *p_cache, *p_src;
3794
3795 erase_offset = offset;
3796
3797 p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL);
3798 if (!p_cache)
3799 return QLA_FUNCTION_FAILED;
3800
3801 memcpy(p_cache, buf, length);
3802 p_src = p_cache;
3803 dword_count = length / sizeof(uint32_t);
3804
3805
3806
3807 burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS;
3808 erase_sec_count = length / QLA8044_SECTOR_SIZE;
3809
3810
3811 scsi_block_requests(vha->host);
3812
3813 qla8044_flash_lock(vha);
3814 qla8044_unprotect_flash(vha);
3815
3816
3817 for (i = 0; i < erase_sec_count; i++) {
3818 rval = qla8044_erase_flash_sector(vha, erase_offset);
3819 ql_dbg(ql_dbg_user, vha, 0xb138,
3820 "Done erase of sector=0x%x.\n",
3821 erase_offset);
3822 if (rval) {
3823 ql_log(ql_log_warn, vha, 0xb121,
3824 "Failed to erase the sector having address: "
3825 "0x%x.\n", erase_offset);
3826 goto out;
3827 }
3828 erase_offset += QLA8044_SECTOR_SIZE;
3829 }
3830 ql_dbg(ql_dbg_user, vha, 0xb13f,
3831 "Got write for addr = 0x%x length=0x%x.\n",
3832 offset, length);
3833
3834 for (i = 0; i < burst_iter_count; i++) {
3835
3836
3837 rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src,
3838 offset, QLA8044_MAX_OPTROM_BURST_DWORDS);
3839 if (rval) {
3840
3841 ql_log(ql_log_warn, vha, 0xb122,
3842 "Failed to write flash in buffer mode, "
3843 "Reverting to slow-write.\n");
3844 rval = qla8044_write_flash_dword_mode(vha,
3845 (uint32_t *)p_src, offset,
3846 QLA8044_MAX_OPTROM_BURST_DWORDS);
3847 }
3848 p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
3849 offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
3850 }
3851 ql_dbg(ql_dbg_user, vha, 0xb133,
3852 "Done writing.\n");
3853
3854 out:
3855 qla8044_protect_flash(vha);
3856 qla8044_flash_unlock(vha);
3857 scsi_unblock_requests(vha->host);
3858 kfree(p_cache);
3859
3860 return rval;
3861 }
3862
3863 #define LEG_INT_PTR_B31 (1 << 31)
3864 #define LEG_INT_PTR_B30 (1 << 30)
3865 #define PF_BITS_MASK (0xF << 16)
3866
3867
3868
3869
3870
3871
3872
3873
3874
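/*
 * qla8044_intr_handler() - Legacy interrupt handler for ISP8044:
 * validates and clears the legacy interrupt, then services mailbox
 * completions, async events and the response queue.
 */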
3875 irqreturn_t
3876 qla8044_intr_handler(int irq, void *dev_id)
3877 {
3878 scsi_qla_host_t *vha;
3879 struct qla_hw_data *ha;
3880 struct rsp_que *rsp;
3881 struct device_reg_82xx __iomem *reg;
3882 int status = 0;
3883 unsigned long flags;
3884 unsigned long iter;
3885 uint32_t stat;
3886 uint16_t mb[8];
3887 uint32_t leg_int_ptr = 0, pf_bit;
3888
3889 rsp = (struct rsp_que *) dev_id;
3890 if (!rsp) {
3891 ql_log(ql_log_info, NULL, 0xb143,
3892 "%s(): NULL response queue pointer\n", __func__);
3893 return IRQ_NONE;
3894 }
3895 ha = rsp->hw;
3896 vha = pci_get_drvdata(ha->pdev);
3897
3898 if (unlikely(pci_channel_offline(ha->pdev)))
3899 return IRQ_HANDLED;
3900
3901 leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
3902
3903
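/* BIT_31 of the legacy interrupt pointer must be set for a valid interrupt. */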
3904 if (!(leg_int_ptr & (LEG_INT_PTR_B31))) {
3905 ql_dbg(ql_dbg_p3p, vha, 0xb144,
3906 "%s: Legacy Interrupt Bit 31 not set, "
3907 "spurious interrupt!\n", __func__);
3908 return IRQ_NONE;
3909 }
3910
3911 pf_bit = ha->portnum << 16;
3912
3913 if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) {
3914 ql_dbg(ql_dbg_p3p, vha, 0xb145,
3915 "%s: Incorrect function ID 0x%x in "
3916 "legacy interrupt register, "
3917 "ha->pf_bit = 0x%x\n", __func__,
3918 (leg_int_ptr & (PF_BITS_MASK)), pf_bit);
3919 return IRQ_NONE;
3920 }
3921
3922
3923
3924
3925
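/*
 * Clear the legacy interrupt by writing 0 to the trigger register, then
 * poll until the interrupt is no longer asserted for this function.
 */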
3926 qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0);
3927 do {
3928 leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
3929 if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit)
3930 break;
3931 } while (leg_int_ptr & (LEG_INT_PTR_B30));
3932
3933 reg = &ha->iobase->isp82;
3934 spin_lock_irqsave(&ha->hardware_lock, flags);
3935 for (iter = 1; iter--; ) {
3936
3937 if (rd_reg_dword(&reg->host_int)) {
3938 stat = rd_reg_dword(&reg->host_status);
3939 if ((stat & HSRX_RISC_INT) == 0)
3940 break;
3941
3942 switch (stat & 0xff) {
3943 case 0x1:
3944 case 0x2:
3945 case 0x10:
3946 case 0x11:
3947 qla82xx_mbx_completion(vha, MSW(stat));
3948 status |= MBX_INTERRUPT;
3949 break;
3950 case 0x12:
3951 mb[0] = MSW(stat);
3952 mb[1] = rd_reg_word(&reg->mailbox_out[1]);
3953 mb[2] = rd_reg_word(&reg->mailbox_out[2]);
3954 mb[3] = rd_reg_word(&reg->mailbox_out[3]);
3955 qla2x00_async_event(vha, rsp, mb);
3956 break;
3957 case 0x13:
3958 qla24xx_process_response_queue(vha, rsp);
3959 break;
3960 default:
3961 ql_dbg(ql_dbg_p3p, vha, 0xb146,
3962 "Unrecognized interrupt type "
3963 "(%d).\n", stat & 0xff);
3964 break;
3965 }
3966 }
3967 wrt_reg_dword(&reg->host_int, 0);
3968 }
3969
3970 qla2x00_handle_mbx_completion(ha, status);
3971 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3972
3973 return IRQ_HANDLED;
3974 }
3975
3976 static int
3977 qla8044_idc_dontreset(struct qla_hw_data *ha)
3978 {
3979 uint32_t idc_ctrl;
3980
3981 idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
3982 return idc_ctrl & DONTRESET_BIT0;
3983 }
3984
3985 static void
3986 qla8044_clear_rst_ready(scsi_qla_host_t *vha)
3987 {
3988 uint32_t drv_state;
3989
3990 drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
3991
3992
3993
3994
3995
3996
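/* Clear this function's reset-ready bit in the DRV_STATE register. */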
3997 drv_state &= ~(1 << vha->hw->portnum);
3998
3999 ql_dbg(ql_dbg_p3p, vha, 0xb13d,
4000 "drv_state: 0x%08x\n", drv_state);
4001 qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
4002 }
4003
4004 int
4005 qla8044_abort_isp(scsi_qla_host_t *vha)
4006 {
4007 int rval;
4008 uint32_t dev_state;
4009 struct qla_hw_data *ha = vha->hw;
4010
4011 qla8044_idc_lock(ha);
4012 dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
4013
4014 if (ql2xdontresethba)
4015 qla8044_set_idc_dontreset(vha);
4016
4017
4018
4019
4020
4021
4022
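/*
 * If the device is currently READY, request a reset by moving the
 * device state to NEED_RESET (unless reset recovery is disabled via
 * the IDC don't-reset bit).
 */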
4023 if (dev_state == QLA8XXX_DEV_READY) {
4024
4025
4026 if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) {
4027 ql_dbg(ql_dbg_p3p, vha, 0xb13e,
4028 "Reset recovery disabled\n");
4029 rval = QLA_FUNCTION_FAILED;
4030 goto exit_isp_reset;
4031 }
4032
4033 ql_dbg(ql_dbg_p3p, vha, 0xb140,
4034 "HW State: NEED RESET\n");
4035 qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
4036 QLA8XXX_DEV_NEED_RESET);
4037 }
4038
4039
4040
4041
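/* Re-evaluate reset ownership before running the device state handler. */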
4042 qla83xx_reset_ownership(vha);
4043
4044 qla8044_idc_unlock(ha);
4045 rval = qla8044_device_state_handler(vha);
4046 qla8044_idc_lock(ha);
4047 qla8044_clear_rst_ready(vha);
4048
4049 exit_isp_reset:
4050 qla8044_idc_unlock(ha);
4051 if (rval == QLA_SUCCESS) {
4052 ha->flags.isp82xx_fw_hung = 0;
4053 ha->flags.nic_core_reset_hdlr_active = 0;
4054 rval = qla82xx_restart_isp(vha);
4055 }
4056
4057 return rval;
4058 }
4059
4060 void
4061 qla8044_fw_dump(scsi_qla_host_t *vha)
4062 {
4063 struct qla_hw_data *ha = vha->hw;
4064
4065 if (!ha->allow_cna_fw_dump)
4066 return;
4067
4068 scsi_block_requests(vha->host);
4069 ha->flags.isp82xx_no_md_cap = 1;
4070 qla8044_idc_lock(ha);
4071 qla82xx_set_reset_owner(vha);
4072 qla8044_idc_unlock(ha);
4073 qla2x00_wait_for_chip_reset(vha);
4074 scsi_unblock_requests(vha->host);
4075 }