0001
0002
0003
0004
0005
0006
0007 #include <net/ip.h>
0008
0009 #include "qlcnic.h"
0010 #include "qlcnic_hdr.h"
0011 #include "qlcnic_83xx_hw.h"
0012 #include "qlcnic_hw.h"
0013
/* Flash offset where the 83xx minidump template is stored. */
#define QLC_83XX_MINIDUMP_FLASH 0x520000
#define QLC_83XX_OCM_INDEX 3
#define QLC_83XX_PCI_INDEX 0
/* Index into the template's saved_state[] holding the DMA engine number. */
#define QLC_83XX_DMA_ENGINE_INDEX 8

/* Test-agent read-data registers; each completed 16-byte transfer leaves
 * four 32-bit words here (see qlcnic_read_memory_test_agent()). */
static const u32 qlcnic_ms_read_data[] = {
0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
};

/* Opcode bits for QLCNIC_DUMP_READ_CTRL/PEG_REG entries (see
 * qlcnic_dump_ctrl()); several may be set in one entry. */
#define QLCNIC_DUMP_WCRB BIT_0
#define QLCNIC_DUMP_RWCRB BIT_1
#define QLCNIC_DUMP_ANDCRB BIT_2
#define QLCNIC_DUMP_ORCRB BIT_3
#define QLCNIC_DUMP_POLLCRB BIT_4
#define QLCNIC_DUMP_RD_SAVE BIT_5
#define QLCNIC_DUMP_WRT_SAVED BIT_6
#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
/* Entry-header flag: entry was skipped (shares the BIT_7 value above). */
#define QLCNIC_DUMP_SKIP BIT_7

#define QLCNIC_DUMP_MASK_MAX 0xff
0034
/* DMA descriptor written into card memory (at mem->desc_card_addr) to
 * program one PEX DMA transfer; layout is fixed by firmware — do not
 * reorder or resize fields. */
struct qlcnic_pex_dma_descriptor {
u32 read_data_size;
u32 dma_desc_cmd;
u32 src_addr_low;
u32 src_addr_high;
u32 dma_bus_addr_low;
u32 dma_bus_addr_high;
u32 rsvd[6];
} __packed;
0044
/* Header common to every minidump template entry.  The endian-dependent
 * byte ordering keeps mask/flags at fixed offsets in the on-wire layout. */
struct qlcnic_common_entry_hdr {
u32 type;
u32 offset;
u32 cap_size;
#if defined(__LITTLE_ENDIAN)
u8 mask;
u8 rsvd[2];
u8 flags;
#else
u8 flags;
u8 rsvd[2];
u8 mask;
#endif
} __packed;
0059
/* CRB-register dump entry: read no_ops registers starting at addr,
 * stepping by stride (see qlcnic_dump_crb()). */
struct __crb {
u32 addr;
#if defined(__LITTLE_ENDIAN)
u8 stride;
u8 rsvd1[3];
#else
u8 rsvd1[3];
u8 stride;
#endif
u32 data_size;
u32 no_ops;
u32 rsvd2[4];
} __packed;
0073
/* Control entry driving the opcode interpreter in qlcnic_dump_ctrl():
 * opcode is a bitmask of QLCNIC_DUMP_* operations, index_a/index_v select
 * saved_state[] slots, val1..val3 are operation operands. */
struct __ctrl {
u32 addr;
#if defined(__LITTLE_ENDIAN)
u8 stride;
u8 index_a;
u16 timeout;
#else
u16 timeout;
u8 index_a;
u8 stride;
#endif
u32 data_size;
u32 no_ops;
#if defined(__LITTLE_ENDIAN)
u8 opcode;
u8 index_v;
u8 shl_val;
u8 shr_val;
#else
u8 shr_val;
u8 shl_val;
u8 index_v;
u8 opcode;
#endif
u32 val1;
u32 val2;
u32 val3;
} __packed;
0102
/* L1/L2 cache dump entry: select a tag (init_tag_val, stepped by stride),
 * latch via ctrl_addr/ctrl_val, then read read_addr_num words from
 * read_addr (see qlcnic_dump_l1_cache()/qlcnic_dump_l2_cache()). */
struct __cache {
u32 addr;
#if defined(__LITTLE_ENDIAN)
u16 stride;
u16 init_tag_val;
#else
u16 init_tag_val;
u16 stride;
#endif
u32 size;
u32 no_ops;
u32 ctrl_addr;
u32 ctrl_val;
u32 read_addr;
#if defined(__LITTLE_ENDIAN)
u8 read_addr_stride;
u8 read_addr_num;
u8 rsvd1[2];
#else
u8 rsvd1[2];
u8 read_addr_num;
u8 read_addr_stride;
#endif
} __packed;
0127
/* On-chip memory dump entry: no_ops MMIO reads from pci_base0 + read_addr,
 * stepping by read_addr_stride (see qlcnic_dump_ocm()). */
struct __ocm {
u8 rsvd[8];
u32 size;
u32 no_ops;
u8 rsvd1[8];
u32 read_addr;
u32 read_addr_stride;
} __packed;
0136
/* Memory/ROM dump entry: addr/size select the region; the dma_* fields
 * program the PEX DMA path (see qlcnic_read_memory_pexdma()). */
struct __mem {
u32 desc_card_addr;
u32 dma_desc_cmd;
u32 start_dma_cmd;
u32 rsvd[3];
u32 addr;
u32 size;
} __packed;
0145
/* Mux dump entry: write val (stepped by val_stride) to addr, then read
 * read_addr, no_ops times (see qlcnic_dump_mux()). */
struct __mux {
u32 addr;
u8 rsvd[4];
u32 size;
u32 no_ops;
u32 val;
u32 val_stride;
u32 read_addr;
u8 rsvd2[4];
} __packed;
0156
/* Queue dump entry: for each queue id (stepped by stride) written to
 * sel_addr, read read_addr_cnt words from read_addr
 * (see qlcnic_dump_que()). */
struct __queue {
u32 sel_addr;
#if defined(__LITTLE_ENDIAN)
u16 stride;
u8 rsvd[2];
#else
u8 rsvd[2];
u16 stride;
#endif
u32 size;
u32 no_ops;
u8 rsvd2[8];
u32 read_addr;
#if defined(__LITTLE_ENDIAN)
u8 read_addr_stride;
u8 read_addr_cnt;
u8 rsvd3[2];
#else
u8 rsvd3[2];
u8 read_addr_cnt;
u8 read_addr_stride;
#endif
} __packed;
0180
/* Poll-then-read entry: write sel_val to sel_addr, poll sel_addr against
 * poll_mask for up to poll_wait iterations, then read read_addr
 * (see qlcnic_read_pollrd()). */
struct __pollrd {
u32 sel_addr;
u32 read_addr;
u32 sel_val;
#if defined(__LITTLE_ENDIAN)
u16 sel_val_stride;
u16 no_ops;
#else
u16 no_ops;
u16 sel_val_stride;
#endif
u32 poll_wait;
u32 poll_mask;
u32 data_size;
u8 rsvd[4];
} __packed;
0197
/* Two-level mux entry: two select values driven through sel_addr1/2 with
 * sel_val_mask applied, reading read_addr after each selection
 * (see qlcnic_read_mux2()). */
struct __mux2 {
u32 sel_addr1;
u32 sel_addr2;
u32 sel_val1;
u32 sel_val2;
u32 no_ops;
u32 sel_val_mask;
u32 read_addr;
#if defined(__LITTLE_ENDIAN)
u8 sel_val_stride;
u8 data_size;
u8 rsvd[2];
#else
u8 rsvd[2];
u8 data_size;
u8 sel_val_stride;
#endif
} __packed;
0216
/* Poll/read-modify-write entry: write val1 to addr1, poll addr1, then
 * mask addr2 with mod_mask, write it back, write val2 to addr1 and poll
 * again (see qlcnic_read_pollrdmwr()). */
struct __pollrdmwr {
u32 addr1;
u32 addr2;
u32 val1;
u32 val2;
u32 poll_wait;
u32 poll_mask;
u32 mod_mask;
u32 data_size;
} __packed;
0227
/* One template entry: common header plus an opcode-specific region; the
 * handler selected by hdr.type interprets the matching union member. */
struct qlcnic_dump_entry {
struct qlcnic_common_entry_hdr hdr;
union {
struct __crb crb;
struct __cache cache;
struct __ocm ocm;
struct __mem mem;
struct __mux mux;
struct __queue que;
struct __ctrl ctrl;
struct __pollrdmwr pollrdmwr;
struct __mux2 mux2;
struct __pollrd pollrd;
} region;
} __packed;
0243
/* Template entry opcodes (hdr.type); values are assigned by the firmware
 * template format, hence the gaps.  Dispatched via the qlcnic_fw_dump_ops
 * tables below. */
enum qlcnic_minidump_opcode {
QLCNIC_DUMP_NOP = 0,
QLCNIC_DUMP_READ_CRB = 1,
QLCNIC_DUMP_READ_MUX = 2,
QLCNIC_DUMP_QUEUE = 3,
QLCNIC_DUMP_BRD_CONFIG = 4,
QLCNIC_DUMP_READ_OCM = 6,
QLCNIC_DUMP_PEG_REG = 7,
QLCNIC_DUMP_L1_DTAG = 8,
QLCNIC_DUMP_L1_ITAG = 9,
QLCNIC_DUMP_L1_DATA = 11,
QLCNIC_DUMP_L1_INST = 12,
QLCNIC_DUMP_L2_DTAG = 21,
QLCNIC_DUMP_L2_ITAG = 22,
QLCNIC_DUMP_L2_DATA = 23,
QLCNIC_DUMP_L2_INST = 24,
QLCNIC_DUMP_POLL_RD = 35,
QLCNIC_READ_MUX2 = 36,
QLCNIC_READ_POLLRDMWR = 37,
QLCNIC_DUMP_READ_ROM = 71,
QLCNIC_DUMP_READ_MEM = 72,
QLCNIC_DUMP_READ_CTRL = 98,
QLCNIC_DUMP_TLHDR = 99,
QLCNIC_DUMP_RDEND = 255
};
0269
0270 inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
0271 {
0272 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
0273
0274 return hdr->saved_state[index];
0275 }
0276
0277 inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
0278 u32 value)
0279 {
0280 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
0281
0282 hdr->saved_state[index] = value;
0283 }
0284
0285 void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
0286 {
0287 struct qlcnic_82xx_dump_template_hdr *hdr;
0288
0289 hdr = fw_dump->tmpl_hdr;
0290 fw_dump->tmpl_hdr_size = hdr->size;
0291 fw_dump->version = hdr->version;
0292 fw_dump->num_entries = hdr->num_entries;
0293 fw_dump->offset = hdr->offset;
0294
0295 hdr->drv_cap_mask = hdr->cap_mask;
0296 fw_dump->cap_mask = hdr->cap_mask;
0297
0298 fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
0299 }
0300
0301 inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
0302 {
0303 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
0304
0305 return hdr->cap_sizes[index];
0306 }
0307
0308 void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
0309 {
0310 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
0311
0312 hdr->sys_info[idx] = value;
0313 }
0314
0315 void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
0316 {
0317 struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
0318
0319 hdr->drv_cap_mask = mask;
0320 }
0321
0322 inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
0323 {
0324 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
0325
0326 return hdr->saved_state[index];
0327 }
0328
0329 inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
0330 u32 value)
0331 {
0332 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
0333
0334 hdr->saved_state[index] = value;
0335 }
0336
/* Minimum 83xx template version whose capture may use PEX DMA. */
#define QLCNIC_TEMPLATE_VERSION (0x20001)
0338
0339 void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
0340 {
0341 struct qlcnic_83xx_dump_template_hdr *hdr;
0342
0343 hdr = fw_dump->tmpl_hdr;
0344 fw_dump->tmpl_hdr_size = hdr->size;
0345 fw_dump->version = hdr->version;
0346 fw_dump->num_entries = hdr->num_entries;
0347 fw_dump->offset = hdr->offset;
0348
0349 hdr->drv_cap_mask = hdr->cap_mask;
0350 fw_dump->cap_mask = hdr->cap_mask;
0351
0352 fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
0353 QLCNIC_TEMPLATE_VERSION;
0354 }
0355
0356 inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
0357 {
0358 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
0359
0360 return hdr->cap_sizes[index];
0361 }
0362
0363 void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
0364 {
0365 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
0366
0367 hdr->sys_info[idx] = value;
0368 }
0369
0370 void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
0371 {
0372 struct qlcnic_83xx_dump_template_hdr *hdr;
0373
0374 hdr = tmpl_hdr;
0375 hdr->drv_cap_mask = mask;
0376 }
0377
/* Maps a template opcode to the handler that captures that entry; the
 * handler returns the number of bytes written to the buffer. */
struct qlcnic_dump_operations {
enum qlcnic_minidump_opcode opcode;
u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
__le32 *);
};
0383
0384 static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
0385 struct qlcnic_dump_entry *entry, __le32 *buffer)
0386 {
0387 int i;
0388 u32 addr, data;
0389 struct __crb *crb = &entry->region.crb;
0390
0391 addr = crb->addr;
0392
0393 for (i = 0; i < crb->no_ops; i++) {
0394 data = qlcnic_ind_rd(adapter, addr);
0395 *buffer++ = cpu_to_le32(addr);
0396 *buffer++ = cpu_to_le32(data);
0397 addr += crb->stride;
0398 }
0399 return crb->no_ops * 2 * sizeof(u32);
0400 }
0401
/*
 * Execute a control entry: for each of no_ops registers (addr, stepped by
 * stride), run every operation whose bit is set in ctr->opcode.  This
 * captures no data itself; it manipulates registers and the template's
 * saved_state[] scratch area for later entries.
 *
 * Returns 0 on success.  NOTE(review): on poll timeout this returns
 * -EINVAL through a u32 return type, i.e. a large positive value --
 * confirm callers treat any non-zero/overlarge size as failure.
 */
static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
struct __ctrl *ctr = &entry->region.ctrl;
int i, k, timeout = 0;
u32 addr, data, temp;
u8 no_ops;

addr = ctr->addr;
no_ops = ctr->no_ops;

for (i = 0; i < no_ops; i++) {
k = 0;
/* Walk the 8 opcode bits in fixed order; order matters because
 * later ops may consume registers/state set by earlier ones. */
for (k = 0; k < 8; k++) {
if (!(ctr->opcode & (1 << k)))
continue;
switch (1 << k) {
case QLCNIC_DUMP_WCRB:
/* Plain write of val1. */
qlcnic_ind_wr(adapter, addr, ctr->val1);
break;
case QLCNIC_DUMP_RWCRB:
/* Read-then-write-back (hardware side effect). */
data = qlcnic_ind_rd(adapter, addr);
qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_ANDCRB:
/* Clear bits outside val2. */
data = qlcnic_ind_rd(adapter, addr);
qlcnic_ind_wr(adapter, addr,
(data & ctr->val2));
break;
case QLCNIC_DUMP_ORCRB:
/* Set the bits in val3. */
data = qlcnic_ind_rd(adapter, addr);
qlcnic_ind_wr(adapter, addr,
(data | ctr->val3));
break;
case QLCNIC_DUMP_POLLCRB:
/* Poll until (reg & val2) == val1, with ~1ms per
 * attempt up to ctr->timeout attempts.  Note:
 * 'timeout' is not reset between ops/iterations, so
 * the budget is shared across the whole entry. */
while (timeout <= ctr->timeout) {
data = qlcnic_ind_rd(adapter, addr);
if ((data & ctr->val2) == ctr->val1)
break;
usleep_range(1000, 2000);
timeout++;
}
if (timeout > ctr->timeout) {
dev_info(&adapter->pdev->dev,
"Timed out, aborting poll CRB\n");
return -EINVAL;
}
break;
case QLCNIC_DUMP_RD_SAVE:
/* Read a register (address optionally taken from
 * saved_state[index_a]) into saved_state[index_v]. */
temp = ctr->index_a;
if (temp)
addr = qlcnic_get_saved_state(adapter,
hdr,
temp);
data = qlcnic_ind_rd(adapter, addr);
qlcnic_set_saved_state(adapter, hdr,
ctr->index_v, data);
break;
case QLCNIC_DUMP_WRT_SAVED:
/* Write saved_state[index_v] (or val1 when index_v
 * is 0) to a register, address optionally from
 * saved_state[index_a]. */
temp = ctr->index_v;
if (temp)
data = qlcnic_get_saved_state(adapter,
hdr,
temp);
else
data = ctr->val1;

temp = ctr->index_a;
if (temp)
addr = qlcnic_get_saved_state(adapter,
hdr,
temp);
qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_MOD_SAVE_ST:
/* Transform saved_state[index_v] in place:
 * shift left/right, optional mask, OR, then add. */
data = qlcnic_get_saved_state(adapter, hdr,
ctr->index_v);
data <<= ctr->shl_val;
data >>= ctr->shr_val;
if (ctr->val2)
data &= ctr->val2;
data |= ctr->val3;
data += ctr->val1;
qlcnic_set_saved_state(adapter, hdr,
ctr->index_v, data);
break;
default:
dev_info(&adapter->pdev->dev,
"Unknown opcode\n");
break;
}
}
addr += ctr->stride;
}
return 0;
}
0499
0500 static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
0501 struct qlcnic_dump_entry *entry, __le32 *buffer)
0502 {
0503 int loop;
0504 u32 val, data = 0;
0505 struct __mux *mux = &entry->region.mux;
0506
0507 val = mux->val;
0508 for (loop = 0; loop < mux->no_ops; loop++) {
0509 qlcnic_ind_wr(adapter, mux->addr, val);
0510 data = qlcnic_ind_rd(adapter, mux->read_addr);
0511 *buffer++ = cpu_to_le32(val);
0512 *buffer++ = cpu_to_le32(data);
0513 val += mux->val_stride;
0514 }
0515 return 2 * mux->no_ops * sizeof(u32);
0516 }
0517
0518 static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
0519 struct qlcnic_dump_entry *entry, __le32 *buffer)
0520 {
0521 int i, loop;
0522 u32 cnt, addr, data, que_id = 0;
0523 struct __queue *que = &entry->region.que;
0524
0525 addr = que->read_addr;
0526 cnt = que->read_addr_cnt;
0527
0528 for (loop = 0; loop < que->no_ops; loop++) {
0529 qlcnic_ind_wr(adapter, que->sel_addr, que_id);
0530 addr = que->read_addr;
0531 for (i = 0; i < cnt; i++) {
0532 data = qlcnic_ind_rd(adapter, addr);
0533 *buffer++ = cpu_to_le32(data);
0534 addr += que->read_addr_stride;
0535 }
0536 que_id += que->stride;
0537 }
0538 return que->no_ops * cnt * sizeof(u32);
0539 }
0540
0541 static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
0542 struct qlcnic_dump_entry *entry, __le32 *buffer)
0543 {
0544 int i;
0545 u32 data;
0546 void __iomem *addr;
0547 struct __ocm *ocm = &entry->region.ocm;
0548
0549 addr = adapter->ahw->pci_base0 + ocm->read_addr;
0550 for (i = 0; i < ocm->no_ops; i++) {
0551 data = readl(addr);
0552 *buffer++ = cpu_to_le32(data);
0553 addr += ocm->read_addr_stride;
0554 }
0555 return ocm->no_ops * sizeof(u32);
0556 }
0557
/*
 * Capture flash/ROM contents (82xx path) through the ROM window
 * registers, under the inter-function flash hardware lock.
 * Returns rom->size bytes written to @buffer.
 */
static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
int i, count = 0;
u32 fl_addr, size, val, lck_val, addr;
struct __mem *rom = &entry->region.mem;

fl_addr = rom->addr;
size = rom->size / 4;
lock_try:
/* Acquire the flash semaphore: a non-zero read means we got the lock.
 * NOTE(review): after MAX_CTL_CHECK failed attempts we fall through and
 * proceed without the lock -- confirm this best-effort behavior is
 * intended for the crash-dump path. */
lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
if (!lck_val && count < MAX_CTL_CHECK) {
usleep_range(10000, 11000);
count++;
goto lock_try;
}
QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
adapter->ahw->pci_func);
for (i = 0; i < size; i++) {
/* Map the 64KB window containing fl_addr, then read the word
 * through the window's data aperture. */
addr = fl_addr & 0xFFFF0000;
qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
addr = LSW(fl_addr) + FLASH_ROM_DATA;
val = qlcnic_ind_rd(adapter, addr);
fl_addr += 4;
*buffer++ = cpu_to_le32(val);
}
/* Reading the unlock register releases the semaphore. */
QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
return rom->size;
}
0587
0588 static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
0589 struct qlcnic_dump_entry *entry, __le32 *buffer)
0590 {
0591 int i;
0592 u32 cnt, val, data, addr;
0593 struct __cache *l1 = &entry->region.cache;
0594
0595 val = l1->init_tag_val;
0596
0597 for (i = 0; i < l1->no_ops; i++) {
0598 qlcnic_ind_wr(adapter, l1->addr, val);
0599 qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
0600 addr = l1->read_addr;
0601 cnt = l1->read_addr_num;
0602 while (cnt) {
0603 data = qlcnic_ind_rd(adapter, addr);
0604 *buffer++ = cpu_to_le32(data);
0605 addr += l1->read_addr_stride;
0606 cnt--;
0607 }
0608 val += l1->stride;
0609 }
0610 return l1->no_ops * l1->read_addr_num * sizeof(u32);
0611 }
0612
/*
 * Capture L2 cache contents.  Like the L1 path, but the control value
 * packs extra fields: LSW is the latch command, and the upper word holds
 * a poll mask (low byte) and poll timeout (high byte).  When a poll mask
 * is present, wait for the latch to complete before reading the line.
 * Returns bytes written, or -EINVAL (through a u32) on poll timeout.
 */
static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
int i;
u32 cnt, val, data, addr;
u8 poll_mask, poll_to, time_out = 0;
struct __cache *l2 = &entry->region.cache;

val = l2->init_tag_val;
/* Unpack poll mask/timeout from the upper word of ctrl_val. */
poll_mask = LSB(MSW(l2->ctrl_val));
poll_to = MSB(MSW(l2->ctrl_val));

for (i = 0; i < l2->no_ops; i++) {
qlcnic_ind_wr(adapter, l2->addr, val);
if (LSW(l2->ctrl_val))
qlcnic_ind_wr(adapter, l2->ctrl_addr,
LSW(l2->ctrl_val));
if (!poll_mask)
goto skip_poll;
/* Note: time_out is not reset per iteration; the poll budget is
 * shared across all ops of this entry. */
do {
data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
if (!(data & poll_mask))
break;
usleep_range(1000, 2000);
time_out++;
} while (time_out <= poll_to);

if (time_out > poll_to) {
dev_err(&adapter->pdev->dev,
"Timeout exceeded in %s, aborting dump\n",
__func__);
return -EINVAL;
}
skip_poll:
addr = l2->read_addr;
cnt = l2->read_addr_num;
while (cnt) {
data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += l2->read_addr_stride;
cnt--;
}
val += l2->stride;
}
return l2->no_ops * l2->read_addr_num * sizeof(u32);
}
0659
0660 static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
0661 struct __mem *mem, __le32 *buffer,
0662 int *ret)
0663 {
0664 u32 addr, data, test;
0665 int i, reg_read;
0666
0667 reg_read = mem->size;
0668 addr = mem->addr;
0669
0670 if ((addr & 0xf) || (reg_read%16)) {
0671 dev_info(&adapter->pdev->dev,
0672 "Unaligned memory addr:0x%x size:0x%x\n",
0673 addr, reg_read);
0674 *ret = -EINVAL;
0675 return 0;
0676 }
0677
0678 mutex_lock(&adapter->ahw->mem_lock);
0679
0680 while (reg_read != 0) {
0681 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
0682 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
0683 qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
0684
0685 for (i = 0; i < MAX_CTL_CHECK; i++) {
0686 test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
0687 if (!(test & TA_CTL_BUSY))
0688 break;
0689 }
0690 if (i == MAX_CTL_CHECK) {
0691 if (printk_ratelimit()) {
0692 dev_err(&adapter->pdev->dev,
0693 "failed to read through agent\n");
0694 *ret = -EIO;
0695 goto out;
0696 }
0697 }
0698 for (i = 0; i < 4; i++) {
0699 data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
0700 *buffer++ = cpu_to_le32(data);
0701 }
0702 addr += 16;
0703 reg_read -= 16;
0704 ret += 16;
0705 cond_resched();
0706 }
0707 out:
0708 mutex_unlock(&adapter->ahw->mem_lock);
0709 return mem->size;
0710 }
0711
0712
0713 #define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000))
0714
0715
0716 #define QLC_DMA_CMD_BUFF_ADDR_LOW 0
0717 #define QLC_DMA_CMD_BUFF_ADDR_HI 4
0718 #define QLC_DMA_CMD_STATUS_CTRL 8
0719
/*
 * Kick one PEX DMA transfer whose descriptor was already written to card
 * memory at mem->desc_card_addr, then poll for completion.
 * The engine number comes from the template's saved_state[].
 * Returns 0 on success, negative errno on register-write failure or
 * timeout (BIT_1 of the status register = busy; polled up to ~400 times
 * with 250-500us sleeps).
 */
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
struct __mem *mem)
{
struct device *dev = &adapter->pdev->dev;
u32 dma_no, dma_base_addr, temp_addr;
int i, ret, dma_sts;
void *tmpl_hdr;

tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
QLC_83XX_DMA_ENGINE_INDEX);
dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);

/* Point the engine at the descriptor (card address, high word 0). */
temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
if (ret)
return ret;

temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
ret = qlcnic_ind_wr(adapter, temp_addr, 0);
if (ret)
return ret;

/* Writing the command register starts the transfer. */
temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
if (ret)
return ret;


/* Poll busy bit (BIT_1) until the engine goes idle. */
temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
for (i = 0; i < 400; i++) {
dma_sts = qlcnic_ind_rd(adapter, temp_addr);

if (dma_sts & BIT_1)
usleep_range(250, 500);
else
break;
}

if (i >= 400) {
dev_info(dev, "PEX DMA operation timed out");
ret = -EIO;
}

return ret;
}
0766
/*
 * Read card memory using the PEX DMA engine into the pre-allocated
 * coherent bounce buffer (fw_dump->dma_buffer, QLC_PEX_DMA_READ_SIZE
 * bytes), chunk by chunk, copying each chunk into @buffer.
 *
 * @ret: out-parameter; negative errno on failure, untouched on success.
 * Returns the number of bytes successfully read (possibly 0).
 */
static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
struct __mem *mem,
__le32 *buffer, int *ret)
{
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
u32 temp, dma_base_addr, size = 0, read_size = 0;
struct qlcnic_pex_dma_descriptor *dma_descr;
struct device *dev = &adapter->pdev->dev;
dma_addr_t dma_phys_addr;
void *dma_buffer;
void *tmpl_hdr;

tmpl_hdr = fw_dump->tmpl_hdr;


/* BIT_31 of the engine's status register signals availability. */
temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
QLC_83XX_DMA_ENGINE_INDEX);
dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
temp = qlcnic_ind_rd(adapter,
dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);

if (!(temp & BIT_31)) {
dev_info(dev, "%s: DMA engine is not available\n", __func__);
*ret = -EIO;
return 0;
}


dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
GFP_KERNEL);
if (!dma_descr) {
*ret = -ENOMEM;
return 0;
}


/* Build the descriptor fields that are constant across chunks:
 * command word carries the PCI function in bits 4..7 of the low
 * byte pair, shifted into the upper half-word. */


dma_phys_addr = fw_dump->phys_addr;
dma_buffer = fw_dump->dma_buffer;
temp = 0;
temp = mem->dma_desc_cmd & 0xff0f;
temp |= (adapter->ahw->pci_func & 0xf) << 4;
dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
dma_descr->src_addr_high = 0;


/* Transfer in QLC_PEX_DMA_READ_SIZE chunks (last one may be short). */
while (read_size < mem->size) {
if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
size = QLC_PEX_DMA_READ_SIZE;
else
size = mem->size - read_size;

dma_descr->src_addr_low = mem->addr + read_size;
dma_descr->read_data_size = size;


/* Write the descriptor into card memory (in 16-byte units). */
temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
*ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
(u32 *)dma_descr, temp);
if (*ret) {
dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
mem->desc_card_addr);
goto free_dma_descr;
}

*ret = qlcnic_start_pex_dma(adapter, mem);
if (*ret) {
dev_info(dev, "Failed to start PEX DMA operation\n");
goto free_dma_descr;
}

/* Chunk landed in the coherent bounce buffer; copy it out. */
memcpy(buffer, dma_buffer, size);
buffer += size / 4;
read_size += size;
}

free_dma_descr:
kfree(dma_descr);

return read_size;
}
0853
0854 static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
0855 struct qlcnic_dump_entry *entry, __le32 *buffer)
0856 {
0857 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
0858 struct device *dev = &adapter->pdev->dev;
0859 struct __mem *mem = &entry->region.mem;
0860 u32 data_size;
0861 int ret = 0;
0862
0863 if (fw_dump->use_pex_dma) {
0864 data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
0865 &ret);
0866 if (ret)
0867 dev_info(dev,
0868 "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
0869 entry->hdr.mask);
0870 else
0871 return data_size;
0872 }
0873
0874 data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
0875 if (ret) {
0876 dev_info(dev,
0877 "Failed to read memory dump using test agent method: mask[0x%x]\n",
0878 entry->hdr.mask);
0879 return 0;
0880 } else {
0881 return data_size;
0882 }
0883 }
0884
0885 static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
0886 struct qlcnic_dump_entry *entry, __le32 *buffer)
0887 {
0888 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
0889 return 0;
0890 }
0891
0892 static int qlcnic_valid_dump_entry(struct device *dev,
0893 struct qlcnic_dump_entry *entry, u32 size)
0894 {
0895 int ret = 1;
0896 if (size != entry->hdr.cap_size) {
0897 dev_err(dev,
0898 "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
0899 entry->hdr.type, entry->hdr.mask, size,
0900 entry->hdr.cap_size);
0901 ret = 0;
0902 }
0903 return ret;
0904 }
0905
/*
 * Poll / read-modify-write entry: write val1 to addr1 and poll addr1
 * until poll_mask bits appear; then mask addr2 with mod_mask, write it
 * back, write val2 to addr1 and poll once more.  Emits (addr2, masked
 * data) as one pair.  Returns 8 bytes on success, 0 on first-poll
 * timeout.
 */
static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry,
__le32 *buffer)
{
struct __pollrdmwr *poll = &entry->region.pollrdmwr;
u32 data, wait_count, poll_wait, temp;

poll_wait = poll->poll_wait;

qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
wait_count = 0;

while (wait_count < poll_wait) {
data = qlcnic_ind_rd(adapter, poll->addr1);
if ((data & poll->poll_mask) != 0)
break;
wait_count++;
}

if (wait_count == poll_wait) {
dev_err(&adapter->pdev->dev,
"Timeout exceeded in %s, aborting dump\n",
__func__);
return 0;
}

data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
qlcnic_ind_wr(adapter, poll->addr2, data);
qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
wait_count = 0;

/* NOTE(review): unlike the first poll, this second poll does not abort
 * on timeout; 'temp' is discarded.  Confirm this asymmetry is intended
 * (the captured pair comes from the read-modify-write above). */
while (wait_count < poll_wait) {
temp = qlcnic_ind_rd(adapter, poll->addr1);
if ((temp & poll->poll_mask) != 0)
break;
wait_count++;
}

*buffer++ = cpu_to_le32(poll->addr2);
*buffer++ = cpu_to_le32(data);

return 2 * sizeof(u32);

}
0950
0951 static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
0952 struct qlcnic_dump_entry *entry, __le32 *buffer)
0953 {
0954 struct __pollrd *pollrd = &entry->region.pollrd;
0955 u32 data, wait_count, poll_wait, sel_val;
0956 int i;
0957
0958 poll_wait = pollrd->poll_wait;
0959 sel_val = pollrd->sel_val;
0960
0961 for (i = 0; i < pollrd->no_ops; i++) {
0962 qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
0963 wait_count = 0;
0964 while (wait_count < poll_wait) {
0965 data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
0966 if ((data & pollrd->poll_mask) != 0)
0967 break;
0968 wait_count++;
0969 }
0970
0971 if (wait_count == poll_wait) {
0972 dev_err(&adapter->pdev->dev,
0973 "Timeout exceeded in %s, aborting dump\n",
0974 __func__);
0975 return 0;
0976 }
0977
0978 data = qlcnic_ind_rd(adapter, pollrd->read_addr);
0979 *buffer++ = cpu_to_le32(sel_val);
0980 *buffer++ = cpu_to_le32(data);
0981 sel_val += pollrd->sel_val_stride;
0982 }
0983 return pollrd->no_ops * (2 * sizeof(u32));
0984 }
0985
0986 static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
0987 struct qlcnic_dump_entry *entry, __le32 *buffer)
0988 {
0989 struct __mux2 *mux2 = &entry->region.mux2;
0990 u32 data;
0991 u32 t_sel_val, sel_val1, sel_val2;
0992 int i;
0993
0994 sel_val1 = mux2->sel_val1;
0995 sel_val2 = mux2->sel_val2;
0996
0997 for (i = 0; i < mux2->no_ops; i++) {
0998 qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
0999 t_sel_val = sel_val1 & mux2->sel_val_mask;
1000 qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
1001 data = qlcnic_ind_rd(adapter, mux2->read_addr);
1002 *buffer++ = cpu_to_le32(t_sel_val);
1003 *buffer++ = cpu_to_le32(data);
1004 qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
1005 t_sel_val = sel_val2 & mux2->sel_val_mask;
1006 qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
1007 data = qlcnic_ind_rd(adapter, mux2->read_addr);
1008 *buffer++ = cpu_to_le32(t_sel_val);
1009 *buffer++ = cpu_to_le32(data);
1010 sel_val1 += mux2->sel_val_stride;
1011 sel_val2 += mux2->sel_val_stride;
1012 }
1013
1014 return mux2->no_ops * (4 * sizeof(u32));
1015 }
1016
1017 static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
1018 struct qlcnic_dump_entry *entry, __le32 *buffer)
1019 {
1020 u32 fl_addr, size;
1021 struct __mem *rom = &entry->region.mem;
1022
1023 fl_addr = rom->addr;
1024 size = rom->size / 4;
1025
1026 if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
1027 (u8 *)buffer, size))
1028 return rom->size;
1029
1030 return 0;
1031 }
1032
/* Opcode dispatch table for 82xx minidump capture. */
static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
{QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
{QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
1055
/* Opcode dispatch table for 83xx minidump capture; adds the pollrd,
 * mux2 and pollrdmwr opcodes and uses the 83xx flash-read path. */
static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
{QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
{QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
{QLCNIC_READ_MUX2, qlcnic_read_mux2},
{QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
{QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
1081
1082 static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
1083 {
1084 uint64_t sum = 0;
1085 int count = temp_size / sizeof(uint32_t);
1086 while (count-- > 0)
1087 sum += *temp_buffer++;
1088 while (sum >> 32)
1089 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
1090 return ~sum;
1091 }
1092
1093 static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
1094 u8 *buffer, u32 size)
1095 {
1096 int ret = 0;
1097
1098 if (qlcnic_82xx_check(adapter))
1099 return -EIO;
1100
1101 if (qlcnic_83xx_lock_flash(adapter))
1102 return -EIO;
1103
1104 ret = qlcnic_83xx_lockless_flash_read32(adapter,
1105 QLC_83XX_MINIDUMP_FLASH,
1106 buffer, size / sizeof(u32));
1107
1108 qlcnic_83xx_unlock_flash(adapter);
1109
1110 return ret;
1111 }
1112
1113 static int
1114 qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
1115 struct qlcnic_cmd_args *cmd)
1116 {
1117 struct qlcnic_83xx_dump_template_hdr tmp_hdr;
1118 u32 size = sizeof(tmp_hdr) / sizeof(u32);
1119 int ret = 0;
1120
1121 if (qlcnic_82xx_check(adapter))
1122 return -EIO;
1123
1124 if (qlcnic_83xx_lock_flash(adapter))
1125 return -EIO;
1126
1127 ret = qlcnic_83xx_lockless_flash_read32(adapter,
1128 QLC_83XX_MINIDUMP_FLASH,
1129 (u8 *)&tmp_hdr, size);
1130
1131 qlcnic_83xx_unlock_flash(adapter);
1132
1133 cmd->rsp.arg[2] = tmp_hdr.size;
1134 cmd->rsp.arg[3] = tmp_hdr.version;
1135
1136 return ret;
1137 }
1138
1139 static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
1140 u32 *version, u32 *temp_size,
1141 u8 *use_flash_temp)
1142 {
1143 int err = 0;
1144 struct qlcnic_cmd_args cmd;
1145
1146 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
1147 return -ENOMEM;
1148
1149 err = qlcnic_issue_cmd(adapter, &cmd);
1150 if (err != QLCNIC_RCODE_SUCCESS) {
1151 if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
1152 qlcnic_free_mbx_args(&cmd);
1153 return -EIO;
1154 }
1155 *use_flash_temp = 1;
1156 }
1157
1158 *temp_size = cmd.rsp.arg[2];
1159 *version = cmd.rsp.arg[3];
1160 qlcnic_free_mbx_args(&cmd);
1161
1162 if (!(*temp_size))
1163 return -EIO;
1164
1165 return 0;
1166 }
1167
/*
 * Fetch the full minidump template from firmware: DMA it into a coherent
 * bounce buffer via the GET_TEMP_HDR mailbox command, then copy it into
 * @buffer converting each little-endian word to host order.
 * Returns 0 on success, negative errno or a firmware return code on
 * failure.
 */
static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
u32 *buffer, u32 temp_size)
{
int err = 0, i;
void *tmp_addr;
__le32 *tmp_buf;
struct qlcnic_cmd_args cmd;
dma_addr_t tmp_addr_t = 0;

tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
&tmp_addr_t, GFP_KERNEL);
if (!tmp_addr)
return -ENOMEM;

if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
err = -ENOMEM;
goto free_mem;
}

/* Tell firmware where to DMA the template (bus address + length). */
cmd.req.arg[1] = LSD(tmp_addr_t);
cmd.req.arg[2] = MSD(tmp_addr_t);
cmd.req.arg[3] = temp_size;
err = qlcnic_issue_cmd(adapter, &cmd);

/* Copy out with per-word endian conversion. */
tmp_buf = tmp_addr;
if (err == QLCNIC_RCODE_SUCCESS) {
for (i = 0; i < temp_size / sizeof(u32); i++)
*buffer++ = __le32_to_cpu(*tmp_buf++);
}

qlcnic_free_mbx_args(&cmd);

free_mem:
dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);

return err;
}
1205
/*
 * Top-level minidump setup: discover the template size, allocate and
 * fetch the template (firmware mailbox first, flash as fallback),
 * validate its checksum, cache its header fields, optionally allocate
 * the PEX DMA bounce buffer, and enable dump state.
 * Returns 0 on success, -EIO/-ENOMEM on failure.
 */
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw;
struct qlcnic_fw_dump *fw_dump;
u32 version, csum, *tmp_buf;
u8 use_flash_temp = 0;
u32 temp_size = 0;
void *temp_buffer;
int err;

ahw = adapter->ahw;
fw_dump = &ahw->fw_dump;
err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
&use_flash_temp);
if (err) {
dev_err(&adapter->pdev->dev,
"Can't get template size %d\n", err);
return -EIO;
}

fw_dump->tmpl_hdr = vzalloc(temp_size);
if (!fw_dump->tmpl_hdr)
return -ENOMEM;

tmp_buf = (u32 *)fw_dump->tmpl_hdr;
if (use_flash_temp)
goto flash_temp;

err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);

if (err) {
/* Jumped to directly when the size query already told us the
 * mailbox path is unavailable; also reached when the mailbox
 * fetch above fails. */
flash_temp:
err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
temp_size);

if (err) {
dev_err(&adapter->pdev->dev,
"Failed to get minidump template header %d\n",
err);
vfree(fw_dump->tmpl_hdr);
fw_dump->tmpl_hdr = NULL;
return -EIO;
}
}

/* A valid template checksums to zero (see qlcnic_temp_checksum()). */
csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);

if (csum) {
dev_err(&adapter->pdev->dev,
"Template header checksum validation failed\n");
vfree(fw_dump->tmpl_hdr);
fw_dump->tmpl_hdr = NULL;
return -EIO;
}

qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);

/* PEX DMA capture needs a coherent bounce buffer; fall back to the
 * test-agent method if the allocation fails. */
if (fw_dump->use_pex_dma) {
fw_dump->dma_buffer = NULL;
temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
QLC_PEX_DMA_READ_SIZE,
&fw_dump->phys_addr,
GFP_KERNEL);
if (!temp_buffer)
fw_dump->use_pex_dma = false;
else
fw_dump->dma_buffer = temp_buffer;
}


dev_info(&adapter->pdev->dev,
"Default minidump capture mask 0x%x\n",
fw_dump->cap_mask);

qlcnic_enable_fw_dump_state(adapter);

return 0;
}
1284
/* Capture a firmware minidump into a freshly allocated buffer.
 *
 * Walks every entry in the cached dump template, invoking the matching
 * per-opcode handler for each entry selected by the capture mask, and
 * accumulates the captured data in fw_dump->data. On completion the
 * dump is marked uncleared (fw_dump->clr) and a FW_DUMP uevent is sent.
 *
 * Returns 0 on success; -EIO if no template is cached, dumping is
 * disabled, a previous dump is still pending, or the capture mask
 * selects nothing; -ENOMEM if the dump buffer cannot be allocated.
 */
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	const struct qlcnic_dump_operations *fw_dump_ops;
	struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	int i, k, ops_cnt, ops_index, dump_size = 0;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_dump_entry *entry;
	void *tmpl_hdr;
	u32 ocm_window;
	__le32 *buffer;
	char mesg[64];
	char *msg[] = {mesg, NULL};

	ahw = adapter->ahw;
	tmpl_hdr = fw_dump->tmpl_hdr;

	/* No cached template means nothing to drive the capture */
	if (!tmpl_hdr)
		return -EIO;

	if (!qlcnic_check_fw_dump_state(adapter)) {
		dev_info(&adapter->pdev->dev, "Dump not enabled\n");
		return -EIO;
	}

	/* Refuse to overwrite a dump userspace has not yet collected */
	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			 "Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}

	netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");

	/* Sum the capture sizes of all capability levels enabled in the
	 * mask; bit 1 (i == 2) corresponds to capability index k == 1.
	 */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & fw_dump->cap_mask)
			dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);

	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data)
		return -ENOMEM;

	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = fw_dump->num_entries;
	entry_offset = fw_dump->offset;
	/* Record driver and firmware versions inside the template header */
	qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
	qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);

	/* Pick the opcode-handler table for this adapter generation */
	if (qlcnic_82xx_check(adapter)) {
		ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
		fw_dump_ops = qlcnic_fw_dump_ops;
	} else {
		hdr_83xx = tmpl_hdr;
		ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
		fw_dump_ops = qlcnic_83xx_fw_dump_ops;
		/* Stash the OCM window register and PCI function in the
		 * header's saved state for use by the 83xx handlers.
		 */
		ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
		hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
		hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
	}

	for (i = 0; i < no_entries; i++) {
		entry = tmpl_hdr + entry_offset;
		/* Entry not selected by the current capture mask */
		if (!(entry->hdr.mask & fw_dump->cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Linear search for the handler matching this opcode */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}

		if (ops_index == ops_cnt) {
			dev_info(dev, "Skipping unknown entry opcode %d\n",
				 entry->hdr.type);
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Capture this entry; handler returns the bytes written */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Advance past the captured data and on to the next entry */
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
		cond_resched();
	}

	/* Mark the dump as pending collection and notify userspace */
	fw_dump->clr = 1;
	snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
	netdev_info(adapter->netdev,
		    "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
		    fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
		    fw_dump->tmpl_hdr);

	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);

	return 0;
}
1400
1401 static inline bool
1402 qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
1403 {
1404
1405
1406
1407
1408
1409 return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
1410 (adapter->ahw->extra_capability[0] &
1411 QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
1412 }
1413
1414 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
1415 {
1416 u32 prev_version, current_version;
1417 struct qlcnic_hardware_context *ahw = adapter->ahw;
1418 struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
1419 struct pci_dev *pdev = adapter->pdev;
1420 bool extended = false;
1421 int ret;
1422
1423 prev_version = adapter->fw_version;
1424 current_version = qlcnic_83xx_get_fw_version(adapter);
1425
1426 if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
1427 vfree(fw_dump->tmpl_hdr);
1428 fw_dump->tmpl_hdr = NULL;
1429
1430 if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
1431 extended = !qlcnic_83xx_extend_md_capab(adapter);
1432
1433 ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
1434 if (ret)
1435 return;
1436
1437 dev_info(&pdev->dev, "Supports FW dump capability\n");
1438
1439
1440
1441
1442
1443 if (extended) {
1444 struct qlcnic_83xx_dump_template_hdr *hdr;
1445
1446 hdr = fw_dump->tmpl_hdr;
1447 if (!hdr)
1448 return;
1449 hdr->drv_cap_mask = 0x1f;
1450 fw_dump->cap_mask = 0x1f;
1451 dev_info(&pdev->dev,
1452 "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
1453 }
1454 }
1455 }