#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/firmware.h>
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_dbg_level = 0xFEFF;
unsigned int csio_port_mask = 0xf;

/* Default FW event queue entries. */
static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;

/* Default MSI param level */
int csio_msi = 2;

/* FCoE function instances */
static int dev_num;

/* FCoE Adapter types and their description */
static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"},
	{"T580-SO 40G", "Chelsio T580-SO 40G [FCoE]"},
	{"T502-BT 1G", "Chelsio T502-BT 1G [FCoE]"}
};

static void csio_mgmtm_cleanup(struct csio_mgmtm *);
static void csio_hw_mbm_cleanup(struct csio_hw *);

/* Forward declarations of state-machine handlers */
static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);

static void csio_hw_initialize(struct csio_hw *hw);
static void csio_evtq_stop(struct csio_hw *hw);
static void csio_evtq_start(struct csio_hw *hw);

int csio_is_hw_ready(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_ready);
}

int csio_is_hw_removing(struct csio_hw *hw)
{
	return csio_match_state(hw, csio_hws_removing);
}

/*
 *	csio_hw_wait_op_done_val - wait until an operation is completed
 *	@hw: the HW module
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if
 *	the operation completes and -EAGAIN otherwise.
 */
int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
{
	uint32_t val;

	while (1) {
		val = csio_rd_reg32(hw, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}

		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

/*
 *	csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 *	@hw: the adapter
 *	@addr: the indirect TP register address
 *	@mask: specifies the field within the register to modify
 *	@val: new value for the field
 *
 *	Sets a field of an indirect TP register to the given value.
 */
void
csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	csio_wr_reg32(hw, addr, TP_PIO_ADDR_A);
	val |= csio_rd_reg32(hw, TP_PIO_DATA_A) & ~mask;
	csio_wr_reg32(hw, val, TP_PIO_DATA_A);
}
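
/*
 * TP registers are reached indirectly: the target register address is
 * written to TP_PIO_ADDR_A and the value is then read and written back
 * through TP_PIO_DATA_A.  That is why the read-modify-write above touches
 * the DATA register twice but the ADDR register only once.
 */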

void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
{
	uint32_t val = csio_rd_reg32(hw, reg) & ~mask;

	csio_wr_reg32(hw, val | value, reg);
	/* Flush */
	csio_rd_reg32(hw, reg);

}

static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
	return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
					    addr, len, buf, 0);
}

/*
 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
 */
#define EEPROM_MAX_RD_POLL	40
#define EEPROM_MAX_WR_POLL	6
#define EEPROM_STAT_ADDR	0x7bfc
#define VPD_BASE		0x400
#define VPD_BASE_OLD		0
#define VPD_LEN			1024
#define VPD_INFO_FLD_HDR_SIZE	3

/*
 *	csio_hw_seeprom_read - read a serial EEPROM location
 *	@hw: hw to read
 *	@addr: EEPROM virtual address
 *	@data: where to store the read data
 *
 *	Read a 32-bit word from a location in serial EEPROM using the card's
 *	PCI VPD capability.  Note that this function must be called with a
 *	virtual address.
 */
static int
csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
{
	uint16_t val = 0;
	int attempts = EEPROM_MAX_RD_POLL;
	uint32_t base = hw->params.pci.vpd_cap_addr;

	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);

	do {
		udelay(10);
		pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
		return -EINVAL;
	}

	pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*(__le32 *)data);

	return 0;
}

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t4_vpd_hdr {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[ID_LEN];
	u8 vpdr_tag;
	u8 vpdr_len[2];
};

/*
 *	csio_hw_get_vpd_keyword_val - Locates an information field keyword in
 *				      the VPD
 *	@v: Pointer to buffered vpd data structure
 *	@kw: The keyword to search for
 *
 *	Returns the offset of the information field keyword's data, or
 *	-EINVAL if the keyword is not found.
 */
static int
csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
{
	int32_t i;
	int32_t offset, len;
	const uint8_t *buf = &v->id_tag;
	const uint8_t *vpdr_len = &v->vpdr_tag;

	offset = sizeof(struct t4_vpd_hdr);
	len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);

	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
		return -EINVAL;

	for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
		if (memcmp(buf + i, kw, 2) == 0) {
			i += VPD_INFO_FLD_HDR_SIZE;
			return i;
		}

		i += VPD_INFO_FLD_HDR_SIZE + buf[i + 2];
	}

	return -EINVAL;
}
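
/*
 * Each VPD-R information field searched above carries a 3-byte header
 * followed by its data:
 *
 *	bytes 0-1: two-character keyword (e.g. "SN", "EC", "RV")
 *	byte  2:   length of the data that follows
 *
 * Since csio_hw_get_vpd_keyword_val() returns the offset of the first data
 * byte, callers recover a field's length from the byte at
 * offset - VPD_INFO_FLD_HDR_SIZE + 2, as csio_hw_get_vpd_params() does for
 * the serial number below.
 */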

static int
csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
{
	*pos = pci_find_capability(pdev, cap);
	if (*pos)
		return 0;

	return -1;
}

/*
 *	csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
 *	@hw: HW module
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int
csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
{
	int i, ret, ec, sn, addr;
	uint8_t *vpd, csum;
	const struct t4_vpd_hdr *v;
	/* To get around compilation warning from strstrip */
	char __always_unused *s;

	if (csio_is_valid_vpd(hw))
		return 0;

	ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
				  &hw->params.pci.vpd_cap_addr);
	if (ret)
		return -EINVAL;

	vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
	if (vpd == NULL)
		return -ENOMEM;

	/*
	 * Card information normally starts at VPD_BASE but early cards had
	 * it at 0.
	 */
	ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
		if (ret) {
			kfree(vpd);
			return ret;
		}
	}

	/* Reset the VPD flag! */
	hw->flags &= (~CSIO_HWF_VPD_VALID);

	v = (const struct t4_vpd_hdr *)vpd;

#define FIND_VPD_KW(var, name) do { \
	var = csio_hw_get_vpd_keyword_val(v, name); \
	if (var < 0) { \
		csio_err(hw, "missing VPD keyword " name "\n"); \
		kfree(vpd); \
		return -EINVAL; \
	} \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
		kfree(vpd);
		return -EINVAL;
	}
	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW

	memcpy(p->id, v->id_data, ID_LEN);
	s = strstrip(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	s = strstrip(p->ec);
	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	s = strstrip(p->sn);

	csio_valid_vpd_copied(hw);

	kfree(vpd);
	return 0;
}

/*
 *	csio_hw_sf1_read - read data from the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
		 int32_t lock, uint32_t *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, SF_LOCK_V(lock) | SF_CONT_V(cont) |
		      BYTECNT_V(byte_cnt - 1), SF_OP_A);
	ret = csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
				       10, NULL);
	if (!ret)
		*valp = csio_rd_reg32(hw, SF_DATA_A);
	return ret;
}

/*
 *	csio_hw_sf1_write - write data to the serial flash
 *	@hw: the HW module
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int
csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
		  int32_t lock, uint32_t val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (csio_rd_reg32(hw, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;

	csio_wr_reg32(hw, val, SF_DATA_A);
	csio_wr_reg32(hw, SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) |
		      OP_V(1) | SF_LOCK_V(lock), SF_OP_A);

	return csio_hw_wait_op_done_val(hw, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS,
					10, NULL);
}

/*
 *	csio_hw_flash_wait_op - wait for a flash operation to complete
 *	@hw: the HW module
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status
 *	register.
 */
static int
csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
{
	int ret;
	uint32_t status;

	while (1) {
		ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
		if (ret != 0)
			return ret;

		ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
		if (ret != 0)
			return ret;

		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/*
 *	csio_hw_read_flash - read words from serial flash
 *	@hw: the HW module
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	endianness.
 */
static int
csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
		   uint32_t *data, int32_t byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32) htonl(*data);
	}
	return 0;
}
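
/*
 * The serial-flash helpers above speak a small command protocol through
 * SF_OP_A/SF_DATA_A: each csio_hw_sf1_write()/csio_hw_sf1_read() moves at
 * most 4 bytes, SF_CONT chains the next transfer into the same flash
 * command, and a final write of 0 to SF_OP_A releases the PL-only lock
 * taken with SF_LOCK.  csio_hw_read_flash() uses this to issue one
 * SF_RD_DATA_FAST command (with a byte-swapped address) followed by a
 * chain of 4-byte reads.
 */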

/*
 *	csio_hw_write_flash - write up to a page of data to the serial flash
 *	@hw: the hw
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int
csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
		    uint32_t n, const uint8_t *data)
{
	int ret = -EINVAL;
	uint32_t buf[64];
	uint32_t i, c, left, val, offset = addr & 0xff;

	if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
	if (ret != 0)
		goto unlock;

	ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
	if (ret != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = csio_hw_flash_wait_op(hw, 8, 1);
	if (ret)
		goto unlock;

	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
		csio_err(hw,
			 "failed to correctly write the flash page at %#x\n",
			 addr);
		return -EINVAL;
	}

	return 0;

unlock:
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return ret;
}

/*
 *	csio_hw_flash_erase_sectors - erase a range of flash sectors
 *	@hw: the HW module
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int
csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
{
	int ret = 0;

	while (start <= end) {

		ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
		if (ret != 0)
			goto out;

		ret = csio_hw_sf1_write(hw, 4, 0, 1,
					SF_ERASE_SECTOR | (start << 8));
		if (ret != 0)
			goto out;

		ret = csio_hw_flash_wait_op(hw, 14, 500);
		if (ret != 0)
			goto out;

		start++;
	}
out:
	if (ret)
		csio_err(hw, "erase of flash sector %d failed, error %d\n",
			 start, ret);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	return 0;
}

static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
	csio_info(hw, "%s: %u.%u.%u.%u\n", str,
		  FW_HDR_FW_VER_MAJOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MINOR_G(hw->fwrev),
		  FW_HDR_FW_VER_MICRO_G(hw->fwrev),
		  FW_HDR_FW_VER_BUILD_G(hw->fwrev));
}

/*
 * csio_hw_get_fw_version - read the firmware version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int
csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
				  offsetof(struct fw_hdr, fw_ver), 1,
				  vers, 0);
}

/*
 * csio_hw_get_tp_version - read the TP microcode version
 * @hw: HW module
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int
csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
{
	return csio_hw_read_flash(hw, FLASH_FW_START +
			offsetof(struct fw_hdr, tp_microcode_ver), 1,
			vers, 0);
}

/*
 * csio_hw_fw_dload - download firmware.
 * @hw: HW module
 * @fw_data: firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 */
static int
csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
{
	uint32_t csum;
	int32_t addr;
	int ret;
	uint32_t i;
	uint8_t first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
	uint32_t sf_sec_size;

	if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
		csio_err(hw, "Serial Flash data invalid\n");
		return -EINVAL;
	}

	if (!size) {
		csio_err(hw, "FW image has no data\n");
		return -EINVAL;
	}

	if (size & 511) {
		csio_err(hw, "FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}

	if (ntohs(hdr->len512) * 512 != size) {
		csio_err(hw, "FW image size differs from size in FW header\n");
		return -EINVAL;
	}

	if (size > FLASH_FW_MAX_SIZE) {
		csio_err(hw, "FW image too large, max is %u bytes\n",
			 FLASH_FW_MAX_SIZE);
		return -EINVAL;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */

	csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
		 FLASH_FW_START_SEC, FLASH_FW_START_SEC + i - 1);

	ret = csio_hw_flash_erase_sectors(hw, FLASH_FW_START_SEC,
					  FLASH_FW_START_SEC + i - 1);
	if (ret) {
		csio_err(hw, "Flash Erase failed\n");
		goto out;
	}

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = csio_hw_write_flash(hw, FLASH_FW_START, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
		 FW_IMG_START, FW_IMG_START + size);

	addr = FLASH_FW_START;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* Finally, write the real version word of the first page. */
	ret = csio_hw_write_flash(hw,
				  FLASH_FW_START +
					offsetof(struct fw_hdr, fw_ver),
				  sizeof(hdr->fw_ver),
				  (const uint8_t *)&hdr->fw_ver);

out:
	if (ret)
		csio_err(hw, "firmware download failed, error %d\n", ret);
	return ret;
}
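
/*
 * Download sequence used above: validate the image (non-zero size,
 * 512-byte multiple, header length field, 32-bit one's-complement
 * checksum), erase just enough sectors to hold it, write the first page
 * with an intentionally bad version word, stream the remaining pages, and
 * only then write the real fw_ver.  A partially written image therefore
 * never presents a valid version to the driver.
 */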

static int
csio_hw_get_flash_params(struct csio_hw *hw)
{
	/* Table for non-standard supported Flash parts.  Note, all Flash
	 * parts must have 64KB sectors.
	 */
	static struct flash_desc {
		u32 vendor_and_model_id;
		u32 size_mb;
	} supported_flash[] = {
		{ 0x150201, 4 << 20 },		/* Spansion 4MB S25FL032P */
	};

	u32 part, manufacturer;
	u32 density, size = 0;
	u32 flashid = 0;
	int ret;

	/* Issue a Read ID command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.
	 */
	ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = csio_hw_sf1_read(hw, 3, 0, 1, &flashid);
	csio_wr_reg32(hw, 0, SF_OP_A);	/* unlock SF */
	if (ret)
		return ret;

	/* Check to see if it's one of our non-standard supported Flash parts.
	 */
	for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
		if (supported_flash[part].vendor_and_model_id == flashid) {
			hw->params.sf_size = supported_flash[part].size_mb;
			hw->params.sf_nsec =
				hw->params.sf_size / SF_SEC_SIZE;
			goto found;
		}

	/* Decode Flash part size.  The code below looks repetitive with
	 * common encodings, but that's not guaranteed in the JEDEC
	 * specification for the Read JEDEC ID command: each manufacturer
	 * may encode density differently.  Only the position of the
	 * Manufacturer ID in the result is guaranteed.
	 */
	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/* This Density -> Size decoding table is taken from Micron
		 * data sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14 ... 0x19: /* 1MB - 32MB */
			size = 1 << density;
			break;
		case 0x20: /* 64MB */
			size = 1 << 26;
			break;
		case 0x21: /* 128MB */
			size = 1 << 27;
			break;
		case 0x22: /* 256MB */
			size = 1 << 28;
		}
		break;
	}
	case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
		/* This Density -> Size decoding table is taken from ISSI
		 * data sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x16: /* 32MB */
			size = 1 << 25;
			break;
		case 0x17: /* 64MB */
			size = 1 << 26;
		}
		break;
	}
	case 0xc2: /* Macronix */
	case 0xef: /* Winbond */ {
		/* This Density -> Size decoding table is taken from
		 * Macronix and Winbond data sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x17: /* 8MB */
		case 0x18: /* 16MB */
			size = 1 << density;
		}
	}
	}

	/* If we didn't recognize the FLASH part, that's no real issue: the
	 * Hardware/Software contract says that Hardware will _*ALWAYS*_
	 * use a FLASH part which is at least 4MB in size and has 64KB
	 * sectors.  The unrecognized FLASH part is likely to be much larger
	 * than 4MB, but that's all we really need.
	 */
	if (size == 0) {
		csio_warn(hw, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
			  flashid);
		size = 1 << 22;
	}

	/* Store decoded Flash size */
	hw->params.sf_size = size;
	hw->params.sf_nsec = size / SF_SEC_SIZE;

found:
	if (hw->params.sf_size < FLASH_MIN_SIZE)
		csio_warn(hw, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			  flashid, hw->params.sf_size, FLASH_MIN_SIZE);
	return 0;
}

/*
 * csio_hw_dev_ready - Wait for the adapter to become ready.
 * @hw: HW module
 *
 * Polls PL_WHOAMI until it reads back something other than all-ones and
 * derives our physical function number from it.
 */
static int
csio_hw_dev_ready(struct csio_hw *hw)
{
	uint32_t reg;
	int cnt = 6;
	int src_pf;

	while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
	       (--cnt != 0))
		mdelay(100);

	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
		src_pf = SOURCEPF_G(reg);
	else
		src_pf = T6_SOURCEPF_G(reg);

	if ((cnt == 0) && (((int32_t)(src_pf) < 0) ||
			   (src_pf >= CSIO_MAX_PFN))) {
		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
		return -EIO;
	}

	hw->pfn = src_pf;

	return 0;
}

/*
 * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
 * @hw: HW module
 * @state: Device state
 *
 * FW_HELLO_CMD. It is issued with lock held.
 */
static int
csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
{
	struct csio_mb *mbp;
	int rv = 0;
	enum fw_retval retval;
	uint8_t mpfn;
	char state_str[16];
	int retries = FW_CMD_HELLO_RETRIES;

	memset(state_str, 0, sizeof(state_str));

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		rv = -ENOMEM;
		CSIO_INC_STATS(hw, n_err_nomem);
		goto out;
	}

retry:
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
		      hw->pfn, CSIO_MASTER_MAY, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv) {
		csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
		goto out_free_mb;
	}

	csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
		rv = -EINVAL;
		goto out_free_mb;
	}

	/* Firmware has designated us to be master */
	if (hw->pfn == mpfn) {
		hw->flags |= CSIO_HWF_MASTER;
	} else if (*state == CSIO_DEV_STATE_UNINIT) {
		/*
		 * If we're not the Master PF then we need to wait around for
		 * the Master PF Driver to finish setting up the adapter.
		 *
		 * Note that we also do this wait if we're a non-Master-capable
		 * PF and there is no current Master PF; a Master PF may show
		 * up momentarily and we wouldn't want to fail pointlessly.
		 * (This can happen when an OS loads lots of different drivers
		 * rapidly at the same time.)  In this case, the Master PF
		 * returned by the firmware will be PCIE_FW_MASTER_M so the
		 * test below will work.
		 */
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller; we exhaust the
		 * timeout if neither occurs.
		 */
		for (;;) {
			uint32_t pcie_fw;

			spin_unlock_irq(&hw->lock);
			msleep(50);
			spin_lock_irq(&hw->lock);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware, keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = csio_rd_reg32(hw, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					rv = -ETIMEDOUT;
					break;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F) {
					*state = CSIO_DEV_STATE_ERR;
					rv = -ETIMEDOUT;
				} else if (pcie_fw & PCIE_FW_INIT_F)
					*state = CSIO_DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (mpfn == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				mpfn = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
		hw->flags &= ~CSIO_HWF_MASTER;
	}

	switch (*state) {
	case CSIO_DEV_STATE_UNINIT:
		strcpy(state_str, "Initializing");
		break;
	case CSIO_DEV_STATE_INIT:
		strcpy(state_str, "Initialized");
		break;
	case CSIO_DEV_STATE_ERR:
		strcpy(state_str, "Error");
		break;
	default:
		strcpy(state_str, "Unknown");
		break;
	}

	if (hw->pfn == mpfn)
		csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
			  hw->pfn, state_str);
	else
		csio_info(hw,
			  "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
			  hw->pfn, mpfn, state_str);

out_free_mb:
	mempool_free(mbp, hw->mb_mempool);
out:
	return rv;
}

/*
 * csio_do_bye - Perform the BYE FW Mailbox command and process response.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_do_bye(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of BYE command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_do_reset - Perform the device reset.
 * @hw: HW module
 * @fw_rst: FW reset
 *
 * If fw_rst is set, issues the FW reset mbox cmd; otherwise
 * does a PIO reset.
 */
static int
csio_do_reset(struct csio_hw *hw, bool fw_rst)
{
	struct csio_mb *mbp;
	enum fw_retval retval;

	if (!fw_rst) {
		/* PIO reset */
		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		mdelay(2000);
		return 0;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
		      PIORSTMODE_F | PIORST_F, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of RESET command failed\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}
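
/*
 * Two reset flavors are used in this driver: a raw PIO reset through
 * PL_RST_A (fw_rst == false), used when the firmware can't be trusted to
 * respond, and the cooperative FW_RESET_CMD mailbox path (fw_rst == true),
 * which lets the firmware quiesce before the chip is reset.
 */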

static int
csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
	uint16_t caps;

	caps = ntohs(rsp->fcoecaps);

	if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
		csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
		return -EINVAL;
	}

	if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
		csio_err(hw, "No FCoE Control Offload capability\n");
		return -EINVAL;
	}

	return 0;
}

/*
 *	csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	PCIE_FW_MASTER_M).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware.
 */
static int
csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
{
	enum fw_retval retval = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct csio_mb *mbp;

		mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
		if (!mbp) {
			CSIO_INC_STATS(hw, n_err_nomem);
			return -ENOMEM;
		}

		csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
			      PIORSTMODE_F | PIORST_F, FW_RESET_CMD_HALT_F,
			      NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of RESET command failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		mempool_free(mbp, hw->mb_mempool);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails, but if our caller insists we'll go ahead and put
	 * the uP into RESET.  This can be useful if the firmware is hung
	 * or even missing ... we'll have to take the risk of putting the
	 * uP into RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we
	 * bypassed the firmware RESET command above or we're dealing with
	 * old firmware which doesn't have the HALT capability.  This will
	 * serve as a flag for the incoming firmware to know that it's
	 * coming out of a HALT rather than a RESET ... if it's new enough
	 * to understand that ...
	 */
	if (retval == 0 || force) {
		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F,
				   PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return retval ? -EINVAL : 0;
}

/*
 *	csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
 *	@hw: the HW module
 *	@mbox: mailbox used for the FW RESET command (if any)
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by csio_hw_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET.  This will cause the
 *	   firmware to start up from its start vector.  And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we time out.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
static int
csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		csio_set_reg_field(hw, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (csio_do_reset(hw, true) == 0)
				return 0;
		}

		csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
		msleep(2000);
	} else {
		int ms;

		csio_set_reg_field(hw, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(csio_rd_reg32(hw, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/*
 *	csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@hw: the HW module
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities, but if
 *	an invalid mailbox token is passed in we skip that step (though
 *	we'll still put the adapter microprocessor into RESET in that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET, losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed:
 *	a positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
static int
csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
		   const u8 *fw_data, uint32_t size, int32_t force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	ret = csio_hw_fw_halt(hw, mbox, force);
	if (ret != 0 && !force)
		return ret;

	ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
	if (ret != 0)
		return ret;

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	return csio_hw_fw_restart(hw, mbox, reset);
}

/*
 * csio_get_device_params - Get device parameters.
 * @hw: HW module
 *
 * Issued with lock held.
 */
static int
csio_get_device_params(struct csio_hw *hw)
{
	struct csio_wrm *wrm = csio_hw_to_wrm(hw);
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 param[6];
	int i, j = 0;

	/* Initialize portids to -1 */
	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		hw->pport[i].portid = -1;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get port vec information. */
	param[0] = FW_PARAM_DEV(PORTVEC);
	/* Get Core clock. */
	param[1] = FW_PARAM_DEV(CCLK);
	/* Get EQ id start and end. */
	param[2] = FW_PARAM_PFVF(EQ_START);
	param[3] = FW_PARAM_PFVF(EQ_END);
	/* Get IQ id start and end. */
	param[4] = FW_PARAM_PFVF(IQFLINT_START);
	param[5] = FW_PARAM_PFVF(IQFLINT_END);

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(param), param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(param), param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	/* Cache the information. */
	hw->port_vec = param[0];
	hw->vpd.cclk = param[1];
	wrm->fw_eq_start = param[2];
	wrm->fw_iq_start = param[4];

	/* Using FW configured max iqs & eqs */
	if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
	    !csio_is_hw_master(hw)) {
		hw->cfg_niq = param[5] - param[4] + 1;
		hw->cfg_neq = param[3] - param[2] + 1;
		csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
			 hw->cfg_niq, hw->cfg_neq);
	}

	hw->port_vec &= csio_port_mask;

	hw->num_pports = hweight32(hw->port_vec);

	csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
		 hw->port_vec, hw->num_pports);

	for (i = 0; i < hw->num_pports; i++) {
		while ((hw->port_vec & (1 << j)) == 0)
			j++;
		hw->pport[i].portid = j++;
		csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
	}
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_config_device_caps - Get and set device capabilities.
 * @hw: HW module
 *
 * Issued with lock held.
 */
static int
csio_config_device_caps(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	int rv = -EINVAL;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
		goto out;
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto out;

	/* Don't config device capabilities if already configured */
	if (hw->fw_state == CSIO_DEV_STATE_INIT) {
		rv = 0;
		goto out;
	}

	/* Write back desired device capabilities */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
			    false, true, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
		goto out;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
		goto out;
	}

	rv = 0;
out:
	mempool_free(mbp, hw->mb_mempool);
	return rv;
}

static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
{
	enum cc_fec cc_fec = 0;

	if (fw_fec & FW_PORT_CAP32_FEC_RS)
		cc_fec |= FEC_RS;
	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
		cc_fec |= FEC_BASER_RS;

	return cc_fec;
}

static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
{
	fw_port_cap32_t fw_pause = 0;

	if (cc_pause & PAUSE_RX)
		fw_pause |= FW_PORT_CAP32_FC_RX;
	if (cc_pause & PAUSE_TX)
		fw_pause |= FW_PORT_CAP32_FC_TX;

	return fw_pause;
}

static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
{
	fw_port_cap32_t fw_fec = 0;

	if (cc_fec & FEC_RS)
		fw_fec |= FW_PORT_CAP32_FEC_RS;
	if (cc_fec & FEC_BASER_RS)
		fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;

	return fw_fec;
}
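
/*
 * The helpers above translate between the common-code link settings
 * (enum cc_pause/enum cc_fec) and the firmware's 32-bit Port Capability
 * bits.  Only the pause and FEC bits are produced in the cc -> fwcap
 * direction; speed and autoneg bits are composed separately by the
 * callers below.
 */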

/**
 * fwcap_to_fwspeed - return highest speed in Port Capabilities
 * @acaps: advertised Port Capabilities
 *
 * Get the highest speed for the port from the advertised Port
 * Capabilities.
 */
fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
{
	#define TEST_SPEED_RETURN(__caps_speed) \
		do { \
			if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
				return FW_PORT_CAP32_SPEED_##__caps_speed; \
		} while (0)

	TEST_SPEED_RETURN(400G);
	TEST_SPEED_RETURN(200G);
	TEST_SPEED_RETURN(100G);
	TEST_SPEED_RETURN(50G);
	TEST_SPEED_RETURN(40G);
	TEST_SPEED_RETURN(25G);
	TEST_SPEED_RETURN(10G);
	TEST_SPEED_RETURN(1G);
	TEST_SPEED_RETURN(100M);

	#undef TEST_SPEED_RETURN

	return 0;
}

/**
 *	fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
 *	@caps16: a 16-bit Port Capabilities value
 *
 *	Returns the equivalent 32-bit Port Capabilities value.
 */
fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
{
	fw_port_cap32_t caps32 = 0;

	#define CAP16_TO_CAP32(__cap) \
		do { \
			if (caps16 & FW_PORT_CAP_##__cap) \
				caps32 |= FW_PORT_CAP32_##__cap; \
		} while (0)

	CAP16_TO_CAP32(SPEED_100M);
	CAP16_TO_CAP32(SPEED_1G);
	CAP16_TO_CAP32(SPEED_25G);
	CAP16_TO_CAP32(SPEED_10G);
	CAP16_TO_CAP32(SPEED_40G);
	CAP16_TO_CAP32(SPEED_100G);
	CAP16_TO_CAP32(FC_RX);
	CAP16_TO_CAP32(FC_TX);
	CAP16_TO_CAP32(ANEG);
	CAP16_TO_CAP32(MDIAUTO);
	CAP16_TO_CAP32(MDISTRAIGHT);
	CAP16_TO_CAP32(FEC_RS);
	CAP16_TO_CAP32(FEC_BASER_RS);
	CAP16_TO_CAP32(802_3_PAUSE);
	CAP16_TO_CAP32(802_3_ASM_DIR);

	#undef CAP16_TO_CAP32

	return caps32;
}
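
/*
 * Note that the 16-bit and 32-bit capability namespaces are not symmetric:
 * fwcaps32_to_caps16() below also maps FORCE_PAUSE, which has no
 * counterpart in the 16 -> 32 direction above, and any 32-bit-only speeds
 * (50G/200G/400G) are silently dropped when narrowing to 16 bits.
 */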

/**
 *	fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
 *	@caps32: a 32-bit Port Capabilities value
 *
 *	Returns the equivalent 16-bit Port Capabilities value.  Note that
 *	not all 32-bit Port Capabilities can be represented in the 16-bit
 *	Port Capabilities and some fields/values may not make it.
 */
fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
{
	fw_port_cap16_t caps16 = 0;

	#define CAP32_TO_CAP16(__cap) \
		do { \
			if (caps32 & FW_PORT_CAP32_##__cap) \
				caps16 |= FW_PORT_CAP_##__cap; \
		} while (0)

	CAP32_TO_CAP16(SPEED_100M);
	CAP32_TO_CAP16(SPEED_1G);
	CAP32_TO_CAP16(SPEED_10G);
	CAP32_TO_CAP16(SPEED_25G);
	CAP32_TO_CAP16(SPEED_40G);
	CAP32_TO_CAP16(SPEED_100G);
	CAP32_TO_CAP16(FC_RX);
	CAP32_TO_CAP16(FC_TX);
	CAP32_TO_CAP16(802_3_PAUSE);
	CAP32_TO_CAP16(802_3_ASM_DIR);
	CAP32_TO_CAP16(ANEG);
	CAP32_TO_CAP16(FORCE_PAUSE);
	CAP32_TO_CAP16(MDIAUTO);
	CAP32_TO_CAP16(MDISTRAIGHT);
	CAP32_TO_CAP16(FEC_RS);
	CAP32_TO_CAP16(FEC_BASER_RS);

	#undef CAP32_TO_CAP16

	return caps16;
}

/**
 *	lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
 *	@lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
 *
 *	Translates the old FW_PORT_ACTION_GET_PORT_INFO lstatus field into
 *	the new 32-bit Port Capabilities value.
 */
fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
{
	fw_port_cap32_t linkattr = 0;

	/*
	 * The format of the Link Status in the old 16-bit Port Information
	 * message isn't the same as the 16-bit Port Capabilities bitfield
	 * used everywhere else, so translate field by field.
	 */
	if (lstatus & FW_PORT_CMD_RXPAUSE_F)
		linkattr |= FW_PORT_CAP32_FC_RX;
	if (lstatus & FW_PORT_CMD_TXPAUSE_F)
		linkattr |= FW_PORT_CAP32_FC_TX;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
		linkattr |= FW_PORT_CAP32_SPEED_100M;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
		linkattr |= FW_PORT_CAP32_SPEED_1G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
		linkattr |= FW_PORT_CAP32_SPEED_10G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
		linkattr |= FW_PORT_CAP32_SPEED_25G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
		linkattr |= FW_PORT_CAP32_SPEED_40G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
		linkattr |= FW_PORT_CAP32_SPEED_100G;

	return linkattr;
}

/**
 *	csio_init_link_config - initialize a link's SW state
 *	@lc: pointer to structure holding the link state
 *	@pcaps: link Port Capabilities
 *	@acaps: link current Advertised Port Capabilities
 *
 *	Initializes the SW state maintained for each link, including the
 *	link's capabilities and default speed/flow-control/autonegotiation
 *	settings.
 */
static void csio_init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
				  fw_port_cap32_t acaps)
{
	lc->pcaps = pcaps;
	lc->def_acaps = acaps;
	lc->lpacaps = 0;
	lc->speed_caps = 0;
	lc->speed = 0;
	lc->requested_fc = PAUSE_RX | PAUSE_TX;
	lc->fc = lc->requested_fc;

	/*
	 * For Forward Error Control, we default to whatever the Firmware
	 * tells us the Link is currently advertising.
	 */
	lc->requested_fec = FEC_AUTO;
	lc->fec = fwcap_to_cc_fec(lc->def_acaps);

	/*
	 * If the Port is capable of Auto-Negotiation, initialize it as
	 * "enabled" and copy over all of the Physical Port Capabilities
	 * to the Advertised Port Capabilities.  Otherwise mark it as
	 * Auto-Negotiate disabled.
	 */
	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
		lc->acaps = lc->pcaps & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->acaps = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps,
			    uint32_t *rcaps)
{
	unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
	fw_port_cap32_t fw_fc, cc_fec, fw_fec, lrcap;

	lc->link_ok = 0;

	/*
	 * Convert driver coding of Pause Frame Flow Control settings into
	 * the Firmware's API.
	 */
	fw_fc = cc_to_fwcap_pause(lc->requested_fc);

	/*
	 * Convert Common Code Forward Error Control settings into the
	 * Firmware's API.  If the current Requested FEC has "Automatic"
	 * (IEEE 802.3) specified, then we use whatever the Firmware
	 * sent us as part of its IEEE 802.3-based interpretation of
	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
	 * use whatever is in the current Requested FEC settings.
	 */
	if (lc->requested_fec & FEC_AUTO)
		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
	else
		cc_fec = lc->requested_fec;
	fw_fec = cc_to_fwcap_fec(cc_fec);

	/*
	 * Figure out what our Requested Port Capabilities are going to be.
	 */
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
		lrcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		lrcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else {
		lrcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
	}

	*rcaps = lrcap;
}
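
/*
 * csio_link_l1cfg() thus produces one of three requested-capability sets:
 * ports without autonegotiation support advertise exactly their physical
 * capabilities, ports with autonegotiation disabled advertise a single
 * forced speed, and autonegotiating ports advertise the full acaps mask;
 * the pause and FEC bits are OR'd into all three.
 */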

/*
 * csio_enable_ports - Bring up all available ports.
 * @hw: HW module.
 *
 */
static int
csio_enable_ports(struct csio_hw *hw)
{
	struct csio_mb *mbp;
	u16 fw_caps = FW_CAPS_UNKNOWN;
	enum fw_retval retval;
	uint8_t portid;
	fw_port_cap32_t pcaps, acaps, rcaps;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	for (i = 0; i < hw->num_pports; i++) {
		portid = hw->pport[i].portid;

		if (fw_caps == FW_CAPS_UNKNOWN) {
			u32 param, val;

			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
			val = 1;

			csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
				       hw->pfn, 0, 1, &param, &val, true,
				       NULL);

			if (csio_mb_issue(hw, mbp)) {
				csio_err(hw, "failed to issue FW_PARAMS_CMD(r) port:%d\n",
					 portid);
				mempool_free(mbp, hw->mb_mempool);
				return -EINVAL;
			}

			csio_mb_process_read_params_rsp(hw, mbp, &retval,
							0, NULL);
			fw_caps = retval ? FW_CAPS16 : FW_CAPS32;
		}

		/* Read PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     false, 0, fw_caps, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_mb_process_read_port_rsp(hw, mbp, &retval, fw_caps,
					      &pcaps, &acaps);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		csio_init_link_config(&hw->pport[i].link_cfg, pcaps, acaps);

		csio_link_l1cfg(&hw->pport[i].link_cfg, fw_caps, &rcaps);

		/* Write back PORT information */
		csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
			     true, rcaps, fw_caps, NULL);

		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
				 portid);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

		retval = csio_mb_fw_retval(mbp);
		if (retval != FW_SUCCESS) {
			csio_err(hw, "FW_PORT_CMD(w) port:%d failed: 0x%x\n",
				 portid, retval);
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}

	}

	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

/*
 * csio_get_fcoe_resinfo - Read FCoE FW resource info.
 * @hw: HW module
 * Issued with lock held.
 */
static int
csio_get_fcoe_resinfo(struct csio_hw *hw)
{
	struct csio_fcoe_res_info *res_info = &hw->fres_info;
	struct fw_fcoe_res_info_cmd *rsp;
	struct csio_mb *mbp;
	enum fw_retval retval;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FW resource info */
	csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	res_info->e_d_tov = ntohs(rsp->e_d_tov);
	res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
	res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
	res_info->r_r_tov = ntohs(rsp->r_r_tov);
	res_info->max_xchgs = ntohl(rsp->max_xchgs);
	res_info->max_ssns = ntohl(rsp->max_ssns);
	res_info->used_xchgs = ntohl(rsp->used_xchgs);
	res_info->used_ssns = ntohl(rsp->used_ssns);
	res_info->max_fcfs = ntohl(rsp->max_fcfs);
	res_info->max_vnps = ntohl(rsp->max_vnps);
	res_info->used_fcfs = ntohl(rsp->used_fcfs);
	res_info->used_vnps = ntohl(rsp->used_vnps);

	csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
		 res_info->max_xchgs);
	mempool_free(mbp, hw->mb_mempool);

	return 0;
}

static int
csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
{
	struct csio_mb *mbp;
	enum fw_retval retval;
	u32 _param[1];

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Find out whether we're dealing with a version of
	 * the firmware which has configuration file support.
	 */
	_param[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));

	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
		       ARRAY_SIZE(_param), _param, NULL, false, NULL);
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	csio_mb_process_read_params_rsp(hw, mbp, &retval,
					ARRAY_SIZE(_param), _param);
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
			 retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	mempool_free(mbp, hw->mb_mempool);
	*param = _param[0];

	return 0;
}

static int
csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
{
	int ret = 0;
	const struct firmware *cf;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	unsigned int mtype = 0, maddr = 0;
	uint32_t *cfg_data;
	int value_to_add = 0;
	const char *fw_cfg_file;

	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
		fw_cfg_file = FW_CFG_NAME_T5;
	else
		fw_cfg_file = FW_CFG_NAME_T6;

	if (request_firmware(&cf, fw_cfg_file, dev) < 0) {
		csio_err(hw, "could not find config file %s, err: %d\n",
			 fw_cfg_file, ret);
		return -ENOENT;
	}

	if (cf->size % 4 != 0)
		value_to_add = 4 - (cf->size % 4);

	cfg_data = kzalloc(cf->size + value_to_add, GFP_KERNEL);
	if (cfg_data == NULL) {
		ret = -ENOMEM;
		goto leave;
	}

	memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
	if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
		ret = -EINVAL;
		goto leave;
	}

	mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
	maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;

	ret = csio_memory_write(hw, mtype, maddr,
				cf->size + value_to_add, cfg_data);

	if ((ret == 0) && (value_to_add != 0)) {
		union {
			u32 word;
			char buf[4];
		} last;
		size_t size = cf->size & ~0x3;
		int i;

		last.word = cfg_data[size >> 2];
		for (i = value_to_add; i < 4; i++)
			last.buf[i] = 0;
		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
	}
	if (ret == 0) {
		csio_info(hw, "config file upgraded to %s\n", fw_cfg_file);
		snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file);
	}

leave:
	kfree(cfg_data);
	release_firmware(cf);
	return ret;
}
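
/*
 * Note on the padding logic in csio_hw_flash_config(): the configuration
 * file is written to adapter memory in 32-bit words, so a file whose size
 * isn't a multiple of 4 is first copied into a zero-padded buffer and the
 * final partial word is then rewritten separately with its trailing pad
 * bytes cleared.
 */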

/*
 * csio_hw_use_fwconfig - Use a Firmware Configuration File.
 * @hw: HW module
 * @reset: Reset flag
 * @fw_cfg_param: FW configuration parameter value
 *
 * Hand a Configuration File to the firmware via FW_CAPS_CONFIG_CMD,
 * preferring a file loaded from the host, then the one stored in FLASH,
 * and finally the firmware's embedded default.  The resulting
 * capabilities are validated and written back.  Issued with lock held.
 */
static int
csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
	struct csio_mb *mbp = NULL;
	struct fw_caps_config_cmd *caps_cmd;
	unsigned int mtype, maddr;
	int rv = -EINVAL;
	uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
	char path[64];
	char *config_name = NULL;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		rv = csio_do_reset(hw, true);
		if (rv != 0)
			goto bye;
	}

	/*
	 * If we have a configuration file in the host, use that.
	 * Otherwise, use the configuration file stored in the
	 * HW flash ...
	 */
	spin_unlock_irq(&hw->lock);
	rv = csio_hw_flash_config(hw, fw_cfg_param, path);
	spin_lock_irq(&hw->lock);
	if (rv != 0) {
		/*
		 * The config file was not found.  Use the default
		 * config file from flash.
		 */
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
	} else {
		config_name = path;
		mtype = FW_PARAMS_PARAM_Y_G(*fw_cfg_param);
		maddr = FW_PARAMS_PARAM_Z_G(*fw_cfg_param) << 16;
	}

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get
	 * it to parse the Configuration File.
	 */
	caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd->cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the
	 * firmware.
	 */
	if (rv == ENOENT) {
		CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
		caps_cmd->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
					      FW_CMD_REQUEST_F |
					      FW_CMD_READ_F);
		caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

		if (csio_mb_issue(hw, mbp)) {
			rv = -EINVAL;
			goto bye;
		}

		rv = csio_mb_fw_retval(mbp);
		config_name = "Firmware Default";
	}
	if (rv != FW_SUCCESS)
		goto bye;

	finiver = ntohl(caps_cmd->finiver);
	finicsum = ntohl(caps_cmd->finicsum);
	cfcsum = ntohl(caps_cmd->cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd->op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));

	if (csio_mb_issue(hw, mbp)) {
		rv = -EINVAL;
		goto bye;
	}

	rv = csio_mb_fw_retval(mbp);
	if (rv != FW_SUCCESS) {
		csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
		goto bye;
	}

	if (finicsum != cfcsum) {
		csio_warn(hw,
			  "Config File checksum mismatch: csum=%#x, computed=%#x\n",
			  finicsum, cfcsum);
	}

	/* Validate device capabilities */
	rv = csio_hw_validate_caps(hw, mbp);
	if (rv != 0)
		goto bye;

	mempool_free(mbp, hw->mb_mempool);
	mbp = NULL;

	/*
	 * Note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

	/* device parameters */
	rv = csio_get_device_params(hw);
	if (rv != 0)
		goto bye;

	/* Configure SGE */
	csio_wr_sge_init(hw);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	/* Post event to notify completion of configuration */
	csio_post_event(&hw->sm, CSIO_HWE_INIT);

	csio_info(hw, "Successfully configured using Firmware "
		  "Configuration File %s, version %#x, computed checksum %#x\n",
		  config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	if (mbp)
		mempool_free(mbp, hw->mb_mempool);
	hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
	csio_warn(hw, "Configuration file error %d\n", rv);
	return rv;
}

/*
 * Is the given firmware API compatible with the one the driver was
 * compiled with?
 */
static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
	/* short circuit if it's the exact same firmware version */
	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
		return 1;

#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
	    SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
		return 1;
#undef SAME_INTF

	return 0;
}

/*
 * The firmware in the filesystem is usable, but should it be installed?
 * This routine explains itself in detail if it indicates the filesystem
 * firmware should be installed.
 */
static int csio_should_install_fs_fw(struct csio_hw *hw, int card_fw_usable,
				     int k, int c)
{
	const char *reason;

	if (!card_fw_usable) {
		reason = "incompatible or unusable";
		goto install;
	}

	if (k > c) {
		reason = "older than the version supported with this driver";
		goto install;
	}

	return 0;

install:
	csio_err(hw, "firmware on card (%u.%u.%u.%u) is %s, "
		 "installing firmware %u.%u.%u.%u on card.\n",
		 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
		 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
		 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
		 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));

	return 1;
}
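
/*
 * The k/c arguments of csio_should_install_fs_fw() are the 32-bit firmware
 * version words (major/minor/micro/build, packed most-significant first)
 * of the filesystem image and the card image respectively, so the plain
 * numeric comparison above orders releases correctly.
 */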

static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T5,
		.fs_name = FW_CFG_NAME_T5,
		.fw_mod_name = FW_FNAME_T5,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW_CFG_NAME_T6,
		.fw_mod_name = FW_FNAME_T6,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}

static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
			   const u8 *fw_data, unsigned int fw_size,
			   struct fw_hdr *card_fw, enum csio_dev_state state,
			   int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = csio_hw_read_flash(hw, FLASH_FW_START,
				 sizeof(*card_fw) / sizeof(uint32_t),
				 (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		csio_err(hw,
			 "Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match
		 * and the filesystem one is an exact match too, or the
		 * filesystem one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == CSIO_DEV_STATE_UNINIT &&
		   csio_should_install_fs_fw(hw, card_fw_usable,
					     be32_to_cpu(fs_fw->fw_ver),
					     be32_to_cpu(card_fw->fw_ver))) {
		ret = csio_hw_fw_upgrade(hw, hw->pfn, fw_data,
					 fw_size, 0);
		if (ret != 0) {
			csio_err(hw,
				 "failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		memcpy(card_fw, fs_fw, sizeof(*card_fw));
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of the upgrade */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		csio_err(hw, "Cannot find a usable firmware: "
			 "chip state %d, "
			 "driver compiled with %d.%d.%d.%d, "
			 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			 state,
			 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = -EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	hw->fwrev = be32_to_cpu(card_fw->fw_ver);
	hw->tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}
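
/*
 * csio_hw_prep_fw() decision summary: keep the card's firmware when it
 * exactly matches what the driver was built against (and nothing newer is
 * staged in /lib/firmware); otherwise, if the adapter is still
 * uninitialized and the filesystem image is both compatible and worth
 * installing, flash it and skip the later reset; failing both, give up
 * with a detailed version report.
 */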

/*
 * csio_hw_flash_fw - Load the firmware image onto the card, if required.
 * @hw: HW module
 * @reset: whether a full device reset is still needed afterwards
 *
 * Returns 0 if flashing succeeded or was not needed, and a negative
 * errno otherwise.
 */
static int
csio_hw_flash_fw(struct csio_hw *hw, int *reset)
{
	int ret = -ECANCELED;
	const struct firmware *fw;
	struct fw_info *fw_info;
	struct fw_hdr *card_fw;
	struct pci_dev *pci_dev = hw->pdev;
	struct device *dev = &pci_dev->dev;
	const u8 *fw_data = NULL;
	unsigned int fw_size = 0;
	const char *fw_bin_file;

	/* This is the firmware whose headers the driver was compiled
	 * against.
	 */
	fw_info = find_fw_info(CHELSIO_CHIP_VERSION(hw->chip_id));
	if (fw_info == NULL) {
		csio_err(hw,
			 "unable to get firmware info for chip %d.\n",
			 CHELSIO_CHIP_VERSION(hw->chip_id));
		return -EINVAL;
	}

	/* Allocate memory to read the header of the firmware on the
	 * card.
	 */
	card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
	if (!card_fw)
		return -ENOMEM;

	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
		fw_bin_file = FW_FNAME_T5;
	else
		fw_bin_file = FW_FNAME_T6;

	if (request_firmware(&fw, fw_bin_file, dev) < 0) {
		csio_err(hw, "could not find firmware image %s, err: %d\n",
			 fw_bin_file, ret);
	} else {
		fw_data = fw->data;
		fw_size = fw->size;
	}

	/* upgrade FW logic */
	ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
			      hw->fw_state, reset);

	/* Cleaning up */
	if (fw != NULL)
		release_firmware(fw);
	kfree(card_fw);
	return ret;
}

static int csio_hw_check_fwver(struct csio_hw *hw)
{
	if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) &&
	    (hw->fwrev < CSIO_MIN_T6_FW)) {
		csio_hw_print_fw_version(hw, "T6 unsupported fw");
		return -1;
	}

	return 0;
}

/*
 * csio_hw_configure - Configure HW.
 * @hw: HW module
 *
 */
static void
csio_hw_configure(struct csio_hw *hw)
{
	int reset = 1;
	int rv;
	u32 param[1];

	rv = csio_hw_dev_ready(hw);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* HW version */
	hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV_A);

	/* Needed for FW download */
	rv = csio_hw_get_flash_params(hw);
	if (rv != 0) {
		csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Set PCIe completion timeout to 4 seconds */
	if (pci_is_pcie(hw->pdev))
		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);

	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);

	rv = csio_hw_get_fw_version(hw, &hw->fwrev);
	if (rv != 0)
		goto out;

	csio_hw_print_fw_version(hw, "Firmware revision");

	rv = csio_do_hello(hw, &hw->fw_state);
	if (rv != 0) {
		CSIO_INC_STATS(hw, n_err_fatal);
		csio_post_event(&hw->sm, CSIO_HWE_FATAL);
		goto out;
	}

	/* Read vpd */
	rv = csio_hw_get_vpd_params(hw, &hw->vpd);
	if (rv != 0)
		goto out;

	csio_hw_get_fw_version(hw, &hw->fwrev);
	csio_hw_get_tp_version(hw, &hw->tp_vers);
	if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {

		/* Do firmware update */
		spin_unlock_irq(&hw->lock);
		rv = csio_hw_flash_fw(hw, &reset);
		spin_lock_irq(&hw->lock);

		if (rv != 0)
			goto out;

		rv = csio_hw_check_fwver(hw);
		if (rv < 0)
			goto out;

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		rv = csio_hw_check_fwconfig(hw, param);
		if (rv != 0) {
			csio_info(hw, "Firmware doesn't support "
				  "Firmware Configuration files\n");
			goto out;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		rv = csio_hw_use_fwconfig(hw, reset, param);
		if (rv == -ENOENT) {
			csio_info(hw, "Could not initialize "
				  "adapter, error %d\n", rv);
			goto out;
		}
		if (rv != 0) {
			csio_info(hw, "Could not initialize "
				  "adapter, error %d\n", rv);
			goto out;
		}

	} else {
		rv = csio_hw_check_fwver(hw);
		if (rv < 0)
			goto out;

		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

			/* Set the state machine to track init state */
			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;

			/* device parameters */
			rv = csio_get_device_params(hw);
			if (rv != 0)
				goto out;

			/* Get device capabilities */
			rv = csio_config_device_caps(hw);
			if (rv != 0)
				goto out;

			/* Configure SGE */
			csio_wr_sge_init(hw);

			/* Post event to notify completion of configuration */
			csio_post_event(&hw->sm, CSIO_HWE_INIT);
			goto out;
		}
	}

out:
	return;
}
2599
2600
2601
2602
2603
2604
2605 static void
2606 csio_hw_initialize(struct csio_hw *hw)
2607 {
2608 struct csio_mb *mbp;
2609 enum fw_retval retval;
2610 int rv;
2611 int i;
2612
2613 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2614 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
2615 if (!mbp)
2616 goto out;
2617
2618 csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
2619
2620 if (csio_mb_issue(hw, mbp)) {
2621 csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
2622 goto free_and_out;
2623 }
2624
2625 retval = csio_mb_fw_retval(mbp);
2626 if (retval != FW_SUCCESS) {
2627 csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
2628 retval);
2629 goto free_and_out;
2630 }
2631
2632 mempool_free(mbp, hw->mb_mempool);
2633 }
2634
2635 rv = csio_get_fcoe_resinfo(hw);
2636 if (rv != 0) {
2637 csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
2638 goto out;
2639 }
2640
2641 spin_unlock_irq(&hw->lock);
2642 rv = csio_config_queues(hw);
2643 spin_lock_irq(&hw->lock);
2644
2645 if (rv != 0) {
2646 csio_err(hw, "Config of queues failed!: %d\n", rv);
2647 goto out;
2648 }
2649
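	/* Start with the module type unknown (NA) */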
2650 for (i = 0; i < hw->num_pports; i++)
2651 hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;
2652
2653 if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
2654 rv = csio_enable_ports(hw);
2655 if (rv != 0) {
2656 csio_err(hw, "Failed to enable ports: %d\n", rv);
2657 goto out;
2658 }
2659 }
2660
2661 csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
2662 return;
2663
2664 free_and_out:
2665 mempool_free(mbp, hw->mb_mempool);
2666 out:
2667 return;
2668 }
2669
2670 #define PF_INTR_MASK (PFSW_F | PFCIM_F)
2671
2672
2673
2674
2675
2676
2677
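/*
 * csio_hw_intr_enable - Enable HW interrupts.
 * @hw: Pointer to HW module.
 *
 * Enable interrupts in HW registers.
 */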
2678 static void
2679 csio_hw_intr_enable(struct csio_hw *hw)
2680 {
2681 uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
2682 u32 pf = 0;
2683 uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
2684
2685 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
2686 pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2687 else
2688 pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2689
2690
2691
2692
2693
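	/*
	 * Route mailbox interrupts: program the AIVEC field for MSI-X and
	 * clear it for MSI.
	 */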
2694 if (hw->intr_mode == CSIO_IM_MSIX)
2695 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
2696 AIVEC_V(AIVEC_M), vec);
2697 else if (hw->intr_mode == CSIO_IM_MSI)
2698 csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG_A),
2699 AIVEC_V(AIVEC_M), 0);
2700
2701 csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE_A));
2702
2703
2704 csio_mb_intr_enable(hw);
2705
2706
2707 if (csio_is_hw_master(hw)) {
2708
2709
2710
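		/* Disable the Serial FLASH interrupt, if enabled */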
2711 pl &= (~SF_F);
2712 csio_wr_reg32(hw, pl, PL_INT_ENABLE_A);
2713
2714 csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE_F |
2715 EGRESS_SIZE_ERR_F | ERR_INVALID_CIDX_INC_F |
2716 ERR_CPL_OPCODE_0_F | ERR_DROPPED_DB_F |
2717 ERR_DATA_CPL_ON_HIGH_QID1_F |
2718 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
2719 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
2720 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
2721 ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F,
2722 SGE_INT_ENABLE3_A);
2723 csio_set_reg_field(hw, PL_INT_MAP0_A, 0, 1 << pf);
2724 }
2725
2726 hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
2727
2728 }
2729
2730
2731
2732
2733
2734
2735
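/*
 * csio_hw_intr_disable - Disable HW interrupts.
 * @hw: Pointer to HW module.
 *
 * Turn off mailbox and PCI_PF_CFG interrupts.
 */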
2736 void
2737 csio_hw_intr_disable(struct csio_hw *hw)
2738 {
2739 u32 pf = 0;
2740
2741 if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
2742 pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2743 else
2744 pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
2745
2746 if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
2747 return;
2748
2749 hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
2750
2751 csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE_A));
2752 if (csio_is_hw_master(hw))
2753 csio_set_reg_field(hw, PL_INT_MAP0_A, 1 << pf, 0);
2754
2755
2756 csio_mb_intr_disable(hw);
2757
2758 }
2759
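/*
 * csio_hw_fatal_err - Handle a fatal HW error.
 * @hw: HW module.
 *
 * Stops the SGE, disables interrupts and logs the failure.
 */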
2760 void
2761 csio_hw_fatal_err(struct csio_hw *hw)
2762 {
2763 csio_set_reg_field(hw, SGE_CONTROL_A, GLOBALENABLE_F, 0);
2764 csio_hw_intr_disable(hw);
2765
2766
2767 csio_fatal(hw, "HW Fatal error encountered!\n");
2768 }
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
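/*****************************************************************************/
/* START: HW SM                                                              */
/*****************************************************************************/
/*
 * csio_hws_uninit - Uninit state
 * @hw: HW module
 * @evt: Event
 */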
2779 static void
2780 csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
2781 {
2782 hw->prev_evt = hw->cur_evt;
2783 hw->cur_evt = evt;
2784 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2785
2786 switch (evt) {
2787 case CSIO_HWE_CFG:
2788 csio_set_state(&hw->sm, csio_hws_configuring);
2789 csio_hw_configure(hw);
2790 break;
2791
2792 default:
2793 CSIO_INC_STATS(hw, n_evt_unexp);
2794 break;
2795 }
2796 }
2797
2798
2799
2800
2801
2802
2803
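/*
 * csio_hws_configuring - Configuring state
 * @hw: HW module
 * @evt: Event
 */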
2804 static void
2805 csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
2806 {
2807 hw->prev_evt = hw->cur_evt;
2808 hw->cur_evt = evt;
2809 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2810
2811 switch (evt) {
2812 case CSIO_HWE_INIT:
2813 csio_set_state(&hw->sm, csio_hws_initializing);
2814 csio_hw_initialize(hw);
2815 break;
2816
2817 case CSIO_HWE_INIT_DONE:
2818 csio_set_state(&hw->sm, csio_hws_ready);
2819
2820 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
2821 break;
2822
2823 case CSIO_HWE_FATAL:
2824 csio_set_state(&hw->sm, csio_hws_uninit);
2825 break;
2826
2827 case CSIO_HWE_PCI_REMOVE:
2828 csio_do_bye(hw);
2829 break;
2830 default:
2831 CSIO_INC_STATS(hw, n_evt_unexp);
2832 break;
2833 }
2834 }
2835
2836
2837
2838
2839
2840
2841
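/*
 * csio_hws_initializing - Initializing state
 * @hw: HW module
 * @evt: Event
 */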
2842 static void
2843 csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
2844 {
2845 hw->prev_evt = hw->cur_evt;
2846 hw->cur_evt = evt;
2847 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2848
2849 switch (evt) {
2850 case CSIO_HWE_INIT_DONE:
2851 csio_set_state(&hw->sm, csio_hws_ready);
2852
2853
2854 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
2855
2856
2857 csio_hw_intr_enable(hw);
2858 break;
2859
2860 case CSIO_HWE_FATAL:
2861 csio_set_state(&hw->sm, csio_hws_uninit);
2862 break;
2863
2864 case CSIO_HWE_PCI_REMOVE:
2865 csio_do_bye(hw);
2866 break;
2867
2868 default:
2869 CSIO_INC_STATS(hw, n_evt_unexp);
2870 break;
2871 }
2872 }
2873
2874
2875
2876
2877
2878
2879
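/*
 * csio_hws_ready - Ready state
 * @hw: HW module
 * @evt: Event
 */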
2880 static void
2881 csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
2882 {
2883
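	/* Remember the event for use while quiescing */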
2884 hw->evtflag = evt;
2885
2886 hw->prev_evt = hw->cur_evt;
2887 hw->cur_evt = evt;
2888 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2889
2890 switch (evt) {
2891 case CSIO_HWE_HBA_RESET:
2892 case CSIO_HWE_FW_DLOAD:
2893 case CSIO_HWE_SUSPEND:
2894 case CSIO_HWE_PCI_REMOVE:
2895 case CSIO_HWE_PCIERR_DETECTED:
2896 csio_set_state(&hw->sm, csio_hws_quiescing);
2897
2898 if (evt == CSIO_HWE_HBA_RESET ||
2899 evt == CSIO_HWE_PCIERR_DETECTED)
2900 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
2901 else
2902 csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);
2903
2904 csio_hw_intr_disable(hw);
2905 csio_hw_mbm_cleanup(hw);
2906 csio_evtq_stop(hw);
2907 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
2908 csio_evtq_flush(hw);
2909 csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
2910 csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
2911 break;
2912
2913 case CSIO_HWE_FATAL:
2914 csio_set_state(&hw->sm, csio_hws_uninit);
2915 break;
2916
2917 default:
2918 CSIO_INC_STATS(hw, n_evt_unexp);
2919 break;
2920 }
2921 }
2922
2923
2924
2925
2926
2927
2928
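/*
 * csio_hws_quiescing - Quiescing state
 * @hw: HW module
 * @evt: Event
 */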
2929 static void
2930 csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
2931 {
2932 hw->prev_evt = hw->cur_evt;
2933 hw->cur_evt = evt;
2934 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2935
2936 switch (evt) {
2937 case CSIO_HWE_QUIESCED:
2938 switch (hw->evtflag) {
2939 case CSIO_HWE_FW_DLOAD:
2940 csio_set_state(&hw->sm, csio_hws_resetting);
2941
2942 fallthrough;
2943
2944 case CSIO_HWE_HBA_RESET:
2945 csio_set_state(&hw->sm, csio_hws_resetting);
2946
2947 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
2948 csio_wr_destroy_queues(hw, false);
2949 csio_do_reset(hw, false);
2950 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
2951 break;
2952
2953 case CSIO_HWE_PCI_REMOVE:
2954 csio_set_state(&hw->sm, csio_hws_removing);
2955 csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
2956 csio_wr_destroy_queues(hw, true);
2957
2958 csio_do_bye(hw);
2959 break;
2960
2961 case CSIO_HWE_SUSPEND:
2962 csio_set_state(&hw->sm, csio_hws_quiesced);
2963 break;
2964
2965 case CSIO_HWE_PCIERR_DETECTED:
2966 csio_set_state(&hw->sm, csio_hws_pcierr);
2967 csio_wr_destroy_queues(hw, false);
2968 break;
2969
2970 default:
2971 CSIO_INC_STATS(hw, n_evt_unexp);
2972 break;
2973
2974 }
2975 break;
2976
2977 default:
2978 CSIO_INC_STATS(hw, n_evt_unexp);
2979 break;
2980 }
2981 }
2982
2983
2984
2985
2986
2987
2988
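/*
 * csio_hws_quiesced - Quiesced state
 * @hw: HW module
 * @evt: Event
 */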
2989 static void
2990 csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
2991 {
2992 hw->prev_evt = hw->cur_evt;
2993 hw->cur_evt = evt;
2994 CSIO_INC_STATS(hw, n_evt_sm[evt]);
2995
2996 switch (evt) {
2997 case CSIO_HWE_RESUME:
2998 csio_set_state(&hw->sm, csio_hws_configuring);
2999 csio_hw_configure(hw);
3000 break;
3001
3002 default:
3003 CSIO_INC_STATS(hw, n_evt_unexp);
3004 break;
3005 }
3006 }
3007
3008
3009
3010
3011
3012
3013
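/*
 * csio_hws_resetting - HW resetting state
 * @hw: HW module
 * @evt: Event
 */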
3014 static void
3015 csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
3016 {
3017 hw->prev_evt = hw->cur_evt;
3018 hw->cur_evt = evt;
3019 CSIO_INC_STATS(hw, n_evt_sm[evt]);
3020
3021 switch (evt) {
3022 case CSIO_HWE_HBA_RESET_DONE:
3023 csio_evtq_start(hw);
3024 csio_set_state(&hw->sm, csio_hws_configuring);
3025 csio_hw_configure(hw);
3026 break;
3027
3028 default:
3029 CSIO_INC_STATS(hw, n_evt_unexp);
3030 break;
3031 }
3032 }
3033
3034
3035
3036
3037
3038
3039
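/*
 * csio_hws_removing - Removing state
 * @hw: HW module
 * @evt: Event
 */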
3040 static void
3041 csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
3042 {
3043 hw->prev_evt = hw->cur_evt;
3044 hw->cur_evt = evt;
3045 CSIO_INC_STATS(hw, n_evt_sm[evt]);
3046
3047 switch (evt) {
3048 case CSIO_HWE_HBA_RESET:
3049 if (!csio_is_hw_master(hw))
3050 break;
3051
3052
3053
3054
3055
3056 csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
3057 csio_wr_reg32(hw, PIORSTMODE_F | PIORST_F, PL_RST_A);
3058 mdelay(2000);
3059 break;
3060
3061
3062 default:
3063 CSIO_INC_STATS(hw, n_evt_unexp);
3064 break;
3065
3066 }
3067 }
3068
3069
3070
3071
3072
3073
3074
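/*
 * csio_hws_pcierr - PCI error state
 * @hw: HW module
 * @evt: Event
 */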
3075 static void
3076 csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
3077 {
3078 hw->prev_evt = hw->cur_evt;
3079 hw->cur_evt = evt;
3080 CSIO_INC_STATS(hw, n_evt_sm[evt]);
3081
3082 switch (evt) {
3083 case CSIO_HWE_PCIERR_SLOT_RESET:
3084 csio_evtq_start(hw);
3085 csio_set_state(&hw->sm, csio_hws_configuring);
3086 csio_hw_configure(hw);
3087 break;
3088
3089 default:
3090 CSIO_INC_STATS(hw, n_evt_unexp);
3091 break;
3092 }
3093 }
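/*****************************************************************************/
/* END: HW SM                                                                */
/*****************************************************************************/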
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
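/*
 * csio_handle_intr_status - table driven interrupt handler
 * @hw: HW instance
 * @reg: register containing the interrupt status bits
 * @acts: table of interrupt actions, terminated by an entry with mask 0
 *
 * Applies each mask in @acts to the status word read from @reg: fatal
 * conditions are logged via csio_fatal() and counted, non-fatal ones via
 * csio_info(). All handled bits are then written back to @reg to clear
 * them. See csio_tp_intr_handler() below for a typical action table.
 *
 * Returns the number of fatal interrupt conditions.
 */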
3112 int
3113 csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
3114 const struct intr_info *acts)
3115 {
3116 int fatal = 0;
3117 unsigned int mask = 0;
3118 unsigned int status = csio_rd_reg32(hw, reg);
3119
3120 for ( ; acts->mask; ++acts) {
3121 if (!(status & acts->mask))
3122 continue;
3123 if (acts->fatal) {
3124 fatal++;
3125 csio_fatal(hw, "Fatal %s (0x%x)\n",
3126 acts->msg, status & acts->mask);
3127 } else if (acts->msg)
3128 csio_info(hw, "%s (0x%x)\n",
3129 acts->msg, status & acts->mask);
3130 mask |= acts->mask;
3131 }
3132 status &= mask;
3133 if (status)
3134 csio_wr_reg32(hw, status, reg);
3135 return fatal;
3136 }
3137
3138
3139
3140
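/*
 * TP interrupt handler.
 */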
3141 static void csio_tp_intr_handler(struct csio_hw *hw)
3142 {
3143 static struct intr_info tp_intr_info[] = {
3144 { 0x3fffffff, "TP parity error", -1, 1 },
3145 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
3146 { 0, NULL, 0, 0 }
3147 };
3148
3149 if (csio_handle_intr_status(hw, TP_INT_CAUSE_A, tp_intr_info))
3150 csio_hw_fatal_err(hw);
3151 }
3152
3153
3154
3155
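/*
 * SGE interrupt handler.
 */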
3156 static void csio_sge_intr_handler(struct csio_hw *hw)
3157 {
3158 uint64_t v;
3159
3160 static struct intr_info sge_intr_info[] = {
3161 { ERR_CPL_EXCEED_IQE_SIZE_F,
3162 "SGE received CPL exceeding IQE size", -1, 1 },
3163 { ERR_INVALID_CIDX_INC_F,
3164 "SGE GTS CIDX increment too large", -1, 0 },
3165 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
3166 { ERR_DROPPED_DB_F, "SGE doorbell dropped", -1, 0 },
3167 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
3168 "SGE IQID > 1023 received CPL for FL", -1, 0 },
3169 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
3170 0 },
3171 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
3172 0 },
3173 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
3174 0 },
3175 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
3176 0 },
3177 { ERR_ING_CTXT_PRIO_F,
3178 "SGE too many priority ingress contexts", -1, 0 },
3179 { ERR_EGR_CTXT_PRIO_F,
3180 "SGE too many priority egress contexts", -1, 0 },
3181 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
3182 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
3183 { 0, NULL, 0, 0 }
3184 };
3185
3186 v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1_A) |
3187 ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2_A) << 32);
3188 if (v) {
3189 csio_fatal(hw, "SGE parity error (%#llx)\n",
3190 (unsigned long long)v);
3191 csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
3192 SGE_INT_CAUSE1_A);
3193 csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2_A);
3194 }
3195
	/*
	 * Process SGE_INT_CAUSE3 exactly once; a second pass would only
	 * re-read a cause register that has already been cleared.
	 */
	v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3_A, sge_intr_info);
	if (v != 0)
		csio_hw_fatal_err(hw);
3201 }
3202
3203 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
3204 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
3205 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
3206 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
3207
3208
3209
3210
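/*
 * CIM interrupt handler.
 */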
3211 static void csio_cim_intr_handler(struct csio_hw *hw)
3212 {
3213 static struct intr_info cim_intr_info[] = {
3214 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
3215 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
3216 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
3217 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
3218 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
3219 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
3220 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
3221 { 0, NULL, 0, 0 }
3222 };
3223 static struct intr_info cim_upintr_info[] = {
3224 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
3225 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
3226 { ILLWRINT_F, "CIM illegal write", -1, 1 },
3227 { ILLRDINT_F, "CIM illegal read", -1, 1 },
3228 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
3229 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
3230 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
3231 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
3232 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
3233 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
3234 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
3235 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
3236 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
3237 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
3238 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
3239 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
3240 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
3241 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
3242 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
3243 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
3244 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
3245 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
3246 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
3247 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
3248 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
3249 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
3250 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
3251 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
3252 { 0, NULL, 0, 0 }
3253 };
3254
3255 int fat;
3256
3257 fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE_A,
3258 cim_intr_info) +
3259 csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE_A,
3260 cim_upintr_info);
3261 if (fat)
3262 csio_hw_fatal_err(hw);
3263 }
3264
3265
3266
3267
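/*
 * ULP RX interrupt handler.
 */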
3268 static void csio_ulprx_intr_handler(struct csio_hw *hw)
3269 {
3270 static struct intr_info ulprx_intr_info[] = {
3271 { 0x1800000, "ULPRX context error", -1, 1 },
3272 { 0x7fffff, "ULPRX parity error", -1, 1 },
3273 { 0, NULL, 0, 0 }
3274 };
3275
3276 if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
3277 csio_hw_fatal_err(hw);
3278 }
3279
3280
3281
3282
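/*
 * ULP TX interrupt handler.
 */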
3283 static void csio_ulptx_intr_handler(struct csio_hw *hw)
3284 {
3285 static struct intr_info ulptx_intr_info[] = {
3286 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
3287 0 },
3288 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
3289 0 },
3290 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
3291 0 },
3292 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
3293 0 },
3294 { 0xfffffff, "ULPTX parity error", -1, 1 },
3295 { 0, NULL, 0, 0 }
3296 };
3297
3298 if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
3299 csio_hw_fatal_err(hw);
3300 }
3301
3302
3303
3304
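/*
 * PM TX interrupt handler.
 */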
3305 static void csio_pmtx_intr_handler(struct csio_hw *hw)
3306 {
3307 static struct intr_info pmtx_intr_info[] = {
3308 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
3309 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
3310 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
3311 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
3312 { 0xffffff0, "PMTX framing error", -1, 1 },
3313 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
3314 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error", -1,
3315 1 },
3316 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
3317 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
3318 { 0, NULL, 0, 0 }
3319 };
3320
3321 if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE_A, pmtx_intr_info))
3322 csio_hw_fatal_err(hw);
3323 }
3324
3325
3326
3327
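/*
 * PM RX interrupt handler.
 */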
3328 static void csio_pmrx_intr_handler(struct csio_hw *hw)
3329 {
3330 static struct intr_info pmrx_intr_info[] = {
3331 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
3332 { 0x3ffff0, "PMRX framing error", -1, 1 },
3333 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
3334 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error", -1,
3335 1 },
3336 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
3337 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
3338 { 0, NULL, 0, 0 }
3339 };
3340
3341 if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE_A, pmrx_intr_info))
3342 csio_hw_fatal_err(hw);
3343 }
3344
3345
3346
3347
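/*
 * CPL switch interrupt handler.
 */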
3348 static void csio_cplsw_intr_handler(struct csio_hw *hw)
3349 {
3350 static struct intr_info cplsw_intr_info[] = {
3351 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
3352 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
3353 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
3354 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
3355 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
3356 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
3357 { 0, NULL, 0, 0 }
3358 };
3359
3360 if (csio_handle_intr_status(hw, CPL_INTR_CAUSE_A, cplsw_intr_info))
3361 csio_hw_fatal_err(hw);
3362 }
3363
3364
3365
3366
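/*
 * LE interrupt handler.
 */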
3367 static void csio_le_intr_handler(struct csio_hw *hw)
3368 {
3369 enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
3370
3371 static struct intr_info le_intr_info[] = {
3372 { LIPMISS_F, "LE LIP miss", -1, 0 },
3373 { LIP0_F, "LE 0 LIP error", -1, 0 },
3374 { PARITYERR_F, "LE parity error", -1, 1 },
3375 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
3376 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
3377 { 0, NULL, 0, 0 }
3378 };
3379
3380 static struct intr_info t6_le_intr_info[] = {
3381 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
3382 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
3383 { TCAMINTPERR_F, "LE parity error", -1, 1 },
3384 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
3385 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
3386 { 0, NULL, 0, 0 }
3387 };
3388
3389 if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A,
3390 (chip == CHELSIO_T5) ?
3391 le_intr_info : t6_le_intr_info))
3392 csio_hw_fatal_err(hw);
3393 }
3394
3395
3396
3397
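/*
 * MPS interrupt handler.
 */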
3398 static void csio_mps_intr_handler(struct csio_hw *hw)
3399 {
3400 static struct intr_info mps_rx_intr_info[] = {
3401 { 0xffffff, "MPS Rx parity error", -1, 1 },
3402 { 0, NULL, 0, 0 }
3403 };
3404 static struct intr_info mps_tx_intr_info[] = {
3405 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
3406 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
3407 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
3408 -1, 1 },
3409 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
3410 -1, 1 },
3411 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
3412 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
3413 { FRMERR_F, "MPS Tx framing error", -1, 1 },
3414 { 0, NULL, 0, 0 }
3415 };
3416 static struct intr_info mps_trc_intr_info[] = {
3417 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
3418 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
3419 -1, 1 },
3420 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
3421 { 0, NULL, 0, 0 }
3422 };
3423 static struct intr_info mps_stat_sram_intr_info[] = {
3424 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
3425 { 0, NULL, 0, 0 }
3426 };
3427 static struct intr_info mps_stat_tx_intr_info[] = {
3428 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
3429 { 0, NULL, 0, 0 }
3430 };
3431 static struct intr_info mps_stat_rx_intr_info[] = {
3432 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
3433 { 0, NULL, 0, 0 }
3434 };
3435 static struct intr_info mps_cls_intr_info[] = {
3436 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
3437 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
3438 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
3439 { 0, NULL, 0, 0 }
3440 };
3441
3442 int fat;
3443
3444 fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE_A,
3445 mps_rx_intr_info) +
3446 csio_handle_intr_status(hw, MPS_TX_INT_CAUSE_A,
3447 mps_tx_intr_info) +
3448 csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE_A,
3449 mps_trc_intr_info) +
3450 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
3451 mps_stat_sram_intr_info) +
3452 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
3453 mps_stat_tx_intr_info) +
3454 csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
3455 mps_stat_rx_intr_info) +
3456 csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE_A,
3457 mps_cls_intr_info);
3458
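	/* Clear and flush the MPS interrupt cause */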
3459 csio_wr_reg32(hw, 0, MPS_INT_CAUSE_A);
3460 csio_rd_reg32(hw, MPS_INT_CAUSE_A);
3461 if (fat)
3462 csio_hw_fatal_err(hw);
3463 }
3464
3465 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
3466 ECC_UE_INT_CAUSE_F)
3467
3468
3469
3470
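/*
 * EDC/MC interrupt handler.
 */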
3471 static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
3472 {
3473 static const char name[3][5] = { "EDC0", "EDC1", "MC" };
3474
3475 unsigned int addr, cnt_addr, v;
3476
3477 if (idx <= MEM_EDC1) {
3478 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
3479 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
3480 } else {
3481 addr = MC_INT_CAUSE_A;
3482 cnt_addr = MC_ECC_STATUS_A;
3483 }
3484
3485 v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
3486 if (v & PERR_INT_CAUSE_F)
3487 csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
3488 if (v & ECC_CE_INT_CAUSE_F) {
3489 uint32_t cnt = ECC_CECNT_G(csio_rd_reg32(hw, cnt_addr));
3490
3491 csio_wr_reg32(hw, ECC_CECNT_V(ECC_CECNT_M), cnt_addr);
3492 csio_warn(hw, "%u %s correctable ECC data error%s\n",
3493 cnt, name[idx], cnt > 1 ? "s" : "");
3494 }
3495 if (v & ECC_UE_INT_CAUSE_F)
3496 csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
3497
3498 csio_wr_reg32(hw, v, addr);
3499 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
3500 csio_hw_fatal_err(hw);
3501 }
3502
3503
3504
3505
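/*
 * MA interrupt handler.
 */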
3506 static void csio_ma_intr_handler(struct csio_hw *hw)
3507 {
3508 uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE_A);
3509
3510 if (status & MEM_PERR_INT_CAUSE_F)
3511 csio_fatal(hw, "MA parity error, parity status %#x\n",
3512 csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS_A));
3513 if (status & MEM_WRAP_INT_CAUSE_F) {
3514 v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS_A);
3515 csio_fatal(hw,
3516 "MA address wrap-around error by client %u to address %#x\n",
3517 MEM_WRAP_CLIENT_NUM_G(v), MEM_WRAP_ADDRESS_G(v) << 4);
3518 }
3519 csio_wr_reg32(hw, status, MA_INT_CAUSE_A);
3520 csio_hw_fatal_err(hw);
3521 }
3522
3523
3524
3525
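/*
 * SMB interrupt handler.
 */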
3526 static void csio_smb_intr_handler(struct csio_hw *hw)
3527 {
3528 static struct intr_info smb_intr_info[] = {
3529 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
3530 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
3531 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
3532 { 0, NULL, 0, 0 }
3533 };
3534
3535 if (csio_handle_intr_status(hw, SMB_INT_CAUSE_A, smb_intr_info))
3536 csio_hw_fatal_err(hw);
3537 }
3538
3539
3540
3541
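/*
 * NC-SI interrupt handler.
 */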
3542 static void csio_ncsi_intr_handler(struct csio_hw *hw)
3543 {
3544 static struct intr_info ncsi_intr_info[] = {
3545 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
3546 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
3547 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
3548 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
3549 { 0, NULL, 0, 0 }
3550 };
3551
3552 if (csio_handle_intr_status(hw, NCSI_INT_CAUSE_A, ncsi_intr_info))
3553 csio_hw_fatal_err(hw);
3554 }
3555
3556
3557
3558
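/*
 * XGMAC interrupt handler.
 */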
3559 static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
3560 {
3561 uint32_t v = csio_rd_reg32(hw, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
3562
3563 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
3564 if (!v)
3565 return;
3566
3567 if (v & TXFIFO_PRTY_ERR_F)
3568 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
3569 if (v & RXFIFO_PRTY_ERR_F)
3570 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
3571 csio_wr_reg32(hw, v, T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A));
3572 csio_hw_fatal_err(hw);
3573 }
3574
3575
3576
3577
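/*
 * PL interrupt handler.
 */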
3578 static void csio_pl_intr_handler(struct csio_hw *hw)
3579 {
3580 static struct intr_info pl_intr_info[] = {
3581 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
3582 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
3583 { 0, NULL, 0, 0 }
3584 };
3585
3586 if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE_A, pl_intr_info))
3587 csio_hw_fatal_err(hw);
3588 }
3589
3590
3591
3592
3593
3594
3595
3596
3597
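/*
 * csio_hw_slow_intr_handler - control path interrupt handler
 * @hw: HW module
 *
 * Interrupt handler for non-data global interrupt events, e.g. errors.
 * The designated master function handles these interrupts.
 *
 * Returns 1 if an interrupt was handled, 0 otherwise.
 */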
3598 int
3599 csio_hw_slow_intr_handler(struct csio_hw *hw)
3600 {
3601 uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE_A);
3602
3603 if (!(cause & CSIO_GLBL_INTR_MASK)) {
3604 CSIO_INC_STATS(hw, n_plint_unexp);
3605 return 0;
3606 }
3607
3608 csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);
3609
3610 CSIO_INC_STATS(hw, n_plint_cnt);
3611
3612 if (cause & CIM_F)
3613 csio_cim_intr_handler(hw);
3614
3615 if (cause & MPS_F)
3616 csio_mps_intr_handler(hw);
3617
3618 if (cause & NCSI_F)
3619 csio_ncsi_intr_handler(hw);
3620
3621 if (cause & PL_F)
3622 csio_pl_intr_handler(hw);
3623
3624 if (cause & SMB_F)
3625 csio_smb_intr_handler(hw);
3626
3627 if (cause & XGMAC0_F)
3628 csio_xgmac_intr_handler(hw, 0);
3629
3630 if (cause & XGMAC1_F)
3631 csio_xgmac_intr_handler(hw, 1);
3632
3633 if (cause & XGMAC_KR0_F)
3634 csio_xgmac_intr_handler(hw, 2);
3635
3636 if (cause & XGMAC_KR1_F)
3637 csio_xgmac_intr_handler(hw, 3);
3638
3639 if (cause & PCIE_F)
3640 hw->chip_ops->chip_pcie_intr_handler(hw);
3641
3642 if (cause & MC_F)
3643 csio_mem_intr_handler(hw, MEM_MC);
3644
3645 if (cause & EDC0_F)
3646 csio_mem_intr_handler(hw, MEM_EDC0);
3647
3648 if (cause & EDC1_F)
3649 csio_mem_intr_handler(hw, MEM_EDC1);
3650
3651 if (cause & LE_F)
3652 csio_le_intr_handler(hw);
3653
3654 if (cause & TP_F)
3655 csio_tp_intr_handler(hw);
3656
3657 if (cause & MA_F)
3658 csio_ma_intr_handler(hw);
3659
3660 if (cause & PM_TX_F)
3661 csio_pmtx_intr_handler(hw);
3662
3663 if (cause & PM_RX_F)
3664 csio_pmrx_intr_handler(hw);
3665
3666 if (cause & ULP_RX_F)
3667 csio_ulprx_intr_handler(hw);
3668
3669 if (cause & CPL_SWITCH_F)
3670 csio_cplsw_intr_handler(hw);
3671
3672 if (cause & SGE_F)
3673 csio_sge_intr_handler(hw);
3674
3675 if (cause & ULP_TX_F)
3676 csio_ulptx_intr_handler(hw);
3677
3678
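	/* Clear the interrupts just processed for which we are the master */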
3679 csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE_A);
3680 csio_rd_reg32(hw, PL_INT_CAUSE_A);
3681
3682 return 1;
3683 }
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
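/*****************************************************************************
 * HW <--> mailbox interfacing routines.
 ****************************************************************************/
/*
 * csio_mberr_worker - Worker for mailbox error/timeout completions.
 * @data: Pointer to the HW module.
 *
 * Splices off the pending completion queue, issues the next queued
 * mailbox request, and invokes the completions outside the HW lock.
 */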
3695 static void
3696 csio_mberr_worker(void *data)
3697 {
3698 struct csio_hw *hw = (struct csio_hw *)data;
3699 struct csio_mbm *mbm = &hw->mbm;
3700 LIST_HEAD(cbfn_q);
3701 struct csio_mb *mbp_next;
3702 int rv;
3703
3704 del_timer_sync(&mbm->timer);
3705
3706 spin_lock_irq(&hw->lock);
3707 if (list_empty(&mbm->cbfn_q)) {
3708 spin_unlock_irq(&hw->lock);
3709 return;
3710 }
3711
3712 list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
3713 mbm->stats.n_cbfnq = 0;
3714
3715
3716 if (!list_empty(&mbm->req_q)) {
3717 mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
3718 list_del_init(&mbp_next->list);
3719
3720 rv = csio_mb_issue(hw, mbp_next);
3721 if (rv != 0)
3722 list_add_tail(&mbp_next->list, &mbm->req_q);
3723 else
3724 CSIO_DEC_STATS(mbm, n_activeq);
3725 }
3726 spin_unlock_irq(&hw->lock);
3727
3728
3729 csio_mb_completions(hw, &cbfn_q);
3730 }
3731
3732
3733
3734
3735
3736
3737
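/*
 * csio_hw_mb_timer - Top-level mailbox timeout handler.
 * @t: Expired timer.
 *
 * Fishes out the timed-out mailbox, if any, and invokes its completion
 * callback outside the HW lock.
 */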
3738 static void
3739 csio_hw_mb_timer(struct timer_list *t)
3740 {
3741 struct csio_mbm *mbm = from_timer(mbm, t, timer);
3742 struct csio_hw *hw = mbm->hw;
3743 struct csio_mb *mbp = NULL;
3744
3745 spin_lock_irq(&hw->lock);
3746 mbp = csio_mb_tmo_handler(hw);
3747 spin_unlock_irq(&hw->lock);
3748
3749
3750 if (mbp)
3751 mbp->mb_cbfn(hw, mbp);
3752
3753 }
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
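/*
 * csio_hw_mbm_cleanup - Cancel all mailboxes and run their completions.
 * @hw: HW module.
 *
 * Called with the HW lock held; the lock is dropped around the
 * completion callbacks and re-acquired before returning.
 */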
3764 static void
3765 csio_hw_mbm_cleanup(struct csio_hw *hw)
3766 {
3767 LIST_HEAD(cbfn_q);
3768
3769 csio_mb_cancel_all(hw, &cbfn_q);
3770
3771 spin_unlock_irq(&hw->lock);
3772 csio_mb_completions(hw, &cbfn_q);
3773 spin_lock_irq(&hw->lock);
3774 }
3775
3776
3777
3778
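/*****************************************************************************
 * Event handling
 ****************************************************************************/
/*
 * csio_enqueue_evt - Queue an event message on the active event queue.
 *
 * The caller must hold the HW lock; use csio_enqueue_evt_lock() otherwise.
 */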
3779 int
3780 csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3781 uint16_t len)
3782 {
3783 struct csio_evt_msg *evt_entry = NULL;
3784
3785 if (type >= CSIO_EVT_MAX)
3786 return -EINVAL;
3787
3788 if (len > CSIO_EVT_MSG_SIZE)
3789 return -EINVAL;
3790
3791 if (hw->flags & CSIO_HWF_FWEVT_STOP)
3792 return -EINVAL;
3793
3794 if (list_empty(&hw->evt_free_q)) {
3795 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3796 type, len);
3797 return -ENOMEM;
3798 }
3799
3800 evt_entry = list_first_entry(&hw->evt_free_q,
3801 struct csio_evt_msg, list);
3802 list_del_init(&evt_entry->list);
3803
3804
3805 evt_entry->type = type;
3806 memcpy((void *)evt_entry->data, evt_msg, len);
3807 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3808
3809 CSIO_DEC_STATS(hw, n_evt_freeq);
3810 CSIO_INC_STATS(hw, n_evt_activeq);
3811
3812 return 0;
3813 }
3814
3815 static int
3816 csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
3817 uint16_t len, bool msg_sg)
3818 {
3819 struct csio_evt_msg *evt_entry = NULL;
3820 struct csio_fl_dma_buf *fl_sg;
3821 uint32_t off = 0;
3822 unsigned long flags;
3823 int n, ret = 0;
3824
3825 if (type >= CSIO_EVT_MAX)
3826 return -EINVAL;
3827
3828 if (len > CSIO_EVT_MSG_SIZE)
3829 return -EINVAL;
3830
3831 spin_lock_irqsave(&hw->lock, flags);
3832 if (hw->flags & CSIO_HWF_FWEVT_STOP) {
3833 ret = -EINVAL;
3834 goto out;
3835 }
3836
3837 if (list_empty(&hw->evt_free_q)) {
3838 csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
3839 type, len);
3840 ret = -ENOMEM;
3841 goto out;
3842 }
3843
3844 evt_entry = list_first_entry(&hw->evt_free_q,
3845 struct csio_evt_msg, list);
3846 list_del_init(&evt_entry->list);
3847
3848
3849 evt_entry->type = type;
3850
3851
3852 if (msg_sg) {
3853 fl_sg = (struct csio_fl_dma_buf *) evt_msg;
3854 for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
3855 memcpy((void *)((uintptr_t)evt_entry->data + off),
3856 fl_sg->flbufs[n].vaddr,
3857 fl_sg->flbufs[n].len);
3858 off += fl_sg->flbufs[n].len;
3859 }
3860 } else
3861 memcpy((void *)evt_entry->data, evt_msg, len);
3862
3863 list_add_tail(&evt_entry->list, &hw->evt_active_q);
3864 CSIO_DEC_STATS(hw, n_evt_freeq);
3865 CSIO_INC_STATS(hw, n_evt_activeq);
3866 out:
3867 spin_unlock_irqrestore(&hw->lock, flags);
3868 return ret;
3869 }
3870
3871 static void
3872 csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
3873 {
3874 if (evt_entry) {
3875 spin_lock_irq(&hw->lock);
3876 list_del_init(&evt_entry->list);
3877 list_add_tail(&evt_entry->list, &hw->evt_free_q);
3878 CSIO_DEC_STATS(hw, n_evt_activeq);
3879 CSIO_INC_STATS(hw, n_evt_freeq);
3880 spin_unlock_irq(&hw->lock);
3881 }
3882 }
3883
3884 void
3885 csio_evtq_flush(struct csio_hw *hw)
3886 {
	uint32_t count = 30;
3889 while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
3890 spin_unlock_irq(&hw->lock);
3891 msleep(2000);
3892 spin_lock_irq(&hw->lock);
3893 }
3894
3895 CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
3896 }
3897
3898 static void
3899 csio_evtq_stop(struct csio_hw *hw)
3900 {
3901 hw->flags |= CSIO_HWF_FWEVT_STOP;
3902 }
3903
3904 static void
3905 csio_evtq_start(struct csio_hw *hw)
3906 {
3907 hw->flags &= ~CSIO_HWF_FWEVT_STOP;
3908 }
3909
3910 static void
3911 csio_evtq_cleanup(struct csio_hw *hw)
3912 {
3913 struct list_head *evt_entry, *next_entry;
3914
3915
3916 if (!list_empty(&hw->evt_active_q))
3917 list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);
3918
3919 hw->stats.n_evt_activeq = 0;
3920 hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
3921
3922
3923 list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
3924 kfree(evt_entry);
3925 CSIO_DEC_STATS(hw, n_evt_freeq);
3926 }
3927
3928 hw->stats.n_evt_freeq = 0;
3929 }
3930
3931
3932 static void
3933 csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
3934 struct csio_fl_dma_buf *flb, void *priv)
3935 {
3936 __u8 op;
3937 void *msg = NULL;
3938 uint32_t msg_len = 0;
	bool msg_sg = false;
3940
3941 op = ((struct rss_header *) wr)->opcode;
3942 if (op == CPL_FW6_PLD) {
3943 CSIO_INC_STATS(hw, n_cpl_fw6_pld);
3944 if (!flb || !flb->totlen) {
3945 CSIO_INC_STATS(hw, n_cpl_unexp);
3946 return;
3947 }
3948
3949 msg = (void *) flb;
3950 msg_len = flb->totlen;
		msg_sg = true;
3952 } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
3953
3954 CSIO_INC_STATS(hw, n_cpl_fw6_msg);
3955
3956 msg = (void *)((uintptr_t)wr + sizeof(__be64));
3957 msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
3958 sizeof(struct cpl_fw4_msg);
3959 } else {
3960 csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
3961 CSIO_INC_STATS(hw, n_cpl_unexp);
3962 return;
3963 }
3964
3965
3966
3967
3968
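	/*
	 * Queue the event for the event worker thread; drop (and count)
	 * it if the event queue is stopped or out of free entries.
	 */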
3969 if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
3970 (uint16_t)msg_len, msg_sg))
3971 CSIO_INC_STATS(hw, n_evt_drop);
3972 }
3973
3974 void
3975 csio_evtq_worker(struct work_struct *work)
3976 {
3977 struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
3978 struct list_head *evt_entry, *next_entry;
3979 LIST_HEAD(evt_q);
3980 struct csio_evt_msg *evt_msg;
3981 struct cpl_fw6_msg *msg;
3982 struct csio_rnode *rn;
3983 int rv = 0;
3984 uint8_t evtq_stop = 0;
3985
3986 csio_dbg(hw, "event worker thread active evts#%d\n",
3987 hw->stats.n_evt_activeq);
3988
3989 spin_lock_irq(&hw->lock);
3990 while (!list_empty(&hw->evt_active_q)) {
3991 list_splice_tail_init(&hw->evt_active_q, &evt_q);
3992 spin_unlock_irq(&hw->lock);
3993
3994 list_for_each_safe(evt_entry, next_entry, &evt_q) {
3995 evt_msg = (struct csio_evt_msg *) evt_entry;
3996
3997
3998 spin_lock_irq(&hw->lock);
3999 if (hw->flags & CSIO_HWF_FWEVT_STOP)
4000 evtq_stop = 1;
4001 spin_unlock_irq(&hw->lock);
4002 if (evtq_stop) {
4003 CSIO_INC_STATS(hw, n_evt_drop);
4004 goto free_evt;
4005 }
4006
4007 switch (evt_msg->type) {
4008 case CSIO_EVT_FW:
4009 msg = (struct cpl_fw6_msg *)(evt_msg->data);
4010
4011 if ((msg->opcode == CPL_FW6_MSG ||
4012 msg->opcode == CPL_FW4_MSG) &&
4013 !msg->type) {
4014 rv = csio_mb_fwevt_handler(hw,
4015 msg->data);
4016 if (!rv)
4017 break;
4018
4019 csio_fcoe_fwevt_handler(hw,
4020 msg->opcode, msg->data);
4021 } else if (msg->opcode == CPL_FW6_PLD) {
4022
4023 csio_fcoe_fwevt_handler(hw,
4024 msg->opcode, msg->data);
4025 } else {
4026 csio_warn(hw,
4027 "Unhandled FW msg op %x type %x\n",
4028 msg->opcode, msg->type);
4029 CSIO_INC_STATS(hw, n_evt_drop);
4030 }
4031 break;
4032
4033 case CSIO_EVT_MBX:
4034 csio_mberr_worker(hw);
4035 break;
4036
4037 case CSIO_EVT_DEV_LOSS:
4038 memcpy(&rn, evt_msg->data, sizeof(rn));
4039 csio_rnode_devloss_handler(rn);
4040 break;
4041
4042 default:
4043 csio_warn(hw, "Unhandled event %x on evtq\n",
4044 evt_msg->type);
4045 CSIO_INC_STATS(hw, n_evt_unexp);
4046 break;
4047 }
4048 free_evt:
4049 csio_free_evt(hw, evt_msg);
4050 }
4051
4052 spin_lock_irq(&hw->lock);
4053 }
4054 hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
4055 spin_unlock_irq(&hw->lock);
4056 }
4057
4058 int
4059 csio_fwevtq_handler(struct csio_hw *hw)
4060 {
4061 int rv;
4062
4063 if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
4064 CSIO_INC_STATS(hw, n_int_stray);
4065 return -EINVAL;
4066 }
4067
4068 rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
4069 csio_process_fwevtq_entry, NULL);
4070 return rv;
4071 }
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
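/*
 * csio_mgmt_req_lookup - Check whether an IO request is on the active queue.
 * @mgmtm: Management module.
 * @io_req: IO request to look up.
 *
 * Returns 0 if found, -EINVAL otherwise.
 */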
4086 int
4087 csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
4088 {
4089 struct list_head *tmp;
4090
4091
4092 list_for_each(tmp, &mgmtm->active_q) {
4093 if (io_req == (struct csio_ioreq *)tmp)
4094 return 0;
4095 }
4096 return -EINVAL;
4097 }
4098
4099 #define ECM_MIN_TMO 1000
4100
4101
4102
4103
4104
4105
4106
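/*
 * csio_mgmt_tmo_handler - Management request timeout handler.
 * @t: Expired timer.
 *
 * Completes expired requests with -ETIMEDOUT and re-arms the timer
 * while active requests remain.
 */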
4107 static void
4108 csio_mgmt_tmo_handler(struct timer_list *t)
4109 {
4110 struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer);
4111 struct list_head *tmp;
4112 struct csio_ioreq *io_req;
4113
4114 csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");
4115
4116 spin_lock_irq(&mgmtm->hw->lock);
4117
4118 list_for_each(tmp, &mgmtm->active_q) {
4119 io_req = (struct csio_ioreq *) tmp;
4120 io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);
4121
4122 if (!io_req->tmo) {
4123
4124 tmp = csio_list_prev(tmp);
4125 list_del_init(&io_req->sm.sm_list);
4126 if (io_req->io_cbfn) {
4127
4128 io_req->wr_status = -ETIMEDOUT;
4129 io_req->io_cbfn(mgmtm->hw, io_req);
4130 } else {
4131 CSIO_DB_ASSERT(0);
4132 }
4133 }
4134 }
4135
4136
4137 if (!list_empty(&mgmtm->active_q))
4138 mod_timer(&mgmtm->mgmt_timer,
4139 jiffies + msecs_to_jiffies(ECM_MIN_TMO));
4140 spin_unlock_irq(&mgmtm->hw->lock);
4141 }
4142
4143 static void
4144 csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
4145 {
4146 struct csio_hw *hw = mgmtm->hw;
4147 struct csio_ioreq *io_req;
4148 struct list_head *tmp;
4149 uint32_t count;
4150
4151 count = 30;
4152
4153 while ((!list_empty(&mgmtm->active_q)) && count--) {
4154 spin_unlock_irq(&hw->lock);
4155 msleep(2000);
4156 spin_lock_irq(&hw->lock);
4157 }
4158
4159
4160 list_for_each(tmp, &mgmtm->active_q) {
4161 io_req = (struct csio_ioreq *) tmp;
4162 tmp = csio_list_prev(tmp);
4163 list_del_init(&io_req->sm.sm_list);
4164 mgmtm->stats.n_active--;
4165 if (io_req->io_cbfn) {
4166
4167 io_req->wr_status = -ETIMEDOUT;
4168 io_req->io_cbfn(mgmtm->hw, io_req);
4169 }
4170 }
4171 }
4172
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
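/*
 * csio_mgmtm_init - Initialize the management module.
 * @mgmtm: Management module.
 * @hw: HW module.
 *
 * Sets up the timeout timer and the request queues.
 * Returns 0 on success.
 */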
4187 static int
4188 csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
4189 {
4190 timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0);
4191
4192 INIT_LIST_HEAD(&mgmtm->active_q);
4193 INIT_LIST_HEAD(&mgmtm->cbfn_q);
4194
4195 mgmtm->hw = hw;
4196
4197
4198 return 0;
4199 }
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
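/*
 * csio_mgmtm_exit - Exit the management module.
 * @mgmtm: Management module.
 *
 * Stops the pending-request timer.
 */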
4210 static void
4211 csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
4212 {
4213 del_timer_sync(&mgmtm->mgmt_timer);
4214 }
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
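/*
 * csio_hw_start - Kick off the HW state machine.
 * @hw: Pointer to HW module.
 *
 * Posts the configuration event and reports whether the HW reached the
 * ready state. Called from the PCI probe path without the HW lock held.
 */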
4225 int
4226 csio_hw_start(struct csio_hw *hw)
4227 {
4228 spin_lock_irq(&hw->lock);
4229 csio_post_event(&hw->sm, CSIO_HWE_CFG);
4230 spin_unlock_irq(&hw->lock);
4231
4232 if (csio_is_hw_ready(hw))
4233 return 0;
4234 else if (csio_match_state(hw, csio_hws_uninit))
4235 return -EINVAL;
4236 else
4237 return -ENODEV;
4238 }
4239
4240 int
4241 csio_hw_stop(struct csio_hw *hw)
4242 {
4243 csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);
4244
4245 if (csio_is_hw_removing(hw))
4246 return 0;
4247 else
4248 return -EINVAL;
4249 }
4250
4251
4252 #define CSIO_MAX_RESET_RETRIES 3
4253
4254
4255
4256
4257
4258
4259
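/*
 * csio_hw_reset - Reset the hardware.
 * @hw: HW module.
 *
 * Only the master function may reset; the attempt is abandoned after
 * CSIO_MAX_RESET_RETRIES tries.
 */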
4260 int
4261 csio_hw_reset(struct csio_hw *hw)
4262 {
4263 if (!csio_is_hw_master(hw))
4264 return -EPERM;
4265
4266 if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
		csio_dbg(hw, "Max HW reset attempts reached\n");
4268 return -EINVAL;
4269 }
4270
4271 hw->rst_retries++;
4272 csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);
4273
4274 if (csio_is_hw_ready(hw)) {
4275 hw->rst_retries = 0;
4276 hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
4277 return 0;
4278 } else
4279 return -EINVAL;
4280 }
4281
4282
4283
4284
4285
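/*
 * csio_hw_get_device_id - Read and cache the PCI vendor and device ids.
 * @hw: HW module.
 */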
4286 static void
4287 csio_hw_get_device_id(struct csio_hw *hw)
4288 {
4289
4290 if (csio_is_dev_id_cached(hw))
4291 return;
4292
4293
4294 pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
4295 &hw->params.pci.vendor_id);
4296 pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
4297 &hw->params.pci.device_id);
4298
4299 csio_dev_id_cached(hw);
4300 hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
4301
4302 }
4303
4304
4305
4306
4307
4308
4309
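/*
 * csio_hw_set_description - Set the model and description of the HW.
 * @hw: HW module.
 * @ven_id: PCI vendor id.
 * @dev_id: PCI device id.
 */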
4310 static void
4311 csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
4312 {
4313 uint32_t adap_type, prot_type;
4314
4315 if (ven_id == CSIO_VENDOR_ID) {
4316 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
4317 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
4318
4319 if (prot_type == CSIO_T5_FCOE_ASIC) {
4320 memcpy(hw->hw_ver,
4321 csio_t5_fcoe_adapters[adap_type].model_no, 16);
4322 memcpy(hw->model_desc,
4323 csio_t5_fcoe_adapters[adap_type].description,
4324 32);
4325 } else {
4326 char tempName[32] = "Chelsio FCoE Controller";
4327 memcpy(hw->model_desc, tempName, 32);
4328 }
4329 }
4330 }
4331
4332
4333
4334
4335
4336
4337
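/*
 * csio_hw_init - Initialize the HW module.
 * @hw: Pointer to HW module.
 *
 * Initializes the state machine, the mailbox, work-request, SCSI and
 * management sub-modules, and pre-allocates csio_evtq_sz free event
 * entries. Returns 0 on success.
 */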
4338 int
4339 csio_hw_init(struct csio_hw *hw)
4340 {
4341 int rv = -EINVAL;
4342 uint32_t i;
4343 uint16_t ven_id, dev_id;
4344 struct csio_evt_msg *evt_entry;
4345
4346 INIT_LIST_HEAD(&hw->sm.sm_list);
4347 csio_init_state(&hw->sm, csio_hws_uninit);
4348 spin_lock_init(&hw->lock);
4349 INIT_LIST_HEAD(&hw->sln_head);
4350
4351
4352 csio_hw_get_device_id(hw);
4353
4354 strcpy(hw->name, CSIO_HW_NAME);
4355
4356
4357 hw->chip_ops = &t5_ops;
4358
4359
4360
4361 ven_id = hw->params.pci.vendor_id;
4362 dev_id = hw->params.pci.device_id;
4363
4364 csio_hw_set_description(hw, ven_id, dev_id);
4365
4366
4367 hw->params.log_level = (uint32_t) csio_dbg_level;
4368
4369 csio_set_fwevt_intr_idx(hw, -1);
4370 csio_set_nondata_intr_idx(hw, -1);
4371
4372
4373 if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
4374 goto err;
4375
4376 rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
4377 if (rv)
4378 goto err_mbm_exit;
4379
4380 rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
4381 if (rv)
4382 goto err_wrm_exit;
4383
4384 rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
4385 if (rv)
4386 goto err_scsim_exit;
4387
4388 INIT_LIST_HEAD(&hw->evt_active_q);
4389 INIT_LIST_HEAD(&hw->evt_free_q);
4390 for (i = 0; i < csio_evtq_sz; i++) {
4391
4392 evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
4393 if (!evt_entry) {
4394 rv = -ENOMEM;
4395 csio_err(hw, "Failed to initialize eventq");
4396 goto err_evtq_cleanup;
4397 }
4398
4399 list_add_tail(&evt_entry->list, &hw->evt_free_q);
4400 CSIO_INC_STATS(hw, n_evt_freeq);
4401 }
4402
4403 hw->dev_num = dev_num;
4404 dev_num++;
4405
4406 return 0;
4407
4408 err_evtq_cleanup:
4409 csio_evtq_cleanup(hw);
4410 csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
4411 err_scsim_exit:
4412 csio_scsim_exit(csio_hw_to_scsim(hw));
4413 err_wrm_exit:
4414 csio_wrm_exit(csio_hw_to_wrm(hw), hw);
4415 err_mbm_exit:
4416 csio_mbm_exit(csio_hw_to_mbm(hw));
4417 err:
4418 return rv;
4419 }
4420
4421
4422
4423
4424
4425
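/*
 * csio_hw_exit - Uninitialize the HW module.
 * @hw: Pointer to HW module.
 *
 * Tears down the sub-modules in the reverse order of csio_hw_init().
 */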
4426 void
4427 csio_hw_exit(struct csio_hw *hw)
4428 {
4429 csio_evtq_cleanup(hw);
4430 csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
4431 csio_scsim_exit(csio_hw_to_scsim(hw));
4432 csio_wrm_exit(csio_hw_to_wrm(hw), hw);
4433 csio_mbm_exit(csio_hw_to_mbm(hw));
4434 }