#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"
#include "affinity.h"
#include "debugfs.h"
#include "fault.h"
#include "netdev.h"

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

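/*
 * Default receive interrupt mitigation timeout. The (824 + 16) ns value
 * appears sized to cover one maximum (10 KB) packet plus header at the
 * 12.5 GB/s link rate, with a small coalescing margin:
 *   10 * 1024 + 64 header bytes = 10304 bytes
 *   10304 bytes / 12.5 GB/s    ~= 824 ns
 */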
uint rcv_intr_timeout = (824 + 16);
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16;
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

uint rcv_intr_dynamic = 1;
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup;

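/*
 * Generic entry used to decode a hardware error/status register: each
 * entry pairs a bit mask with a human-readable string, plus an "extra"
 * field carrying per-flag consequence bits (see the SEC_* values below).
 */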
struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
	u16 unused0;
	u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}

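/* Send Error Consequences */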
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4
#define SEC_SPC_FREEZE		0x8

#define DEFAULT_KRCVQS		2
#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1

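/*
 * RSM instance allocation
 *   0 - user FECN handling
 *   1 - VNIC
 *   2 - AIP
 *   3 - verbs
 */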
#define RSM_INS_FECN	0
#define RSM_INS_VNIC	1
#define RSM_INS_AIP	2
#define RSM_INS_VERBS	3

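/* Bit offset into the GUID which carries HFI id information */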
#define GUID_HFI_INDEX_SHIFT	39

/* extract the emulation revision */
#define emulator_rev(dd)	((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd)	((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd)	((((dd)->irev) & 0xf) == 4)

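/*
 * RSM match/select field offsets below are encoded as
 * (quad word index << QW_SHIFT) | bit offset within that quad word.
 */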
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull

#define QPN_WIDTH 7ull

#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))

#define BTH_DESTQP_QW 1ull
#define BTH_DESTQP_BIT_OFFSET 16ull
#define BTH_DESTQP_OFFSET(off) ((BTH_DESTQP_QW << QW_SHIFT) | (off))
#define BTH_DESTQP_MATCH_OFFSET BTH_DESTQP_OFFSET(BTH_DESTQP_BIT_OFFSET)
#define BTH_DESTQP_MASK 0xFFull
#define BTH_DESTQP_VALUE 0x81ull

#define DETH_AIP_SQPN_QW 3ull
#define DETH_AIP_SQPN_BIT_OFFSET 56ull
#define DETH_AIP_SQPN_OFFSET(off) ((DETH_AIP_SQPN_QW << QW_SHIFT) | (off))
#define DETH_AIP_SQPN_SELECT_OFFSET \
	DETH_AIP_SQPN_OFFSET(DETH_AIP_SQPN_BIT_OFFSET)

#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull

#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull

#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))

#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))

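/*
 * SC2VL_VAL packs eight SC-to-VL table entries into the 64-bit value of
 * one SEND_SC2VLT<num> register, shifting each scNval into place with
 * that register's per-SC shift constant. DC_SC_VL_VAL below does the
 * same for the 16-entry DCC_CFG_SC_VL_TABLE registers.
 */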
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT) \
)

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)

/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)

/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

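/*
 * CCE Error flags.
 */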
static struct flag_table cce_err_status_flags[] = {
	FLAG_ENTRY0("CceCsrParityErr",
		    CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		    CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		    CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		    CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceTrgtAccessErr",
		    CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
	FLAG_ENTRY0("CceRspdDataParityErr",
		    CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		    CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		    CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		    CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		    CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
	FLAG_ENTRY0("PcicRetryMemCorErr",
		    CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		    CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
	FLAG_ENTRY0("PcicPostHdQCorErr",
		    CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
	FLAG_ENTRY0("PcicPostDatQCorErr",
		    CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
	FLAG_ENTRY0("PcicCplHdQCorErr",
		    CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
	FLAG_ENTRY0("PcicCplDatQCorErr",
		    CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
	FLAG_ENTRY0("PcicNPostHQParityErr",
		    CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
	FLAG_ENTRY0("PcicNPostDatQParityErr",
		    CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
	FLAG_ENTRY0("PcicRetryMemUncErr",
		    CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		    CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
	FLAG_ENTRY0("PcicPostHdQUncErr",
		    CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
	FLAG_ENTRY0("PcicPostDatQUncErr",
		    CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
	FLAG_ENTRY0("PcicCplHdQUncErr",
		    CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
	FLAG_ENTRY0("PcicCplDatQUncErr",
		    CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		    CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
	FLAG_ENTRY0("PcicTransmitBackParityErr",
		    CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
	FLAG_ENTRY0("PcicReceiveParityErr",
		    CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		    CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
	FLAG_ENTRY0("LATriggered",
		    CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
	FLAG_ENTRY0("CceSegReadBadAddrErr",
		    CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		    CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		    CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		    CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY0("CceMsixTableCorErr",
		    CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
	FLAG_ENTRY0("CceMsixTableUncErr",
		    CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
	FLAG_ENTRY0("CceIntMapCorErr",
		    CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
	FLAG_ENTRY0("CceIntMapUncErr",
		    CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
	FLAG_ENTRY0("CceMsixCsrParityErr",
		    CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
};

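/*
 * Misc Error flags
 */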
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

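/*
 * TXE PIO Error flags and consequences
 */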
static struct flag_table pio_err_status_flags[] = {
	FLAG_ENTRY("PioWriteBadCtxt",
		   SEC_WRITE_DROPPED,
		   SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
	FLAG_ENTRY("PioWriteAddrParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioCsrParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSbMemFifo0",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
	FLAG_ENTRY("PioSbMemFifo1",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
	FLAG_ENTRY("PioPccFifoParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPecFifoParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSbrdctlCrrelParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPktEvictFifoParityErr",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioSmPktResetParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlLenMemBank0Unc",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
	FLAG_ENTRY("PioVlLenMemBank1Unc",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
	FLAG_ENTRY("PioVlLenMemBank0Cor",
		   0,
		   SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
	FLAG_ENTRY("PioVlLenMemBank1Cor",
		   0,
		   SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
	FLAG_ENTRY("PioCreditRetFifoParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPpmcPblFifo",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
	FLAG_ENTRY("PioInitSmIn",
		   0,
		   SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
	FLAG_ENTRY("PioPktEvictSmOrArbSm",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
	FLAG_ENTRY("PioHostAddrMemUnc",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
	FLAG_ENTRY("PioHostAddrMemCor",
		   0,
		   SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
	FLAG_ENTRY("PioWriteDataParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioStateMachine",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
	FLAG_ENTRY("PioWriteQwValidParity",
		   SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioBlockQwCountParity",
		   SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlfVlLenParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlfSopParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioVlFifoParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPpmcBqcMemParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPpmcSopLen",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
	FLAG_ENTRY("PioCurrentFreeCntParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioLastReturnedCntParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPccSopHeadParity",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
	FLAG_ENTRY("PioPecSopHeadParityErr",
		   SEC_SPC_FREEZE,
		   SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

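/*
 * TXE SDMA Error flags
 */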
static struct flag_table sdma_err_status_flags[] = {
	FLAG_ENTRY0("SDmaRpyTagErr",
		    SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
	FLAG_ENTRY0("SDmaCsrParityErr",
		    SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		    SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		    SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

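/*
 * TXE Egress Error flags
 */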
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		    SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		    SEES(TX_PIO_LAUNCH_INTF_PARITY)),
	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		    SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		    SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		    SEES(TX_SDMA0_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		    SEES(TX_SDMA1_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		    SEES(TX_SDMA2_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		    SEES(TX_SDMA3_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		    SEES(TX_SDMA4_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		    SEES(TX_SDMA5_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		    SEES(TX_SDMA6_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		    SEES(TX_SDMA7_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		    SEES(TX_SDMA8_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		    SEES(TX_SDMA9_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		    SEES(TX_SDMA10_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		    SEES(TX_SDMA11_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		    SEES(TX_SDMA12_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		    SEES(TX_SDMA13_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		    SEES(TX_SDMA14_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		    SEES(TX_SDMA15_DISALLOWED_PACKET)),
	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		    SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		    SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		    SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

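/*
 * TXE Egress Error Info flags
 */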
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
	FLAG_ENTRY0("Reserved", 0ull),
	FLAG_ENTRY0("VLErr", SEEI(VL)),
	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
	FLAG_ENTRY0("RawErr", SEEI(RAW)),
	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

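/*
 * TXE Send error flags
 */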
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

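/*
 * TXE Send Context Error flags and consequences
 */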
static struct flag_table sc_err_status_flags[] = {
	FLAG_ENTRY("InconsistentSop",
		   SEC_PACKET_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
	FLAG_ENTRY("DisallowedPacket",
		   SEC_PACKET_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
	FLAG_ENTRY("WriteCrossesBoundary",
		   SEC_WRITE_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
	FLAG_ENTRY("WriteOverflow",
		   SEC_WRITE_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
	FLAG_ENTRY("WriteOutOfBounds",
		   SEC_WRITE_DROPPED | SEC_SC_HALTED,
		   SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
};

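/*
 * RXE Receive Error flags
 */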
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		    RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		    RXES(RBUF_BLOCK_LIST_READ_UNC)),
	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		    RXES(RBUF_BLOCK_LIST_READ_COR)),
	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		    RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		    RXES(RBUF_CSR_QENT_CNT_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		    RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		    RXES(RBUF_CSR_QVLD_BIT_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		    RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		    RXES(RBUF_FL_INITDONE_PARITY)),
	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		    RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		    RXES(LOOKUP_DES_PART1_UNC_COR)),
	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		    RXES(LOOKUP_DES_PART2_PARITY)),
	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

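/*
 * DCC Error Flags
 */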
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

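/*
 * LCB error flags
 */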
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		    LCBE(ALL_LNS_FAILED_REINIT_TEST)),
	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		    LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		    LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		    LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		    LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		    LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

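/*
 * DC8051 Error Flags
 */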
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

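/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */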
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

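/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */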
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
				    u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
					 int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);

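/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt
 * registers. Second tier interrupt registers have a single bit for
 * each of all the possible errors.
 */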
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)

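/*
 * Helpers for building err_reg_info entries from a register name stem:
 * EE expects <stem>_STATUS/_CLEAR/_MASK CSRs, while the DC variants
 * cover the DC blocks' _FLG/_FLG_CLR/_FLG_EN and _FLG/_CLR/_EN naming.
 */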
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }

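/*
 * Table of the "misc" grouping of error interrupts. Each entry refers
 * to another register containing more information.
 */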
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
	EE(CCE_ERR, handle_cce_err, "CceErr"),
	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
	EE(MISC_ERR, handle_misc_err, "MiscErr"),
	{ 0, 0, 0, NULL }, /* reserved */
	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
	EE(SEND_ERR, handle_txe_err, "TxeErr")
};

/* critical temperature interrupt's index within the "various" sources */
#define TCRIT_INT_SOURCE 4

/* the grouped SDMA engine error interrupt clear-down information */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
	{ 0, 0, 0, NULL }, /* reserved */
	{ 0, 0, 0, NULL }, /* reserved */
	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
	{ 0, 0, 0, NULL }, /* reserved */
};

/* MTU_CAP field encoding (value 7) used for a 10240-byte MTU */
#define DCC_CFG_PORT_MTU_CAP_10240 7

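/* Table of the DC grouping of error interrupts */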
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
};

struct cntr_entry {
	/* counter name */
	char *name;
	/* csr to read for name (if applicable) */
	u64 csr;
	/* offset into dd or ppd to store the counter's value */
	int offset;
	/* flags */
	u8 flags;
	/* accessor for stat element, mode either r or w */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

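/*
 * CNTR_ELEM builds a cntr_entry initializer. The 32- and 64-bit RXE/TXE
 * variants below compute the CSR address as (counter index * 8) past the
 * corresponding counter array base, since consecutive counters are 8
 * bytes apart.
 */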
#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)

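/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */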
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}

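/**
 * read_csr - read CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no device
 */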
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}

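/**
 * write_csr - write CSR at the indicated offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 * @value: value to write
 */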
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}

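/**
 * get_csr_addr - return the iomem address for offset
 * @dd: the dd device
 * @offset: the offset of the CSR within bar0
 *
 * Return: the iomem address to use in subsequent
 * writeq/readq operations, or NULL if there is no device
 */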
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}

static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}

/* Dev Access */
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}

static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}

static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}

static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}

/* Port Access */
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}

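/*
 * Software counter access: read or write a plain u64 kept in host
 * memory rather than a device CSR.
 */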
1503 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1504 u64 data)
1505 {
1506 u64 ret;
1507
1508 if (mode == CNTR_MODE_R) {
1509 ret = *cntr;
1510 } else if (mode == CNTR_MODE_W) {
1511 *cntr = data;
1512 ret = data;
1513 } else {
1514 dd_dev_err(dd, "Invalid cntr sw access mode");
1515 return 0;
1516 }
1517
1518 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1519
1520 return ret;
1521 }
1522
1523 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1524 int vl, int mode, u64 data)
1525 {
1526 struct hfi1_pportdata *ppd = context;
1527
1528 if (vl != CNTR_INVALID_VL)
1529 return 0;
1530 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1531 }
1532
1533 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1534 int vl, int mode, u64 data)
1535 {
1536 struct hfi1_pportdata *ppd = context;
1537
1538 if (vl != CNTR_INVALID_VL)
1539 return 0;
1540 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1541 }
1542
1543 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1544 void *context, int vl, int mode,
1545 u64 data)
1546 {
1547 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1548
1549 if (vl != CNTR_INVALID_VL)
1550 return 0;
1551 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1552 }
1553
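/*
 * Transmit discards are tracked per-VL as well as per-port:
 * CNTR_INVALID_VL selects the port aggregate, while an out-of-range VL
 * reads back 0 (and silently drops writes).
 */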
1554 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1555 void *context, int vl, int mode, u64 data)
1556 {
1557 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1558 u64 zero = 0;
1559 u64 *counter;
1560
1561 if (vl == CNTR_INVALID_VL)
1562 counter = &ppd->port_xmit_discards;
1563 else if (vl >= 0 && vl < C_VL_COUNT)
1564 counter = &ppd->port_xmit_discards_vl[vl];
1565 else
1566 counter = &zero;
1567
1568 return read_write_sw(ppd->dd, counter, mode, data);
1569 }
1570
1571 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1572 void *context, int vl, int mode,
1573 u64 data)
1574 {
1575 struct hfi1_pportdata *ppd = context;
1576
1577 if (vl != CNTR_INVALID_VL)
1578 return 0;
1579
1580 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1581 mode, data);
1582 }
1583
1584 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1585 void *context, int vl, int mode, u64 data)
1586 {
1587 struct hfi1_pportdata *ppd = context;
1588
1589 if (vl != CNTR_INVALID_VL)
1590 return 0;
1591
1592 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1593 mode, data);
1594 }
1595
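/**
 * get_all_cpu_total - sum a per-CPU counter across all possible CPUs
 * @cntr: the per-CPU counter to total
 */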
1596 u64 get_all_cpu_total(u64 __percpu *cntr)
1597 {
1598 int cpu;
1599 u64 counter = 0;
1600
1601 for_each_possible_cpu(cpu)
1602 counter += *per_cpu_ptr(cntr, cpu);
1603 return counter;
1604 }
1605
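/*
 * Per-CPU counters are never cleared in place.  A read returns the
 * current total minus the baseline stored in *z_val; writing 0 "zeroes"
 * the counter by capturing the current total as the new baseline.
 */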
1606 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1607 u64 __percpu *cntr,
1608 int vl, int mode, u64 data)
1609 {
1610 u64 ret = 0;
1611
1612 if (vl != CNTR_INVALID_VL)
1613 return 0;
1614
1615 if (mode == CNTR_MODE_R) {
1616 ret = get_all_cpu_total(cntr) - *z_val;
1617 } else if (mode == CNTR_MODE_W) {
/* A write can only zero the counter */
1619 if (data == 0)
1620 *z_val = get_all_cpu_total(cntr);
1621 else
1622 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1623 } else {
1624 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1625 return 0;
1626 }
1627
1628 return ret;
1629 }
1630
1631 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1632 void *context, int vl, int mode, u64 data)
1633 {
1634 struct hfi1_devdata *dd = context;
1635
1636 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1637 mode, data);
1638 }
1639
1640 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1641 void *context, int vl, int mode, u64 data)
1642 {
1643 struct hfi1_devdata *dd = context;
1644
1645 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1646 mode, data);
1647 }
1648
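/*
 * The software counters below (PIO/TX waits, drains and drops) simply
 * snapshot a stored value; mode and data are ignored, so these counters
 * cannot be zeroed through this interface.
 */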
1649 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1650 void *context, int vl, int mode, u64 data)
1651 {
1652 struct hfi1_devdata *dd = context;
1653
1654 return dd->verbs_dev.n_piowait;
1655 }
1656
1657 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1658 void *context, int vl, int mode, u64 data)
1659 {
1660 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1661
1662 return dd->verbs_dev.n_piodrain;
1663 }
1664
1665 static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
1666 void *context, int vl, int mode, u64 data)
1667 {
1668 struct hfi1_devdata *dd = context;
1669
1670 return dd->ctx0_seq_drop;
1671 }
1672
1673 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1674 void *context, int vl, int mode, u64 data)
1675 {
1676 struct hfi1_devdata *dd = context;
1677
1678 return dd->verbs_dev.n_txwait;
1679 }
1680
1681 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1682 void *context, int vl, int mode, u64 data)
1683 {
1684 struct hfi1_devdata *dd = context;
1685
1686 return dd->verbs_dev.n_kmem_wait;
1687 }
1688
1689 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1690 void *context, int vl, int mode, u64 data)
1691 {
1692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1693
1694 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1695 mode, data);
1696 }
1697
/*
 * Software counters corresponding to each of the
 * error status bits within MiscErrStatus
 */
1699 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1700 void *context, int vl, int mode,
1701 u64 data)
1702 {
1703 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1704
1705 return dd->misc_err_status_cnt[12];
1706 }
1707
1708 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1709 void *context, int vl, int mode,
1710 u64 data)
1711 {
1712 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1713
1714 return dd->misc_err_status_cnt[11];
1715 }
1716
1717 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1718 void *context, int vl, int mode,
1719 u64 data)
1720 {
1721 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1722
1723 return dd->misc_err_status_cnt[10];
1724 }
1725
1726 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1727 void *context, int vl,
1728 int mode, u64 data)
1729 {
1730 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1731
1732 return dd->misc_err_status_cnt[9];
1733 }
1734
1735 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1736 void *context, int vl, int mode,
1737 u64 data)
1738 {
1739 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1740
1741 return dd->misc_err_status_cnt[8];
1742 }
1743
1744 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1745 const struct cntr_entry *entry,
1746 void *context, int vl, int mode, u64 data)
1747 {
1748 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1749
1750 return dd->misc_err_status_cnt[7];
1751 }
1752
1753 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1754 void *context, int vl,
1755 int mode, u64 data)
1756 {
1757 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1758
1759 return dd->misc_err_status_cnt[6];
1760 }
1761
1762 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1763 void *context, int vl, int mode,
1764 u64 data)
1765 {
1766 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1767
1768 return dd->misc_err_status_cnt[5];
1769 }
1770
1771 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1772 void *context, int vl, int mode,
1773 u64 data)
1774 {
1775 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1776
1777 return dd->misc_err_status_cnt[4];
1778 }
1779
1780 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1781 void *context, int vl,
1782 int mode, u64 data)
1783 {
1784 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1785
1786 return dd->misc_err_status_cnt[3];
1787 }
1788
1789 static u64 access_misc_csr_write_bad_addr_err_cnt(
1790 const struct cntr_entry *entry,
1791 void *context, int vl, int mode, u64 data)
1792 {
1793 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1794
1795 return dd->misc_err_status_cnt[2];
1796 }
1797
1798 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1799 void *context, int vl,
1800 int mode, u64 data)
1801 {
1802 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1803
1804 return dd->misc_err_status_cnt[1];
1805 }
1806
1807 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1808 void *context, int vl, int mode,
1809 u64 data)
1810 {
1811 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1812
1813 return dd->misc_err_status_cnt[0];
1814 }
1815
/*
 * Software counter backing the aggregate of the
 * error status bits within CceErrStatus
 */
1820 static u64 access_sw_cce_err_status_aggregated_cnt(
1821 const struct cntr_entry *entry,
1822 void *context, int vl, int mode, u64 data)
1823 {
1824 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1825
1826 return dd->sw_cce_err_status_aggregate;
1827 }
1828
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
1833 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1834 void *context, int vl, int mode,
1835 u64 data)
1836 {
1837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1838
1839 return dd->cce_err_status_cnt[40];
1840 }
1841
1842 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1843 void *context, int vl, int mode,
1844 u64 data)
1845 {
1846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1847
1848 return dd->cce_err_status_cnt[39];
1849 }
1850
1851 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1852 void *context, int vl, int mode,
1853 u64 data)
1854 {
1855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1856
1857 return dd->cce_err_status_cnt[38];
1858 }
1859
1860 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1861 void *context, int vl, int mode,
1862 u64 data)
1863 {
1864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1865
1866 return dd->cce_err_status_cnt[37];
1867 }
1868
1869 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1870 void *context, int vl, int mode,
1871 u64 data)
1872 {
1873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1874
1875 return dd->cce_err_status_cnt[36];
1876 }
1877
1878 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1879 const struct cntr_entry *entry,
1880 void *context, int vl, int mode, u64 data)
1881 {
1882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1883
1884 return dd->cce_err_status_cnt[35];
1885 }
1886
1887 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1888 const struct cntr_entry *entry,
1889 void *context, int vl, int mode, u64 data)
1890 {
1891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1892
1893 return dd->cce_err_status_cnt[34];
1894 }
1895
1896 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1897 void *context, int vl,
1898 int mode, u64 data)
1899 {
1900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1901
1902 return dd->cce_err_status_cnt[33];
1903 }
1904
1905 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1906 void *context, int vl, int mode,
1907 u64 data)
1908 {
1909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1910
1911 return dd->cce_err_status_cnt[32];
1912 }
1913
1914 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1915 void *context, int vl, int mode, u64 data)
1916 {
1917 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1918
1919 return dd->cce_err_status_cnt[31];
1920 }
1921
1922 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1923 void *context, int vl, int mode,
1924 u64 data)
1925 {
1926 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1927
1928 return dd->cce_err_status_cnt[30];
1929 }
1930
1931 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1932 void *context, int vl, int mode,
1933 u64 data)
1934 {
1935 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1936
1937 return dd->cce_err_status_cnt[29];
1938 }
1939
1940 static u64 access_pcic_transmit_back_parity_err_cnt(
1941 const struct cntr_entry *entry,
1942 void *context, int vl, int mode, u64 data)
1943 {
1944 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1945
1946 return dd->cce_err_status_cnt[28];
1947 }
1948
1949 static u64 access_pcic_transmit_front_parity_err_cnt(
1950 const struct cntr_entry *entry,
1951 void *context, int vl, int mode, u64 data)
1952 {
1953 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1954
1955 return dd->cce_err_status_cnt[27];
1956 }
1957
1958 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1959 void *context, int vl, int mode,
1960 u64 data)
1961 {
1962 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1963
1964 return dd->cce_err_status_cnt[26];
1965 }
1966
1967 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1968 void *context, int vl, int mode,
1969 u64 data)
1970 {
1971 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1972
1973 return dd->cce_err_status_cnt[25];
1974 }
1975
1976 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1977 void *context, int vl, int mode,
1978 u64 data)
1979 {
1980 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1981
1982 return dd->cce_err_status_cnt[24];
1983 }
1984
1985 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1986 void *context, int vl, int mode,
1987 u64 data)
1988 {
1989 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1990
1991 return dd->cce_err_status_cnt[23];
1992 }
1993
1994 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1995 void *context, int vl,
1996 int mode, u64 data)
1997 {
1998 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1999
2000 return dd->cce_err_status_cnt[22];
2001 }
2002
2003 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2004 void *context, int vl, int mode,
2005 u64 data)
2006 {
2007 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2008
2009 return dd->cce_err_status_cnt[21];
2010 }
2011
2012 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2013 const struct cntr_entry *entry,
2014 void *context, int vl, int mode, u64 data)
2015 {
2016 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2017
2018 return dd->cce_err_status_cnt[20];
2019 }
2020
2021 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2022 void *context, int vl,
2023 int mode, u64 data)
2024 {
2025 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2026
2027 return dd->cce_err_status_cnt[19];
2028 }
2029
2030 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2031 void *context, int vl, int mode,
2032 u64 data)
2033 {
2034 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2035
2036 return dd->cce_err_status_cnt[18];
2037 }
2038
2039 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2040 void *context, int vl, int mode,
2041 u64 data)
2042 {
2043 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2044
2045 return dd->cce_err_status_cnt[17];
2046 }
2047
2048 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2049 void *context, int vl, int mode,
2050 u64 data)
2051 {
2052 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2053
2054 return dd->cce_err_status_cnt[16];
2055 }
2056
2057 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2058 void *context, int vl, int mode,
2059 u64 data)
2060 {
2061 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2062
2063 return dd->cce_err_status_cnt[15];
2064 }
2065
2066 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2067 void *context, int vl,
2068 int mode, u64 data)
2069 {
2070 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2071
2072 return dd->cce_err_status_cnt[14];
2073 }
2074
2075 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2076 void *context, int vl, int mode,
2077 u64 data)
2078 {
2079 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2080
2081 return dd->cce_err_status_cnt[13];
2082 }
2083
2084 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2085 const struct cntr_entry *entry,
2086 void *context, int vl, int mode, u64 data)
2087 {
2088 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2089
2090 return dd->cce_err_status_cnt[12];
2091 }
2092
2093 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2094 const struct cntr_entry *entry,
2095 void *context, int vl, int mode, u64 data)
2096 {
2097 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2098
2099 return dd->cce_err_status_cnt[11];
2100 }
2101
2102 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2103 const struct cntr_entry *entry,
2104 void *context, int vl, int mode, u64 data)
2105 {
2106 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2107
2108 return dd->cce_err_status_cnt[10];
2109 }
2110
2111 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2112 const struct cntr_entry *entry,
2113 void *context, int vl, int mode, u64 data)
2114 {
2115 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2116
2117 return dd->cce_err_status_cnt[9];
2118 }
2119
2120 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2121 const struct cntr_entry *entry,
2122 void *context, int vl, int mode, u64 data)
2123 {
2124 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2125
2126 return dd->cce_err_status_cnt[8];
2127 }
2128
2129 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2130 void *context, int vl,
2131 int mode, u64 data)
2132 {
2133 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2134
2135 return dd->cce_err_status_cnt[7];
2136 }
2137
2138 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2139 const struct cntr_entry *entry,
2140 void *context, int vl, int mode, u64 data)
2141 {
2142 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2143
2144 return dd->cce_err_status_cnt[6];
2145 }
2146
2147 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2148 void *context, int vl, int mode,
2149 u64 data)
2150 {
2151 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2152
2153 return dd->cce_err_status_cnt[5];
2154 }
2155
2156 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2157 void *context, int vl, int mode,
2158 u64 data)
2159 {
2160 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2161
2162 return dd->cce_err_status_cnt[4];
2163 }
2164
2165 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2166 const struct cntr_entry *entry,
2167 void *context, int vl, int mode, u64 data)
2168 {
2169 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2170
2171 return dd->cce_err_status_cnt[3];
2172 }
2173
2174 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2175 void *context, int vl,
2176 int mode, u64 data)
2177 {
2178 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2179
2180 return dd->cce_err_status_cnt[2];
2181 }
2182
2183 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2184 void *context, int vl,
2185 int mode, u64 data)
2186 {
2187 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2188
2189 return dd->cce_err_status_cnt[1];
2190 }
2191
2192 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2193 void *context, int vl, int mode,
2194 u64 data)
2195 {
2196 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2197
2198 return dd->cce_err_status_cnt[0];
2199 }
2200
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
2205 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2206 void *context, int vl, int mode,
2207 u64 data)
2208 {
2209 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2210
2211 return dd->rcv_err_status_cnt[63];
2212 }
2213
2214 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2215 void *context, int vl,
2216 int mode, u64 data)
2217 {
2218 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2219
2220 return dd->rcv_err_status_cnt[62];
2221 }
2222
2223 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2224 void *context, int vl, int mode,
2225 u64 data)
2226 {
2227 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2228
2229 return dd->rcv_err_status_cnt[61];
2230 }
2231
2232 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2233 void *context, int vl, int mode,
2234 u64 data)
2235 {
2236 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2237
2238 return dd->rcv_err_status_cnt[60];
2239 }
2240
2241 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2242 void *context, int vl,
2243 int mode, u64 data)
2244 {
2245 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2246
2247 return dd->rcv_err_status_cnt[59];
2248 }
2249
2250 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2251 void *context, int vl,
2252 int mode, u64 data)
2253 {
2254 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2255
2256 return dd->rcv_err_status_cnt[58];
2257 }
2258
2259 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2260 void *context, int vl, int mode,
2261 u64 data)
2262 {
2263 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2264
2265 return dd->rcv_err_status_cnt[57];
2266 }
2267
2268 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2269 void *context, int vl, int mode,
2270 u64 data)
2271 {
2272 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2273
2274 return dd->rcv_err_status_cnt[56];
2275 }
2276
2277 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2278 void *context, int vl, int mode,
2279 u64 data)
2280 {
2281 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2282
2283 return dd->rcv_err_status_cnt[55];
2284 }
2285
2286 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2287 const struct cntr_entry *entry,
2288 void *context, int vl, int mode, u64 data)
2289 {
2290 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2291
2292 return dd->rcv_err_status_cnt[54];
2293 }
2294
2295 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2296 const struct cntr_entry *entry,
2297 void *context, int vl, int mode, u64 data)
2298 {
2299 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2300
2301 return dd->rcv_err_status_cnt[53];
2302 }
2303
2304 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2305 void *context, int vl,
2306 int mode, u64 data)
2307 {
2308 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2309
2310 return dd->rcv_err_status_cnt[52];
2311 }
2312
2313 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2314 void *context, int vl,
2315 int mode, u64 data)
2316 {
2317 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2318
2319 return dd->rcv_err_status_cnt[51];
2320 }
2321
2322 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2323 void *context, int vl,
2324 int mode, u64 data)
2325 {
2326 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2327
2328 return dd->rcv_err_status_cnt[50];
2329 }
2330
2331 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2332 void *context, int vl,
2333 int mode, u64 data)
2334 {
2335 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2336
2337 return dd->rcv_err_status_cnt[49];
2338 }
2339
2340 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2341 void *context, int vl,
2342 int mode, u64 data)
2343 {
2344 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2345
2346 return dd->rcv_err_status_cnt[48];
2347 }
2348
2349 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2350 void *context, int vl,
2351 int mode, u64 data)
2352 {
2353 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2354
2355 return dd->rcv_err_status_cnt[47];
2356 }
2357
2358 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2359 void *context, int vl, int mode,
2360 u64 data)
2361 {
2362 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2363
2364 return dd->rcv_err_status_cnt[46];
2365 }
2366
2367 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2368 const struct cntr_entry *entry,
2369 void *context, int vl, int mode, u64 data)
2370 {
2371 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2372
2373 return dd->rcv_err_status_cnt[45];
2374 }
2375
2376 static u64 access_rx_lookup_csr_parity_err_cnt(
2377 const struct cntr_entry *entry,
2378 void *context, int vl, int mode, u64 data)
2379 {
2380 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2381
2382 return dd->rcv_err_status_cnt[44];
2383 }
2384
2385 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2386 const struct cntr_entry *entry,
2387 void *context, int vl, int mode, u64 data)
2388 {
2389 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2390
2391 return dd->rcv_err_status_cnt[43];
2392 }
2393
2394 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2395 const struct cntr_entry *entry,
2396 void *context, int vl, int mode, u64 data)
2397 {
2398 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2399
2400 return dd->rcv_err_status_cnt[42];
2401 }
2402
2403 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2404 const struct cntr_entry *entry,
2405 void *context, int vl, int mode, u64 data)
2406 {
2407 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2408
2409 return dd->rcv_err_status_cnt[41];
2410 }
2411
2412 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2413 const struct cntr_entry *entry,
2414 void *context, int vl, int mode, u64 data)
2415 {
2416 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2417
2418 return dd->rcv_err_status_cnt[40];
2419 }
2420
2421 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2422 const struct cntr_entry *entry,
2423 void *context, int vl, int mode, u64 data)
2424 {
2425 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2426
2427 return dd->rcv_err_status_cnt[39];
2428 }
2429
2430 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2431 const struct cntr_entry *entry,
2432 void *context, int vl, int mode, u64 data)
2433 {
2434 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2435
2436 return dd->rcv_err_status_cnt[38];
2437 }
2438
2439 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2440 const struct cntr_entry *entry,
2441 void *context, int vl, int mode, u64 data)
2442 {
2443 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2444
2445 return dd->rcv_err_status_cnt[37];
2446 }
2447
2448 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2449 const struct cntr_entry *entry,
2450 void *context, int vl, int mode, u64 data)
2451 {
2452 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2453
2454 return dd->rcv_err_status_cnt[36];
2455 }
2456
2457 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2458 const struct cntr_entry *entry,
2459 void *context, int vl, int mode, u64 data)
2460 {
2461 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2462
2463 return dd->rcv_err_status_cnt[35];
2464 }
2465
2466 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2467 const struct cntr_entry *entry,
2468 void *context, int vl, int mode, u64 data)
2469 {
2470 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2471
2472 return dd->rcv_err_status_cnt[34];
2473 }
2474
2475 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2476 const struct cntr_entry *entry,
2477 void *context, int vl, int mode, u64 data)
2478 {
2479 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2480
2481 return dd->rcv_err_status_cnt[33];
2482 }
2483
2484 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2485 void *context, int vl, int mode,
2486 u64 data)
2487 {
2488 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2489
2490 return dd->rcv_err_status_cnt[32];
2491 }
2492
2493 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2494 void *context, int vl, int mode,
2495 u64 data)
2496 {
2497 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2498
2499 return dd->rcv_err_status_cnt[31];
2500 }
2501
2502 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2503 void *context, int vl, int mode,
2504 u64 data)
2505 {
2506 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2507
2508 return dd->rcv_err_status_cnt[30];
2509 }
2510
2511 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2512 void *context, int vl, int mode,
2513 u64 data)
2514 {
2515 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2516
2517 return dd->rcv_err_status_cnt[29];
2518 }
2519
2520 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2521 void *context, int vl,
2522 int mode, u64 data)
2523 {
2524 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2525
2526 return dd->rcv_err_status_cnt[28];
2527 }
2528
2529 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2530 const struct cntr_entry *entry,
2531 void *context, int vl, int mode, u64 data)
2532 {
2533 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2534
2535 return dd->rcv_err_status_cnt[27];
2536 }
2537
2538 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2539 const struct cntr_entry *entry,
2540 void *context, int vl, int mode, u64 data)
2541 {
2542 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2543
2544 return dd->rcv_err_status_cnt[26];
2545 }
2546
2547 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2548 const struct cntr_entry *entry,
2549 void *context, int vl, int mode, u64 data)
2550 {
2551 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2552
2553 return dd->rcv_err_status_cnt[25];
2554 }
2555
2556 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2557 const struct cntr_entry *entry,
2558 void *context, int vl, int mode, u64 data)
2559 {
2560 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2561
2562 return dd->rcv_err_status_cnt[24];
2563 }
2564
2565 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2566 const struct cntr_entry *entry,
2567 void *context, int vl, int mode, u64 data)
2568 {
2569 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2570
2571 return dd->rcv_err_status_cnt[23];
2572 }
2573
2574 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2575 const struct cntr_entry *entry,
2576 void *context, int vl, int mode, u64 data)
2577 {
2578 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2579
2580 return dd->rcv_err_status_cnt[22];
2581 }
2582
2583 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2584 const struct cntr_entry *entry,
2585 void *context, int vl, int mode, u64 data)
2586 {
2587 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2588
2589 return dd->rcv_err_status_cnt[21];
2590 }
2591
2592 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2593 const struct cntr_entry *entry,
2594 void *context, int vl, int mode, u64 data)
2595 {
2596 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2597
2598 return dd->rcv_err_status_cnt[20];
2599 }
2600
2601 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2602 const struct cntr_entry *entry,
2603 void *context, int vl, int mode, u64 data)
2604 {
2605 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2606
2607 return dd->rcv_err_status_cnt[19];
2608 }
2609
2610 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2611 void *context, int vl,
2612 int mode, u64 data)
2613 {
2614 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2615
2616 return dd->rcv_err_status_cnt[18];
2617 }
2618
2619 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2620 void *context, int vl,
2621 int mode, u64 data)
2622 {
2623 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2624
2625 return dd->rcv_err_status_cnt[17];
2626 }
2627
2628 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2629 const struct cntr_entry *entry,
2630 void *context, int vl, int mode, u64 data)
2631 {
2632 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2633
2634 return dd->rcv_err_status_cnt[16];
2635 }
2636
2637 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2638 const struct cntr_entry *entry,
2639 void *context, int vl, int mode, u64 data)
2640 {
2641 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2642
2643 return dd->rcv_err_status_cnt[15];
2644 }
2645
2646 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2647 void *context, int vl,
2648 int mode, u64 data)
2649 {
2650 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2651
2652 return dd->rcv_err_status_cnt[14];
2653 }
2654
2655 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2656 void *context, int vl,
2657 int mode, u64 data)
2658 {
2659 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2660
2661 return dd->rcv_err_status_cnt[13];
2662 }
2663
2664 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2665 void *context, int vl, int mode,
2666 u64 data)
2667 {
2668 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2669
2670 return dd->rcv_err_status_cnt[12];
2671 }
2672
2673 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2674 void *context, int vl, int mode,
2675 u64 data)
2676 {
2677 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2678
2679 return dd->rcv_err_status_cnt[11];
2680 }
2681
2682 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2683 void *context, int vl, int mode,
2684 u64 data)
2685 {
2686 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2687
2688 return dd->rcv_err_status_cnt[10];
2689 }
2690
2691 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2692 void *context, int vl, int mode,
2693 u64 data)
2694 {
2695 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2696
2697 return dd->rcv_err_status_cnt[9];
2698 }
2699
2700 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2701 void *context, int vl, int mode,
2702 u64 data)
2703 {
2704 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2705
2706 return dd->rcv_err_status_cnt[8];
2707 }
2708
2709 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2710 const struct cntr_entry *entry,
2711 void *context, int vl, int mode, u64 data)
2712 {
2713 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2714
2715 return dd->rcv_err_status_cnt[7];
2716 }
2717
2718 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2719 const struct cntr_entry *entry,
2720 void *context, int vl, int mode, u64 data)
2721 {
2722 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2723
2724 return dd->rcv_err_status_cnt[6];
2725 }
2726
2727 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2728 void *context, int vl, int mode,
2729 u64 data)
2730 {
2731 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2732
2733 return dd->rcv_err_status_cnt[5];
2734 }
2735
2736 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2737 void *context, int vl, int mode,
2738 u64 data)
2739 {
2740 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2741
2742 return dd->rcv_err_status_cnt[4];
2743 }
2744
2745 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2746 void *context, int vl, int mode,
2747 u64 data)
2748 {
2749 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2750
2751 return dd->rcv_err_status_cnt[3];
2752 }
2753
2754 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2755 void *context, int vl, int mode,
2756 u64 data)
2757 {
2758 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2759
2760 return dd->rcv_err_status_cnt[2];
2761 }
2762
2763 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2764 void *context, int vl, int mode,
2765 u64 data)
2766 {
2767 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2768
2769 return dd->rcv_err_status_cnt[1];
2770 }
2771
2772 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2773 void *context, int vl, int mode,
2774 u64 data)
2775 {
2776 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2777
2778 return dd->rcv_err_status_cnt[0];
2779 }
2780
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
2785 static u64 access_pio_pec_sop_head_parity_err_cnt(
2786 const struct cntr_entry *entry,
2787 void *context, int vl, int mode, u64 data)
2788 {
2789 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2790
2791 return dd->send_pio_err_status_cnt[35];
2792 }
2793
2794 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2795 const struct cntr_entry *entry,
2796 void *context, int vl, int mode, u64 data)
2797 {
2798 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2799
2800 return dd->send_pio_err_status_cnt[34];
2801 }
2802
2803 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2804 const struct cntr_entry *entry,
2805 void *context, int vl, int mode, u64 data)
2806 {
2807 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2808
2809 return dd->send_pio_err_status_cnt[33];
2810 }
2811
2812 static u64 access_pio_current_free_cnt_parity_err_cnt(
2813 const struct cntr_entry *entry,
2814 void *context, int vl, int mode, u64 data)
2815 {
2816 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2817
2818 return dd->send_pio_err_status_cnt[32];
2819 }
2820
2821 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2822 void *context, int vl, int mode,
2823 u64 data)
2824 {
2825 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2826
2827 return dd->send_pio_err_status_cnt[31];
2828 }
2829
2830 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2831 void *context, int vl, int mode,
2832 u64 data)
2833 {
2834 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2835
2836 return dd->send_pio_err_status_cnt[30];
2837 }
2838
2839 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2840 void *context, int vl, int mode,
2841 u64 data)
2842 {
2843 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2844
2845 return dd->send_pio_err_status_cnt[29];
2846 }
2847
2848 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2849 const struct cntr_entry *entry,
2850 void *context, int vl, int mode, u64 data)
2851 {
2852 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2853
2854 return dd->send_pio_err_status_cnt[28];
2855 }
2856
2857 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2858 void *context, int vl, int mode,
2859 u64 data)
2860 {
2861 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2862
2863 return dd->send_pio_err_status_cnt[27];
2864 }
2865
2866 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2867 void *context, int vl, int mode,
2868 u64 data)
2869 {
2870 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2871
2872 return dd->send_pio_err_status_cnt[26];
2873 }
2874
2875 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2876 void *context, int vl,
2877 int mode, u64 data)
2878 {
2879 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2880
2881 return dd->send_pio_err_status_cnt[25];
2882 }
2883
2884 static u64 access_pio_block_qw_count_parity_err_cnt(
2885 const struct cntr_entry *entry,
2886 void *context, int vl, int mode, u64 data)
2887 {
2888 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2889
2890 return dd->send_pio_err_status_cnt[24];
2891 }
2892
2893 static u64 access_pio_write_qw_valid_parity_err_cnt(
2894 const struct cntr_entry *entry,
2895 void *context, int vl, int mode, u64 data)
2896 {
2897 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2898
2899 return dd->send_pio_err_status_cnt[23];
2900 }
2901
2902 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2903 void *context, int vl, int mode,
2904 u64 data)
2905 {
2906 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2907
2908 return dd->send_pio_err_status_cnt[22];
2909 }
2910
2911 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2912 void *context, int vl,
2913 int mode, u64 data)
2914 {
2915 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2916
2917 return dd->send_pio_err_status_cnt[21];
2918 }
2919
2920 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2921 void *context, int vl,
2922 int mode, u64 data)
2923 {
2924 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2925
2926 return dd->send_pio_err_status_cnt[20];
2927 }
2928
2929 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2930 void *context, int vl,
2931 int mode, u64 data)
2932 {
2933 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2934
2935 return dd->send_pio_err_status_cnt[19];
2936 }
2937
2938 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2939 const struct cntr_entry *entry,
2940 void *context, int vl, int mode, u64 data)
2941 {
2942 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2943
2944 return dd->send_pio_err_status_cnt[18];
2945 }
2946
2947 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2948 void *context, int vl, int mode,
2949 u64 data)
2950 {
2951 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2952
2953 return dd->send_pio_err_status_cnt[17];
2954 }
2955
2956 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2957 void *context, int vl, int mode,
2958 u64 data)
2959 {
2960 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2961
2962 return dd->send_pio_err_status_cnt[16];
2963 }
2964
2965 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2966 const struct cntr_entry *entry,
2967 void *context, int vl, int mode, u64 data)
2968 {
2969 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2970
2971 return dd->send_pio_err_status_cnt[15];
2972 }
2973
2974 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2975 const struct cntr_entry *entry,
2976 void *context, int vl, int mode, u64 data)
2977 {
2978 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2979
2980 return dd->send_pio_err_status_cnt[14];
2981 }
2982
2983 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2984 const struct cntr_entry *entry,
2985 void *context, int vl, int mode, u64 data)
2986 {
2987 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2988
2989 return dd->send_pio_err_status_cnt[13];
2990 }
2991
2992 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2993 const struct cntr_entry *entry,
2994 void *context, int vl, int mode, u64 data)
2995 {
2996 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2997
2998 return dd->send_pio_err_status_cnt[12];
2999 }
3000
3001 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3002 const struct cntr_entry *entry,
3003 void *context, int vl, int mode, u64 data)
3004 {
3005 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3006
3007 return dd->send_pio_err_status_cnt[11];
3008 }
3009
3010 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3011 const struct cntr_entry *entry,
3012 void *context, int vl, int mode, u64 data)
3013 {
3014 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3015
3016 return dd->send_pio_err_status_cnt[10];
3017 }
3018
3019 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3020 const struct cntr_entry *entry,
3021 void *context, int vl, int mode, u64 data)
3022 {
3023 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3024
3025 return dd->send_pio_err_status_cnt[9];
3026 }
3027
3028 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3029 const struct cntr_entry *entry,
3030 void *context, int vl, int mode, u64 data)
3031 {
3032 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3033
3034 return dd->send_pio_err_status_cnt[8];
3035 }
3036
3037 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3038 const struct cntr_entry *entry,
3039 void *context, int vl, int mode, u64 data)
3040 {
3041 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3042
3043 return dd->send_pio_err_status_cnt[7];
3044 }
3045
3046 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3047 void *context, int vl, int mode,
3048 u64 data)
3049 {
3050 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3051
3052 return dd->send_pio_err_status_cnt[6];
3053 }
3054
3055 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3056 void *context, int vl, int mode,
3057 u64 data)
3058 {
3059 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3060
3061 return dd->send_pio_err_status_cnt[5];
3062 }
3063
3064 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3065 void *context, int vl, int mode,
3066 u64 data)
3067 {
3068 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3069
3070 return dd->send_pio_err_status_cnt[4];
3071 }
3072
3073 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3074 void *context, int vl, int mode,
3075 u64 data)
3076 {
3077 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3078
3079 return dd->send_pio_err_status_cnt[3];
3080 }
3081
3082 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3083 void *context, int vl, int mode,
3084 u64 data)
3085 {
3086 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3087
3088 return dd->send_pio_err_status_cnt[2];
3089 }
3090
3091 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3092 void *context, int vl,
3093 int mode, u64 data)
3094 {
3095 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3096
3097 return dd->send_pio_err_status_cnt[1];
3098 }
3099
3100 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3101 void *context, int vl, int mode,
3102 u64 data)
3103 {
3104 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3105
3106 return dd->send_pio_err_status_cnt[0];
3107 }
3108
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
3113 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3114 const struct cntr_entry *entry,
3115 void *context, int vl, int mode, u64 data)
3116 {
3117 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3118
3119 return dd->send_dma_err_status_cnt[3];
3120 }
3121
3122 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3123 const struct cntr_entry *entry,
3124 void *context, int vl, int mode, u64 data)
3125 {
3126 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3127
3128 return dd->send_dma_err_status_cnt[2];
3129 }
3130
3131 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3132 void *context, int vl, int mode,
3133 u64 data)
3134 {
3135 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3136
3137 return dd->send_dma_err_status_cnt[1];
3138 }
3139
3140 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3141 void *context, int vl, int mode,
3142 u64 data)
3143 {
3144 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3145
3146 return dd->send_dma_err_status_cnt[0];
3147 }
3148
/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
3153 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3154 const struct cntr_entry *entry,
3155 void *context, int vl, int mode, u64 data)
3156 {
3157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3158
3159 return dd->send_egress_err_status_cnt[63];
3160 }
3161
3162 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3163 const struct cntr_entry *entry,
3164 void *context, int vl, int mode, u64 data)
3165 {
3166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3167
3168 return dd->send_egress_err_status_cnt[62];
3169 }
3170
3171 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3172 void *context, int vl, int mode,
3173 u64 data)
3174 {
3175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3176
3177 return dd->send_egress_err_status_cnt[61];
3178 }
3179
3180 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3181 void *context, int vl,
3182 int mode, u64 data)
3183 {
3184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3185
3186 return dd->send_egress_err_status_cnt[60];
3187 }
3188
3189 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3190 const struct cntr_entry *entry,
3191 void *context, int vl, int mode, u64 data)
3192 {
3193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3194
3195 return dd->send_egress_err_status_cnt[59];
3196 }
3197
3198 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3199 void *context, int vl, int mode,
3200 u64 data)
3201 {
3202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3203
3204 return dd->send_egress_err_status_cnt[58];
3205 }
3206
3207 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3208 void *context, int vl, int mode,
3209 u64 data)
3210 {
3211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3212
3213 return dd->send_egress_err_status_cnt[57];
3214 }
3215
3216 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3217 void *context, int vl, int mode,
3218 u64 data)
3219 {
3220 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3221
3222 return dd->send_egress_err_status_cnt[56];
3223 }
3224
3225 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3226 void *context, int vl, int mode,
3227 u64 data)
3228 {
3229 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3230
3231 return dd->send_egress_err_status_cnt[55];
3232 }
3233
3234 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3235 void *context, int vl, int mode,
3236 u64 data)
3237 {
3238 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3239
3240 return dd->send_egress_err_status_cnt[54];
3241 }
3242
3243 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3244 void *context, int vl, int mode,
3245 u64 data)
3246 {
3247 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3248
3249 return dd->send_egress_err_status_cnt[53];
3250 }
3251
3252 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3253 void *context, int vl, int mode,
3254 u64 data)
3255 {
3256 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3257
3258 return dd->send_egress_err_status_cnt[52];
3259 }
3260
3261 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3262 void *context, int vl, int mode,
3263 u64 data)
3264 {
3265 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3266
3267 return dd->send_egress_err_status_cnt[51];
3268 }
3269
3270 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3271 void *context, int vl, int mode,
3272 u64 data)
3273 {
3274 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3275
3276 return dd->send_egress_err_status_cnt[50];
3277 }
3278
3279 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3280 void *context, int vl, int mode,
3281 u64 data)
3282 {
3283 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3284
3285 return dd->send_egress_err_status_cnt[49];
3286 }
3287
3288 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3289 void *context, int vl, int mode,
3290 u64 data)
3291 {
3292 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3293
3294 return dd->send_egress_err_status_cnt[48];
3295 }
3296
3297 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3298 void *context, int vl, int mode,
3299 u64 data)
3300 {
3301 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3302
3303 return dd->send_egress_err_status_cnt[47];
3304 }
3305
3306 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3307 void *context, int vl, int mode,
3308 u64 data)
3309 {
3310 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3311
3312 return dd->send_egress_err_status_cnt[46];
3313 }
3314
3315 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3316 void *context, int vl, int mode,
3317 u64 data)
3318 {
3319 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3320
3321 return dd->send_egress_err_status_cnt[45];
3322 }
3323
3324 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3325 void *context, int vl,
3326 int mode, u64 data)
3327 {
3328 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3329
3330 return dd->send_egress_err_status_cnt[44];
3331 }
3332
3333 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3334 const struct cntr_entry *entry,
3335 void *context, int vl, int mode, u64 data)
3336 {
3337 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3338
3339 return dd->send_egress_err_status_cnt[43];
3340 }
3341
3342 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3343 void *context, int vl, int mode,
3344 u64 data)
3345 {
3346 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3347
3348 return dd->send_egress_err_status_cnt[42];
3349 }
3350
3351 static u64 access_tx_credit_return_partiy_err_cnt(
3352 const struct cntr_entry *entry,
3353 void *context, int vl, int mode, u64 data)
3354 {
3355 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3356
3357 return dd->send_egress_err_status_cnt[41];
3358 }
3359
3360 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3361 const struct cntr_entry *entry,
3362 void *context, int vl, int mode, u64 data)
3363 {
3364 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3365
3366 return dd->send_egress_err_status_cnt[40];
3367 }
3368
static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[39];
}

static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[38];
}

static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[37];
}

static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[36];
}

static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[35];
}

static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[34];
}

static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[33];
}

static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[32];
}

static u64 access_tx_sdma15_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[31];
}

static u64 access_tx_sdma14_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[30];
}

static u64 access_tx_sdma13_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[29];
}

static u64 access_tx_sdma12_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[28];
}

static u64 access_tx_sdma11_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[27];
}

static u64 access_tx_sdma10_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[26];
}

static u64 access_tx_sdma9_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[25];
}

static u64 access_tx_sdma8_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[24];
}

static u64 access_tx_sdma7_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl,
                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifo_underrun_or_parity_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_egress_err_status_cnt[0];
}

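/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */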
static u64 access_send_csr_write_bad_addr_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
                void *context, int vl,
                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->send_err_status_cnt[0];
}

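/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */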
static u64 access_pio_write_out_of_bounds_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
                void *context, int vl,
                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_ctxt_err_status_cnt[0];
}

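/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */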
static u64 access_sdma_header_request_fifo_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
                void *context, int vl,
                int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
                const struct cntr_entry *entry,
                void *context, int vl, int mode, u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        return dd->sw_send_dma_eng_err_status_cnt[0];
}

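/*
 * In addition to the DCC_ERR_PORTRCV_ERR_CNT CSR, fold the software
 * count of dropped bypass packets into a read (saturating at CNTR_MAX)
 * and clear that software count on a write.
 */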
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
                void *context, int vl, int mode,
                u64 data)
{
        struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

        u64 val = 0;
        u64 csr = entry->csr;

        val = read_write_csr(dd, csr, mode, data);
        if (mode == CNTR_MODE_R) {
                val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
                        CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
        } else if (mode == CNTR_MODE_W) {
                dd->sw_rcv_bypass_packet_errors = 0;
        } else {
                dd_dev_err(dd, "Invalid cntr register access mode");
                return 0;
        }
        return val;
}

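/*
 * def_access_sw_cpu() - generate an accessor for a per-CPU software
 * counter kept in ibport_data; reads and resets go through
 * read_write_cpu(), with z_<cntr> holding the zero (baseline) value.
 */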
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
                void *context, int vl, int mode, u64 data) \
{ \
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
        return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
                              ppd->ibport_data.rvp.cntr, vl, \
                              mode, data); \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);

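/*
 * def_access_ibp_counter() - generate an accessor for a port-wide
 * software counter in ibport_data; these have no per-VL breakdown,
 * so any per-VL query returns 0.
 */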
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
                void *context, int vl, int mode, u64 data) \
{ \
        struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
        \
        if (vl != CNTR_INVALID_VL) \
                return 0; \
        \
        return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
                             mode, data); \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
def_access_ibp_counter(rc_crwaits);

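/*
 * Device counter table: maps each device counter index to its name,
 * source CSR (where one exists), flags, and access routine.
 */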
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
[C_RX_SHORT_ERR] = RXE32_DEV_CNTR_ELEM(RxShrErr, RCV_SHORT_ERR_CNT, CNTR_SYNTH),
[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
                CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
                CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
                RCV_TID_FLOW_GEN_MISMATCH_CNT,
                CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
                CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
                RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
                CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
                CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
                CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
                CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
                CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
                CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
                CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
                CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
                CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
                CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
                access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
                CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
                CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
                CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
                DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
                DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
                CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
                DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
                CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
                CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
                CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
                CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
                CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
                CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
                CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
                CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
                CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
                CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
        DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
                         CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
                CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
                CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
                CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
                CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
        DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
                         CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
                CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
                CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
        DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
                         CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
        DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
                         CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
        DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
                         CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
        DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
                         CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
        DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
                         CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
                CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
        DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
                         CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
        DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
        DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
                         CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
        DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
        DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
        DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
        DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
        DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
                         CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
        DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
        DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
                         CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
                access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
                access_sw_cpu_rcv_limit),
[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
                access_sw_ctx0_seq_drop),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
                access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
                access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
                access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
                access_sw_kmem_wait),
[C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
                hfi1_access_sw_tid_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
                access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
                SEND_DMA_DESC_FETCHED_CNT, 0,
                CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
                CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
                CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
                CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
                CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
                access_sde_progress_int_cnt),

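/* MISC error status counters */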
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
                0, CNTR_NORMAL,
                access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
                CNTR_NORMAL,
                access_misc_csr_parity_err_cnt),

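/* CCE error status counters */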
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
                CNTR_NORMAL,
                access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
                CNTR_NORMAL,
                access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
                CNTR_NORMAL,
                access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
                CNTR_NORMAL,
                access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
                CNTR_NORMAL,
                access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
                0, CNTR_NORMAL,
                access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
                0, CNTR_NORMAL,
                access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
                CNTR_NORMAL,
                access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
                CNTR_NORMAL,
                access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
                0, CNTR_NORMAL,
                access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
                CNTR_NORMAL,
                access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
                "CceCli1AsyncFifoDbgParityError", 0, 0,
                CNTR_NORMAL,
                access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
                "CceCli1AsyncFifoRxdmaParityError", 0, 0,
                CNTR_NORMAL,
                access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
                "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
                CNTR_NORMAL,
                access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
                "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
                CNTR_NORMAL,
                access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
                0, CNTR_NORMAL,
                access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
                CNTR_NORMAL,
                access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
                0, CNTR_NORMAL,
                access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
                CNTR_NORMAL,
                access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
                CNTR_NORMAL,
                access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
                0, CNTR_NORMAL,
                access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_ccs_csr_parity_err_cnt),

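/* RXE (receive) error status counters */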
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
                CNTR_NORMAL,
                access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
                CNTR_NORMAL,
                access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
                0, CNTR_NORMAL,
                access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
                0, CNTR_NORMAL,
                access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
                "RxRbufFlInitWrAddrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
                0, CNTR_NORMAL,
                access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
                0, CNTR_NORMAL,
                access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
                CNTR_NORMAL,
                access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
                CNTR_NORMAL,
                access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
                CNTR_NORMAL,
                access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
                "RxRbufCsrQNumOfPktParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
                "RxRbufCsrQTlPtrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
                0, CNTR_NORMAL,
                access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
                0, CNTR_NORMAL,
                access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
                0, 0, CNTR_NORMAL,
                access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
                0, CNTR_NORMAL,
                access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
                "RxRbufCsrQHeadBufNumParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
                0, CNTR_NORMAL,
                access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
                0, CNTR_NORMAL,
                access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
                "RxRbufLookupDesRegUncCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
                CNTR_NORMAL,
                access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
                CNTR_NORMAL,
                access_rx_dma_csr_cor_err_cnt),

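/* PIO (send context) error status counters */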
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
                0, 0, CNTR_NORMAL,
                access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
                0, CNTR_NORMAL,
                access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
                CNTR_NORMAL,
                access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
                CNTR_NORMAL,
                access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
                CNTR_NORMAL,
                access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
                CNTR_NORMAL,
                access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
                CNTR_NORMAL,
                access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
                CNTR_NORMAL,
                access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
                CNTR_NORMAL,
                access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
                CNTR_NORMAL,
                access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
                CNTR_NORMAL,
                access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
                0, CNTR_NORMAL,
                access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
                CNTR_NORMAL,
                access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
                CNTR_NORMAL,
                access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
                CNTR_NORMAL,
                access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
                CNTR_NORMAL,
                access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
                "PioSbrdctrlCrrelFifoParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
                CNTR_NORMAL,
                access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
                CNTR_NORMAL,
                access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
                CNTR_NORMAL,
                access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
                CNTR_NORMAL,
                access_pio_write_bad_ctxt_err_cnt),

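/* SDMA error status counters */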
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
                0, CNTR_NORMAL,
                access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
                0, CNTR_NORMAL,
                access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
                CNTR_NORMAL,
                access_sdma_rpy_tag_err_cnt),

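/* TXE (egress) error status counters */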
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
                CNTR_NORMAL,
                access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
                0, CNTR_NORMAL,
                access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
                CNTR_NORMAL,
                access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
                CNTR_NORMAL,
                access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
                CNTR_NORMAL,
                access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
                CNTR_NORMAL,
                access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
                CNTR_NORMAL,
                access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
                CNTR_NORMAL,
                access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
                CNTR_NORMAL,
                access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
                CNTR_NORMAL,
                access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
                "TxSbrdCtlStateMachineParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
                CNTR_NORMAL,
                access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
                CNTR_NORMAL,
                access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
                0, 0, CNTR_NORMAL,
                access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
                CNTR_NORMAL,
                access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
                CNTR_NORMAL,
                access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
                CNTR_NORMAL,
                access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
                CNTR_NORMAL,
                access_tx_linkdown_err_cnt),
4944 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4945 "EgressFifoUnderrunOrParityErr", 0, 0,
4946 CNTR_NORMAL,
4947 access_tx_egress_fifi_underrun_or_parity_err_cnt),
4948 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4949 CNTR_NORMAL,
4950 access_egress_reserved_2_err_cnt),
4951 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4952 CNTR_NORMAL,
4953 access_tx_pkt_integrity_mem_unc_err_cnt),
4954 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4955 CNTR_NORMAL,
4956 access_tx_pkt_integrity_mem_cor_err_cnt),
4957
4958 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4959 CNTR_NORMAL,
4960 access_send_csr_write_bad_addr_err_cnt),
4961 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4962 CNTR_NORMAL,
4963 access_send_csr_read_bad_addr_err_cnt),
4964 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4965 CNTR_NORMAL,
4966 access_send_csr_parity_cnt),
4967
4968 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4969 CNTR_NORMAL,
4970 access_pio_write_out_of_bounds_err_cnt),
4971 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4972 CNTR_NORMAL,
4973 access_pio_write_overflow_err_cnt),
4974 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4975 0, 0, CNTR_NORMAL,
4976 access_pio_write_crosses_boundary_err_cnt),
4977 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4978 CNTR_NORMAL,
4979 access_pio_disallowed_packet_err_cnt),
4980 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4981 CNTR_NORMAL,
4982 access_pio_inconsistent_sop_err_cnt),
4983
4984 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4985 0, 0, CNTR_NORMAL,
4986 access_sdma_header_request_fifo_cor_err_cnt),
4987 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4988 CNTR_NORMAL,
4989 access_sdma_header_storage_cor_err_cnt),
4990 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4991 CNTR_NORMAL,
4992 access_sdma_packet_tracking_cor_err_cnt),
4993 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4994 CNTR_NORMAL,
4995 access_sdma_assembly_cor_err_cnt),
4996 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4997 CNTR_NORMAL,
4998 access_sdma_desc_table_cor_err_cnt),
4999 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5000 0, 0, CNTR_NORMAL,
5001 access_sdma_header_request_fifo_unc_err_cnt),
5002 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5003 CNTR_NORMAL,
5004 access_sdma_header_storage_unc_err_cnt),
5005 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5006 CNTR_NORMAL,
5007 access_sdma_packet_tracking_unc_err_cnt),
5008 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5009 CNTR_NORMAL,
5010 access_sdma_assembly_unc_err_cnt),
5011 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5012 CNTR_NORMAL,
5013 access_sdma_desc_table_unc_err_cnt),
5014 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5015 CNTR_NORMAL,
5016 access_sdma_timeout_err_cnt),
5017 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5018 CNTR_NORMAL,
5019 access_sdma_header_length_err_cnt),
5020 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5021 CNTR_NORMAL,
5022 access_sdma_header_address_err_cnt),
5023 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5024 CNTR_NORMAL,
5025 access_sdma_header_select_err_cnt),
5026 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5027 CNTR_NORMAL,
5028 access_sdma_reserved_9_err_cnt),
5029 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5030 CNTR_NORMAL,
5031 access_sdma_packet_desc_overflow_err_cnt),
5032 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5033 CNTR_NORMAL,
5034 access_sdma_length_mismatch_err_cnt),
5035 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5036 CNTR_NORMAL,
5037 access_sdma_halt_err_cnt),
5038 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5039 CNTR_NORMAL,
5040 access_sdma_mem_read_err_cnt),
5041 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5042 CNTR_NORMAL,
5043 access_sdma_first_desc_err_cnt),
5044 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5045 CNTR_NORMAL,
5046 access_sdma_tail_out_of_bounds_err_cnt),
5047 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5048 CNTR_NORMAL,
5049 access_sdma_too_long_err_cnt),
5050 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5051 CNTR_NORMAL,
5052 access_sdma_gen_mismatch_err_cnt),
5053 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5054 CNTR_NORMAL,
5055 access_sdma_wrong_dw_err_cnt),
5056 };
5057
5058 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5059 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5060 CNTR_NORMAL),
5061 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5062 CNTR_NORMAL),
5063 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5064 CNTR_NORMAL),
5065 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5066 CNTR_NORMAL),
5067 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5068 CNTR_NORMAL),
5069 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5070 CNTR_NORMAL),
5071 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5072 CNTR_NORMAL),
5073 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5074 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5075 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5076 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5077 CNTR_SYNTH | CNTR_VL),
5078 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5079 CNTR_SYNTH | CNTR_VL),
5080 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5081 CNTR_SYNTH | CNTR_VL),
5082 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5083 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5084 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5085 access_sw_link_dn_cnt),
5086 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5087 access_sw_link_up_cnt),
5088 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5089 access_sw_unknown_frame_cnt),
5090 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5091 access_sw_xmit_discards),
5092 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5093 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5094 access_sw_xmit_discards),
5095 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5096 access_xmit_constraint_errs),
5097 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5098 access_rcv_constraint_errs),
5099 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5100 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5101 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5102 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5103 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5104 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5105 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5106 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5107 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5108 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5109 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5110 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5111 [C_SW_IBP_RC_CRWAITS] = SW_IBP_CNTR(RcCrWait, rc_crwaits),
5112 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5113 access_sw_cpu_rc_acks),
5114 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5115 access_sw_cpu_rc_qacks),
5116 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5117 access_sw_cpu_rc_delayed_comp),
5118 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5119 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5120 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5121 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5122 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5123 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5124 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5125 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5126 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5127 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5128 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5129 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5130 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5131 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5132 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5133 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5134 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5135 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5136 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5137 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5138 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5139 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5140 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5141 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5142 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5143 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5144 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5145 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5146 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5147 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5148 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5149 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5150 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5151 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5152 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5153 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5154 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5155 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5156 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5157 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5158 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5159 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5160 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5161 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5162 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5163 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5164 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5165 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5166 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5167 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5168 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5169 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5170 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5171 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5172 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5173 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5174 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5175 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5176 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5177 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5178 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5179 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5180 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5181 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5182 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5183 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5184 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5185 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5186 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5187 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5188 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5189 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5190 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5191 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5192 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5193 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5194 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5195 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5196 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5197 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5198 };
5202
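/*
 * Return true if this is an A-step part: the upper nibble of the minor
 * chip revision is 0.
 */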
5203 int is_ax(struct hfi1_devdata *dd)
5204 {
5205 u8 chip_rev_minor =
5206 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5207 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5208 return (chip_rev_minor & 0xf0) == 0;
5209 }
5210
5211
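/*
 * Return true if this is a B-step part: the upper nibble of the minor
 * chip revision is 1.
 */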
5212 int is_bx(struct hfi1_devdata *dd)
5213 {
5214 u8 chip_rev_minor =
5215 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5216 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5217 return (chip_rev_minor & 0xF0) == 0x10;
5218 }
5219
5220
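/* return true if the urgent interrupt for this receive context is masked */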
5221 bool is_urg_masked(struct hfi1_ctxtdata *rcd)
5222 {
5223 u64 mask;
5224 u32 is = IS_RCVURGENT_START + rcd->ctxt;
5225 u8 bit = is % 64;
5226
5227 mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5228 return !(mask & BIT_ULL(bit));
5229 }
5236
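/*
 * Append the string s to the buffer buf, preceded by a comma if this is
 * not the first item.  *curp is the current write position and *lenp the
 * remaining room; both are updated.  Return 0 on success, 1 if out of room.
 */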
5237 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5238 {
5239 char *p = *curp;
5240 int len = *lenp;
5241 int result = 0;
5242 char c;
5243
5244
5245 if (p != buf) {
5246 if (len == 0) {
5247 result = 1;
5248 goto done;
5249 }
5250 *p++ = ',';
5251 len--;
5252 }
5253
5254
5255 while ((c = *s++) != 0) {
5256 if (len == 0) {
5257 result = 1;
5258 goto done;
5259 }
5260 *p++ = c;
5261 len--;
5262 }
5263
5264 done:
5265
5266 *curp = p;
5267 *lenp = len;
5268
5269 return result;
5270 }
5275
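/*
 * Using the given flag table, print a comma separated string of the set
 * flag names into the buffer.  Any remaining unnamed bits are printed as
 * a hex value, and the string ends in '*' if the buffer is too short.
 */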
5276 static char *flag_string(char *buf, int buf_len, u64 flags,
5277 struct flag_table *table, int table_size)
5278 {
5279 char extra[32];
5280 char *p = buf;
5281 int len = buf_len;
5282 int no_room = 0;
5283 int i;
5284
5285
5286 if (len < 2)
5287 return "";
5288
5289 len--;
5290 for (i = 0; i < table_size; i++) {
5291 if (flags & table[i].flag) {
5292 no_room = append_str(buf, &p, &len, table[i].str);
5293 if (no_room)
5294 break;
5295 flags &= ~table[i].flag;
5296 }
5297 }
5298
5299
5300 if (!no_room && flags) {
5301 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5302 no_room = append_str(buf, &p, &len, extra);
5303 }
5304
5305
5306 if (no_room) {
5307
5308 if (len == 0)
5309 --p;
5310 *p++ = '*';
5311 }
5312
5313
5314 *p = 0;
5315 return buf;
5316 }
5317
5318
5319 static const char * const cce_misc_names[] = {
5320 "CceErrInt",
5321 "RxeErrInt",
5322 "MiscErrInt",
5323 "Reserved3",
5324 "PioErrInt",
5325 "SDmaErrInt",
5326 "EgressErrInt",
5327 "TxeErrInt"
5328 };
5332
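/* return the miscellaneous error interrupt name */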
5333 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5334 {
5335 if (source < ARRAY_SIZE(cce_misc_names))
5336 strncpy(buf, cce_misc_names[source], bsize);
5337 else
5338 snprintf(buf, bsize, "Reserved%u",
5339 source + IS_GENERAL_ERR_START);
5340
5341 return buf;
5342 }
5346
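/* return the SDMA engine error interrupt name */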
5347 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5348 {
5349 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5350 return buf;
5351 }
5355
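/* return the send context error interrupt name */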
5356 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5357 {
5358 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5359 return buf;
5360 }
5361
5362 static const char * const various_names[] = {
5363 "PbcInt",
5364 "GpioAssertInt",
5365 "Qsfp1Int",
5366 "Qsfp2Int",
5367 "TCritInt"
5368 };
5372
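/* return the various interrupt name */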
5373 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5374 {
5375 if (source < ARRAY_SIZE(various_names))
5376 strncpy(buf, various_names[source], bsize);
5377 else
5378 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5379 return buf;
5380 }
5384
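/* return the DC interrupt name */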
5385 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5386 {
5387 static const char * const dc_int_names[] = {
5388 "common",
5389 "lcb",
5390 "8051",
5391 "lbm"
5392 };
5393
5394 if (source < ARRAY_SIZE(dc_int_names))
5395 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5396 else
5397 snprintf(buf, bsize, "DCInt%u", source);
5398 return buf;
5399 }
5400
5401 static const char * const sdma_int_names[] = {
5402 "SDmaInt",
5403 "SdmaIdleInt",
5404 "SdmaProgressInt",
5405 };
5409
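/* return the SDMA engine interrupt name */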
5410 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5411 {
5412
5413 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5414
5415 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5416
5417 if (likely(what < 3))
5418 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5419 else
5420 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5421 return buf;
5422 }
5426
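/* return the receive available interrupt name */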
5427 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5428 {
5429 snprintf(buf, bsize, "RcvAvailInt%u", source);
5430 return buf;
5431 }
5435
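/* return the receive urgent interrupt name */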
5436 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5437 {
5438 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5439 return buf;
5440 }
5444
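/* return the send credit interrupt name */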
5445 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5446 {
5447 snprintf(buf, bsize, "SendCreditInt%u", source);
5448 return buf;
5449 }
5453
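/* return the reserved interrupt name */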
5454 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5455 {
5456 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5457 return buf;
5458 }
5459
5460 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5461 {
5462 return flag_string(buf, buf_len, flags,
5463 cce_err_status_flags,
5464 ARRAY_SIZE(cce_err_status_flags));
5465 }
5466
5467 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5468 {
5469 return flag_string(buf, buf_len, flags,
5470 rxe_err_status_flags,
5471 ARRAY_SIZE(rxe_err_status_flags));
5472 }
5473
5474 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5475 {
5476 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5477 ARRAY_SIZE(misc_err_status_flags));
5478 }
5479
5480 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5481 {
5482 return flag_string(buf, buf_len, flags,
5483 pio_err_status_flags,
5484 ARRAY_SIZE(pio_err_status_flags));
5485 }
5486
5487 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5488 {
5489 return flag_string(buf, buf_len, flags,
5490 sdma_err_status_flags,
5491 ARRAY_SIZE(sdma_err_status_flags));
5492 }
5493
5494 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5495 {
5496 return flag_string(buf, buf_len, flags,
5497 egress_err_status_flags,
5498 ARRAY_SIZE(egress_err_status_flags));
5499 }
5500
5501 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5502 {
5503 return flag_string(buf, buf_len, flags,
5504 egress_err_info_flags,
5505 ARRAY_SIZE(egress_err_info_flags));
5506 }
5507
5508 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5509 {
5510 return flag_string(buf, buf_len, flags,
5511 send_err_status_flags,
5512 ARRAY_SIZE(send_err_status_flags));
5513 }
5514
5515 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5516 {
5517 char buf[96];
5518 int i = 0;
5523
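/*
 * For most of these errors, there is nothing that can be done except
 * report or record it.
 */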
5524 dd_dev_info(dd, "CCE Error: %s\n",
5525 cce_err_status_string(buf, sizeof(buf), reg));
5526
5527 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5528 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5529
5530
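/* this error requires a manual drop into SPC freeze mode */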
5531 start_freeze_handling(dd->pport, FREEZE_SELF);
5532 }
5533
5534 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5535 if (reg & (1ull << i)) {
5536 incr_cntr64(&dd->cce_err_status_cnt[i]);
5537
5538 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5539 }
5540 }
5541 }
5546
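/*
 * Check the receive overflow counter periodically - it has no interrupt
 * of its own.  The check interval is in seconds.
 */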
5547 #define RCVERR_CHECK_TIME 10
5548 static void update_rcverr_timer(struct timer_list *t)
5549 {
5550 struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5551 struct hfi1_pportdata *ppd = dd->pport;
5552 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5553
5554 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5555 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5556 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5557 set_link_down_reason(
5558 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5559 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5560 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5561 }
5562 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5563
5564 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5565 }
5566
5567 static int init_rcverr(struct hfi1_devdata *dd)
5568 {
5569 timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5570
5571 dd->rcv_ovfl_cnt = 0;
5572 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5573 }
5574
5575 static void free_rcverr(struct hfi1_devdata *dd)
5576 {
5577 if (dd->rcverr_timer.function)
5578 del_timer_sync(&dd->rcverr_timer);
5579 }
5580
5581 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5582 {
5583 char buf[96];
5584 int i = 0;
5585
5586 dd_dev_info(dd, "Receive Error: %s\n",
5587 rxe_err_status_string(buf, sizeof(buf), reg));
5588
5589 if (reg & ALL_RXE_FREEZE_ERR) {
5590 int flags = 0;
5595
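/*
 * Freeze mode recovery is disabled for the errors
 * in RXE_FREEZE_ABORT_MASK
 */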
5596 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5597 flags = FREEZE_ABORT;
5598
5599 start_freeze_handling(dd->pport, flags);
5600 }
5601
5602 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5603 if (reg & (1ull << i))
5604 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5605 }
5606 }
5607
5608 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5609 {
5610 char buf[96];
5611 int i = 0;
5612
dd_dev_info(dd, "Misc Error: %s\n",
5614 misc_err_status_string(buf, sizeof(buf), reg));
5615 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5616 if (reg & (1ull << i))
5617 incr_cntr64(&dd->misc_err_status_cnt[i]);
5618 }
5619 }
5620
5621 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5622 {
5623 char buf[96];
5624 int i = 0;
5625
5626 dd_dev_info(dd, "PIO Error: %s\n",
5627 pio_err_status_string(buf, sizeof(buf), reg));
5628
5629 if (reg & ALL_PIO_FREEZE_ERR)
5630 start_freeze_handling(dd->pport, 0);
5631
5632 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5633 if (reg & (1ull << i))
5634 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5635 }
5636 }
5637
5638 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5639 {
5640 char buf[96];
5641 int i = 0;
5642
5643 dd_dev_info(dd, "SDMA Error: %s\n",
5644 sdma_err_status_string(buf, sizeof(buf), reg));
5645
5646 if (reg & ALL_SDMA_FREEZE_ERR)
5647 start_freeze_handling(dd->pport, 0);
5648
5649 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5650 if (reg & (1ull << i))
5651 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5652 }
5653 }
5654
5655 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5656 {
5657 incr_cntr64(&ppd->port_xmit_discards);
5658 }
5659
5660 static void count_port_inactive(struct hfi1_devdata *dd)
5661 {
5662 __count_port_discards(dd->pport);
5663 }
5673
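/*
 * Read and clear SEND_EGRESS_ERR_INFO, report what was found, and count
 * any port discard errors against the given VL (VL 15 maps to C_VL_15;
 * a negative vl means the VL is unknown).
 */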
5674 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5675 int vl)
5676 {
5677 struct hfi1_pportdata *ppd = dd->pport;
5678 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE);
5679 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5680 char buf[96];
5681
5682
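/* clear down all observed info as quickly as possible after read */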
5683 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5684
5685 dd_dev_info(dd,
5686 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5687 info, egress_err_info_string(buf, sizeof(buf), info), src);
5688
5689
5690 if (info & PORT_DISCARD_EGRESS_ERRS) {
5691 int weight, i;
5714
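/*
 * Count all applicable discard bits as individual errors and
 * attribute them to the packet that triggered this handler.
 * This is an approximation: the info register accumulates bits
 * from any number of packets, so repeats of the same error are
 * missed and bits may really belong to packets on other VLs.
 */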
5715 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5716 for (i = 0; i < weight; i++) {
5717 __count_port_discards(ppd);
5718 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5719 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5720 else if (vl == 15)
5721 incr_cntr64(&ppd->port_xmit_discards_vl
5722 [C_VL_15]);
5723 }
5724 }
5725 }
5730
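/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register.  Does it represent a 'port inactive' error?
 */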
5731 static inline int port_inactive_err(u64 posn)
5732 {
5733 return (posn >= SEES(TX_LINKDOWN) &&
5734 posn <= SEES(TX_INCORRECT_LINK_STATE));
5735 }
5740
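/*
 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
 * register.  Does it represent a 'disallowed packet' error?
 */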
5741 static inline int disallowed_pkt_err(int posn)
5742 {
5743 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5744 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5745 }
5751
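/*
 * Input value is a bit position of one of the SDMA engine disallowed
 * packet errors.  Return which engine.  Use of this must be guarded by
 * disallowed_pkt_err().
 */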
5752 static inline int disallowed_pkt_engine(int posn)
5753 {
5754 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5755 }
5760
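/*
 * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
 * be done.
 */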
5761 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5762 {
5763 struct sdma_vl_map *m;
5764 int vl;
5765
5766
5767 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5768 return -1;
5769
5770 rcu_read_lock();
5771 m = rcu_dereference(dd->sdma_map);
5772 vl = m->engine_to_vl[engine];
5773 rcu_read_unlock();
5774
5775 return vl;
5776 }
5781
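/*
 * Translate the send context (software index) into a VL.  Return -1 if
 * the translation cannot be done.
 */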
5782 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5783 {
5784 struct send_context_info *sci;
5785 struct send_context *sc;
5786 int i;
5787
5788 sci = &dd->send_contexts[sw_index];
5789
5790
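/* only deal with kernel and VL15 send contexts */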
5791 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5792 return -1;
5793
5794 sc = sci->sc;
5795 if (!sc)
5796 return -1;
5797 if (dd->vld[15].sc == sc)
5798 return 15;
5799 for (i = 0; i < num_vls; i++)
5800 if (dd->vld[i].sc == sc)
5801 return i;
5802
5803 return -1;
5804 }
5805
5806 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5807 {
5808 u64 reg_copy = reg, handled = 0;
5809 char buf[96];
5810 int i = 0;
5811
5812 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5813 start_freeze_handling(dd->pport, 0);
5814 else if (is_ax(dd) &&
5815 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5816 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5817 start_freeze_handling(dd->pport, 0);
5818
5819 while (reg_copy) {
5820 int posn = fls64(reg_copy);
5821
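/* fls64() returns a 1-based offset, but we want it zero based */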
5822 int shift = posn - 1;
5823 u64 mask = 1ULL << shift;
5824
5825 if (port_inactive_err(shift)) {
5826 count_port_inactive(dd);
5827 handled |= mask;
5828 } else if (disallowed_pkt_err(shift)) {
5829 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5830
5831 handle_send_egress_err_info(dd, vl);
5832 handled |= mask;
5833 }
5834 reg_copy &= ~mask;
5835 }
5836
5837 reg &= ~handled;
5838
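/* report any remaining errors */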
5839 if (reg)
5840 dd_dev_info(dd, "Egress Error: %s\n",
5841 egress_err_status_string(buf, sizeof(buf), reg));
5842
5843 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5844 if (reg & (1ull << i))
5845 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5846 }
5847 }
5848
5849 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5850 {
5851 char buf[96];
5852 int i = 0;
5853
5854 dd_dev_info(dd, "Send Error: %s\n",
5855 send_err_status_string(buf, sizeof(buf), reg));
5856
5857 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5858 if (reg & (1ull << i))
5859 incr_cntr64(&dd->send_err_status_cnt[i]);
5860 }
5861 }
5866
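/*
 * The maximum number of times the error clear down will loop before
 * blocking a repeating error.
 */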
5867 #define MAX_CLEAR_COUNT 20
5879
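/*
 * Clear and handle an error register.  All error interrupts are funneled
 * through here to have a central location to correctly handle single-
 * or multi-shot errors.
 *
 * For non per-context registers, call this routine with a context value
 * of 0 so the per-context offset is zero.
 *
 * If the handler loops too many times, assume that something is wrong
 * and can't be fixed, so mask the error bits.
 */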
5880 static void interrupt_clear_down(struct hfi1_devdata *dd,
5881 u32 context,
5882 const struct err_reg_info *eri)
5883 {
5884 u64 reg;
5885 u32 count;
5886
5887
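/* read in a loop until no more errors are seen */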
5888 count = 0;
5889 while (1) {
5890 reg = read_kctxt_csr(dd, context, eri->status);
5891 if (reg == 0)
5892 break;
5893 write_kctxt_csr(dd, context, eri->clear, reg);
5894 if (likely(eri->handler))
5895 eri->handler(dd, context, reg);
5896 count++;
5897 if (count > MAX_CLEAR_COUNT) {
5898 u64 mask;
5899
5900 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5901 eri->desc, reg);
5905
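/*
 * Read-modify-write so any other masked bits
 * stay masked.
 */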
5906 mask = read_kctxt_csr(dd, context, eri->mask);
5907 mask &= ~reg;
5908 write_kctxt_csr(dd, context, eri->mask, mask);
5909 break;
5910 }
5911 }
5912 }
5916
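/*
 * CCE block "misc" interrupt.
 */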
5917 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5918 {
5919 const struct err_reg_info *eri = &misc_errs[source];
5920
5921 if (eri->handler) {
5922 interrupt_clear_down(dd, 0, eri);
5923 } else {
5924 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5925 source);
5926 }
5927 }
5928
5929 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5930 {
5931 return flag_string(buf, buf_len, flags,
5932 sc_err_status_flags,
5933 ARRAY_SIZE(sc_err_status_flags));
5934 }
5944
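/*
 * Send context error interrupt.
 *
 * All send context errors cause the send context to halt.  The normal
 * clear-down mechanism cannot be used because the error bits cannot be
 * cleared until several other long-running items are done first.  This
 * is OK because with the context halted, nothing else will happen on it
 * anyway.
 */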
5945 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5946 unsigned int hw_context)
5947 {
5948 struct send_context_info *sci;
5949 struct send_context *sc;
5950 char flags[96];
5951 u64 status;
5952 u32 sw_index;
5953 int i = 0;
5954 unsigned long irq_flags;
5955
5956 sw_index = dd->hw_to_sw[hw_context];
5957 if (sw_index >= dd->num_send_contexts) {
5958 dd_dev_err(dd,
5959 "out of range sw index %u for send context %u\n",
5960 sw_index, hw_context);
5961 return;
5962 }
5963 sci = &dd->send_contexts[sw_index];
5964 spin_lock_irqsave(&dd->sc_lock, irq_flags);
5965 sc = sci->sc;
5966 if (!sc) {
5967 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5968 sw_index, hw_context);
5969 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5970 return;
5971 }
5972
5973
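/* tell the software that a halt has begun */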
5974 sc_stop(sc, SCF_HALTED);
5975
5976 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5977
5978 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5979 send_context_err_status_string(flags, sizeof(flags),
5980 status));
5981
5982 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5983 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5988
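/*
 * Automatically restart halted kernel contexts out of interrupt
 * context.  User contexts must ask the driver to restart the context.
 */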
5989 if (sc->type != SC_USER)
5990 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5991 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5997
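/*
 * Update the counters for the corresponding status bits.
 * Note that these particular counters are aggregated over all
 * send contexts.
 */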
5998 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5999 if (status & (1ull << i))
6000 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6001 }
6002 }
6003
6004 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6005 unsigned int source, u64 status)
6006 {
6007 struct sdma_engine *sde;
6008 int i = 0;
6009
6010 sde = &dd->per_sdma[source];
6011 #ifdef CONFIG_SDMA_VERBOSITY
6012 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6013 slashstrip(__FILE__), __LINE__, __func__);
6014 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6015 sde->this_idx, source, (unsigned long long)status);
6016 #endif
6017 sde->err_cnt++;
6018 sdma_engine_error(sde, status);
6024
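/*
 * Update the counters for the corresponding status bits.
 * Note that these particular counters are aggregated over
 * all SDMA engines.
 */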
6025 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6026 if (status & (1ull << i))
6027 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6028 }
6029 }
6033
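/*
 * CCE block SDMA error interrupt.
 */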
6034 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6035 {
6036 #ifdef CONFIG_SDMA_VERBOSITY
6037 struct sdma_engine *sde = &dd->per_sdma[source];
6038
6039 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6040 slashstrip(__FILE__), __LINE__, __func__);
6041 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6042 source);
6043 sdma_dumpstate(sde);
6044 #endif
6045 interrupt_clear_down(dd, source, &sdma_eng_err);
6046 }
6050
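/*
 * CCE block "various" interrupt.
 */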
6051 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6052 {
6053 const struct err_reg_info *eri = &various_err[source];
6059
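/*
 * TCritInt cannot go through interrupt_clear_down() because it is not
 * a second tier interrupt.  The handler is called directly.
 */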
6060 if (source == TCRIT_INT_SOURCE)
6061 handle_temp_err(dd);
6062 else if (eri->handler)
6063 interrupt_clear_down(dd, 0, eri);
6064 else
6065 dd_dev_info(dd,
6066 "%s: Unimplemented/reserved interrupt %d\n",
6067 __func__, source);
6068 }
6069
6070 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6071 {
6072
6073 struct hfi1_pportdata *ppd = dd->pport;
6074 unsigned long flags;
6075 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6076
6077 if (reg & QSFP_HFI0_MODPRST_N) {
6078 if (!qsfp_mod_present(ppd)) {
6079 dd_dev_info(dd, "%s: QSFP module removed\n",
6080 __func__);
6081
6082 ppd->driver_link_ready = 0;
6087
6088 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6092
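/* the module is gone - invalidate all cached QSFP state */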
6093 ppd->qsfp_info.cache_valid = 0;
6094 ppd->qsfp_info.reset_needed = 0;
6095 ppd->qsfp_info.limiting_active = 0;
6096 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6097 flags);
6098
6099 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6100 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6101
6102 if ((ppd->offline_disabled_reason >
6103 HFI1_ODR_MASK(
6104 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6105 (ppd->offline_disabled_reason ==
6106 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6107 ppd->offline_disabled_reason =
6108 HFI1_ODR_MASK(
6109 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6110
6111 if (ppd->host_link_state == HLS_DN_POLL) {
6117
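/*
 * The link is still in POLL, so the normal link down
 * processing will not happen.  Queue it from here.
 */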
6118 queue_work(ppd->link_wq, &ppd->link_down_work);
6119 }
6120 } else {
6121 dd_dev_info(dd, "%s: QSFP module inserted\n",
6122 __func__);
6123
6124 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6125 ppd->qsfp_info.cache_valid = 0;
6126 ppd->qsfp_info.cache_refresh_required = 1;
6127 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6128 flags);
6133
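/*
 * Stop inverting the ModPresent pin so the next removal
 * of the cable can be detected.
 */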
6134 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6135 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6136 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6137
6138 ppd->offline_disabled_reason =
6139 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6140 }
6141 }
6142
6143 if (reg & QSFP_HFI0_INT_N) {
6144 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6145 __func__);
6146 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6147 ppd->qsfp_info.check_interrupt_flags = 1;
6148 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6149 }
6150
6151
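/* schedule the QSFP work only if there is a cable attached */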
6152 if (qsfp_mod_present(ppd))
6153 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6154 }
6155
6156 static int request_host_lcb_access(struct hfi1_devdata *dd)
6157 {
6158 int ret;
6159
6160 ret = do_8051_command(dd, HCMD_MISC,
6161 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6162 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6163 if (ret != HCMD_SUCCESS) {
6164 dd_dev_err(dd, "%s: command failed with error %d\n",
6165 __func__, ret);
6166 }
6167 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6168 }
6169
6170 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6171 {
6172 int ret;
6173
6174 ret = do_8051_command(dd, HCMD_MISC,
6175 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6176 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6177 if (ret != HCMD_SUCCESS) {
6178 dd_dev_err(dd, "%s: command failed with error %d\n",
6179 __func__, ret);
6180 }
6181 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6182 }
6187
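/*
 * Set the LCB selector - allow host access.  The DCC selector always
 * points to the host.
 */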
6188 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6189 {
6190 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6191 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6192 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6193 }
6198
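/*
 * Set the LCB selector - allow 8051 access.  The DCC selector always
 * points to the host.
 */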
6199 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6200 {
6201 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6202 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6203 }
6214
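/*
 * Acquire LCB access from the 8051.  If the host already has access,
 * just increment a counter.  Otherwise, inform the 8051 that the host
 * is taking access.
 *
 * Returns:
 *	0 on success
 *	-EBUSY if the 8051 has control and cannot be disturbed
 */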
6215 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6216 {
6217 struct hfi1_pportdata *ppd = dd->pport;
6218 int ret = 0;
6225
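/*
 * Use the host link state lock so the operation of this routine
 * (link state check, selector change, count increment) can occur as
 * a unit against a link state change.  Otherwise there is a race
 * between the state change and the count increment.
 */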
6226 if (sleep_ok) {
6227 mutex_lock(&ppd->hls_lock);
6228 } else {
6229 while (!mutex_trylock(&ppd->hls_lock))
6230 udelay(1);
6231 }
6232
6233
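/* this access is valid only when the link is up */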
6234 if (ppd->host_link_state & HLS_DOWN) {
6235 dd_dev_info(dd, "%s: link state %s not up\n",
6236 __func__, link_state_name(ppd->host_link_state));
6237 ret = -EBUSY;
6238 goto done;
6239 }
6240
6241 if (dd->lcb_access_count == 0) {
6242 ret = request_host_lcb_access(dd);
6243 if (ret) {
6244 dd_dev_err(dd,
6245 "%s: unable to acquire LCB access, err %d\n",
6246 __func__, ret);
6247 goto done;
6248 }
6249 set_host_lcb_access(dd);
6250 }
6251 dd->lcb_access_count++;
6252 done:
6253 mutex_unlock(&ppd->hls_lock);
6254 return ret;
6255 }
6264
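/*
 * Release LCB access by decrementing the use count.  If the count is
 * moving from 1 to 0, inform the 8051 that it has control back.
 *
 * Returns:
 *	0 on success
 *	-EBUSY if the 8051 did not take back control
 */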
6265 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6266 {
6267 int ret = 0;
6273
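/*
 * Use the host link state lock because the acquire needed it.
 * Here, we only need to keep (selector change, count decrement)
 * as a unit.
 */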
6274 if (sleep_ok) {
6275 mutex_lock(&dd->pport->hls_lock);
6276 } else {
6277 while (!mutex_trylock(&dd->pport->hls_lock))
6278 udelay(1);
6279 }
6280
6281 if (dd->lcb_access_count == 0) {
6282 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6283 __func__);
6284 goto done;
6285 }
6286
6287 if (dd->lcb_access_count == 1) {
6288 set_8051_lcb_access(dd);
6289 ret = request_8051_lcb_access(dd);
6290 if (ret) {
6291 dd_dev_err(dd,
6292 "%s: unable to release LCB access, err %d\n",
6293 __func__, ret);
6294
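/* restore host access if the grant didn't work */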
6295 set_host_lcb_access(dd);
6296 goto done;
6297 }
6298 }
6299 dd->lcb_access_count--;
6300 done:
6301 mutex_unlock(&dd->pport->hls_lock);
6302 return ret;
6303 }
6313
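/*
 * Initialize LCB access variables and state.  Called during driver
 * load, after most of the initialization is finished.
 */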
6314 static void init_lcb_access(struct hfi1_devdata *dd)
6315 {
6316 dd->lcb_access_count = 0;
6317 }
6321
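/*
 * Write a response back to an 8051 request.
 */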
6322 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6323 {
6324 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6325 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6326 (u64)return_code <<
6327 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6328 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6329 }
6333
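/*
 * Handle requests from the 8051.
 */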
6334 static void handle_8051_request(struct hfi1_pportdata *ppd)
6335 {
6336 struct hfi1_devdata *dd = ppd->dd;
6337 u64 reg;
6338 u16 data = 0;
6339 u8 type;
6340
6341 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6342 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6343 return;
6344
6345
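/* zero out COMPLETED so a new response will be seen */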
6346 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6347
6348
6349 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6350 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6351 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6352 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6353
6354 switch (type) {
6355 case HREQ_LOAD_CONFIG:
6356 case HREQ_SAVE_CONFIG:
6357 case HREQ_READ_CONFIG:
6358 case HREQ_SET_TX_EQ_ABS:
6359 case HREQ_SET_TX_EQ_REL:
6360 case HREQ_ENABLE:
6361 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6362 type);
6363 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6364 break;
6365 case HREQ_LCB_RESET:
6366
6367 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6368
6369 (void)read_csr(dd, DCC_CFG_RESET);
6370
6371 udelay(1);
6372
6373 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6374 hreq_response(dd, HREQ_SUCCESS, 0);
6375
6376 break;
6377 case HREQ_CONFIG_DONE:
6378 hreq_response(dd, HREQ_SUCCESS, 0);
6379 break;
6380
6381 case HREQ_INTERFACE_TEST:
6382 hreq_response(dd, HREQ_SUCCESS, data);
6383 break;
6384 default:
6385 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6386 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6387 break;
6388 }
6389 }
6393
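/*
 * Set up the allocation unit (AU) field of SEND_CM_GLOBAL_CREDIT,
 * leaving the other fields unchanged.
 */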
6394 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6395 {
6396 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6397
6398
6399 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6400 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6401 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6402 }
6408
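/*
 * Set up the initial VL15 credits of the remote.  Assumes the rest of
 * the CM credit registers are zero from a previous global or credit
 * reset.  The shared limit for VL15 will always be 0.
 */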
6409 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6410 {
6411 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6412
6413
6414 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6415 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6420
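/*
 * Set the total limit to be equal to the VL15 credits.
 * Leave the shared limit at 0.
 */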
6421 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6422 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6423
6424 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6425 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6426 }
6431
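/*
 * Zero all credit details from the previous connection and
 * reset the CM manager's internal counters.
 */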
6432 void reset_link_credits(struct hfi1_devdata *dd)
6433 {
6434 int i;
6435
6436
6437 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6438 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6439 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6440 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6441
6442 pio_send_control(dd, PSC_CM_RESET);
6443
6444 dd->vl15buf_cached = 0;
6445 }
6446
6447
6448 static u32 vcu_to_cu(u8 vcu)
6449 {
6450 return 1 << vcu;
6451 }
6452
6453
6454 static u8 cu_to_vcu(u32 cu)
6455 {
6456 return ilog2(cu);
6457 }
6458
6459
6460 static u32 vau_to_au(u8 vau)
6461 {
6462 return 8 * (1 << vau);
6463 }
6464
6465 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6466 {
6467 ppd->sm_trap_qp = 0x0;
6468 ppd->sa_qp = 0x1;
6469 }
6473
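/*
 * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
 */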
6474 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6475 {
6476 u64 reg;
6477
6478
6479 write_csr(dd, DC_LCB_CFG_RUN, 0);
6480
6481 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6482 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6483
6484 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6485 reg = read_csr(dd, DCC_CFG_RESET);
6486 write_csr(dd, DCC_CFG_RESET, reg |
6487 DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6488 (void)read_csr(dd, DCC_CFG_RESET);
6489 if (!abort) {
6490 udelay(1);
6491 write_csr(dd, DCC_CFG_RESET, reg);
6492 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6493 }
6494 }
6505
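/*
 * Put the 8051 and the LCB into reset.  The caller must hold
 * dd->dc8051_lock; dc_shutdown() is the locking wrapper.  Safe to
 * call if the DC is already shut down.
 */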
6506 static void _dc_shutdown(struct hfi1_devdata *dd)
6507 {
6508 lockdep_assert_held(&dd->dc8051_lock);
6509
6510 if (dd->dc_shutdown)
6511 return;
6512
6513 dd->dc_shutdown = 1;
6514
6515 lcb_shutdown(dd, 1);
6520
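/* the LCB is down - put the 8051 itself into reset */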
6521 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6522 }
6523
6524 static void dc_shutdown(struct hfi1_devdata *dd)
6525 {
6526 mutex_lock(&dd->dc8051_lock);
6527 _dc_shutdown(dd);
6528 mutex_unlock(&dd->dc8051_lock);
6529 }
6536
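/*
 * Bring the 8051 and the LCB back out of reset.  The caller must hold
 * dd->dc8051_lock; dc_start() is the locking wrapper.  Does nothing if
 * the DC is not shut down.
 */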
6537 static void _dc_start(struct hfi1_devdata *dd)
6538 {
6539 lockdep_assert_held(&dd->dc8051_lock);
6540
6541 if (!dd->dc_shutdown)
6542 return;
6543
6544
6545 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6546
6547 if (wait_fm_ready(dd, TIMEOUT_8051_START))
6548 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6549 __func__);
6550
6551
6552 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6553
6554 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6555 dd->dc_shutdown = 0;
6556 }
6557
6558 static void dc_start(struct hfi1_devdata *dd)
6559 {
6560 mutex_lock(&dd->dc8051_lock);
6561 _dc_start(dd);
6562 mutex_unlock(&dd->dc8051_lock);
6563 }
6567
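/*
 * These LCB adjustments are for the Aurora SerDes core in the FPGA
 * emulator.
 */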
6568 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6569 {
6570 u64 rx_radr, tx_radr;
6571 u32 version;
6572
6573 if (dd->icode != ICODE_FPGA_EMULATION)
6574 return;
6582
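/* no LCB adjustments are needed on the "s" emulator */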
6583 if (is_emulator_s(dd))
6584 return;
6585
6586
6587 version = emulator_rev(dd);
6588 if (!is_ax(dd))
6589 version = 0x2d;
6590
6591 if (version <= 0x12) {
6599 rx_radr =
6600 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6601 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6602 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6606
6607 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6608 } else if (version <= 0x18) {
6609
6610
6611 rx_radr =
6612 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6613 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6614 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6615 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6616 } else if (version == 0x19) {
6617
6618
6619 rx_radr =
6620 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6621 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6622 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6623 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6624 } else if (version == 0x1a) {
6625
6626
6627 rx_radr =
6628 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6629 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6630 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6631 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6632 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6633 } else {
6634
6635
6636 rx_radr =
6637 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6638 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6639 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6640 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6641 }
6642
6643 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6644
6645 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6646 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6647 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6648 }
6654
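/*
 * Handle a SMA idle message
 *
 * This is a work-queue function outside of the interrupt.
 */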
6655 void handle_sma_message(struct work_struct *work)
6656 {
6657 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6658 sma_message_work);
6659 struct hfi1_devdata *dd = ppd->dd;
6660 u64 msg;
6661 int ret;
6666
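/*
 * msg is the data portion of the 40-bit idle message - the command
 * code has already been stripped off
 */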
6667 ret = read_idle_sma(dd, &msg);
6668 if (ret)
6669 return;
6670 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6673
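/* react to the SMA message */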
6674 switch (msg & 0xff) {
6675 case SMA_IDLE_ARM:
6681
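/*
 * Only expected in INIT or ARMED; discard otherwise.
 */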
6682 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6683 ppd->neighbor_normal = 1;
6684 break;
6685 case SMA_IDLE_ACTIVE:
6691
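/*
 * Only activate from ARMED with the active optimization
 * enabled; discard otherwise.
 */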
6692 if (ppd->host_link_state == HLS_UP_ARMED &&
6693 ppd->is_active_optimize_enabled) {
6694 ppd->neighbor_normal = 1;
6695 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6696 if (ret)
6697 dd_dev_err(
6698 dd,
6699 "%s: received Active SMA idle message, couldn't set link to Active\n",
6700 __func__);
6701 }
6702 break;
6703 default:
6704 dd_dev_err(dd,
6705 "%s: received unexpected SMA idle message 0x%llx\n",
6706 __func__, msg);
6707 break;
6708 }
6709 }
6710
6711 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6712 {
6713 u64 rcvctrl;
6714 unsigned long flags;
6715
6716 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6717 rcvctrl = read_csr(dd, RCV_CTRL);
6718 rcvctrl |= add;
6719 rcvctrl &= ~clear;
6720 write_csr(dd, RCV_CTRL, rcvctrl);
6721 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6722 }
6723
6724 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6725 {
6726 adjust_rcvctrl(dd, add, 0);
6727 }
6728
6729 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6730 {
6731 adjust_rcvctrl(dd, 0, clear);
6732 }
6736
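/*
 * Called from all interrupt handlers to start handling an SPC freeze.
 */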
6737 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6738 {
6739 struct hfi1_devdata *dd = ppd->dd;
6740 struct send_context *sc;
6741 int i;
6742 int sc_flags;
6743
6744 if (flags & FREEZE_SELF)
6745 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6746
6747
6748 dd->flags |= HFI1_FROZEN;
6749
6750
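/* notify all SDMA engines that they are going into a freeze */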
6751 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6752
6753 sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6754 SCF_LINK_DOWN : 0);
6755
6756 for (i = 0; i < dd->num_send_contexts; i++) {
6757 sc = dd->send_contexts[i].sc;
6758 if (sc && (sc->flags & SCF_ENABLED))
6759 sc_stop(sc, sc_flags);
6760 }
6761
6762
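/* send contexts are frozen - notify user space */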
6763 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6764
6765 if (flags & FREEZE_ABORT) {
6766 dd_dev_err(dd,
6767 "Aborted freeze recovery. Please REBOOT system\n");
6768 return;
6769 }
6770
6771 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6772 }
6780
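/*
 * Wait until all blocks indicate that they have frozen or unfrozen,
 * depending on the "freeze" parameter.
 *
 * No need to return an error if it times out, our only option
 * is to proceed anyway.
 */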
6781 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6782 {
6783 unsigned long timeout;
6784 u64 reg;
6785
6786 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6787 while (1) {
6788 reg = read_csr(dd, CCE_STATUS);
6789 if (freeze) {
6790
6791 if ((reg & ALL_FROZE) == ALL_FROZE)
6792 return;
6793 } else {
6794
6795 if ((reg & ALL_FROZE) == 0)
6796 return;
6797 }
6798
6799 if (time_after(jiffies, timeout)) {
6800 dd_dev_err(dd,
6801 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6802 freeze ? "" : "un", reg & ALL_FROZE,
6803 freeze ? ALL_FROZE : 0ull);
6804 return;
6805 }
6806 usleep_range(80, 120);
6807 }
6808 }
6812
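/*
 * Do all freeze handling for the RXE block.
 */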
6813 static void rxe_freeze(struct hfi1_devdata *dd)
6814 {
6815 int i;
6816 struct hfi1_ctxtdata *rcd;
6817
6818
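/* disable port */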
6819 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6820
6821
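/* disable all receive contexts */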
6822 for (i = 0; i < dd->num_rcv_contexts; i++) {
6823 rcd = hfi1_rcd_get_by_index(dd, i);
6824 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6825 hfi1_rcd_put(rcd);
6826 }
6827 }
6834
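/*
 * Unfreeze handling for the RXE block - kernel contexts only.
 * This will also enable the port.  User contexts will do unfreeze
 * handling on a per-context basis as they call into the driver.
 */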
6835 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6836 {
6837 u32 rcvmask;
6838 u16 i;
6839 struct hfi1_ctxtdata *rcd;
6840
6841
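/* enable all kernel contexts */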
6842 for (i = 0; i < dd->num_rcv_contexts; i++) {
6843 rcd = hfi1_rcd_get_by_index(dd, i);
6844
6845
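/* ensure all non-user contexts (including vnic) are enabled */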
6846 if (!rcd ||
6847 (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6848 hfi1_rcd_put(rcd);
6849 continue;
6850 }
6851 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6852
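/* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */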
6853 rcvmask |= hfi1_rcvhdrtail_kvaddr(rcd) ?
6854 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6855 hfi1_rcvctrl(dd, rcvmask, rcd);
6856 hfi1_rcd_put(rcd);
6857 }
6858
6859
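/* enable port */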
6860 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6861 }
6867
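/*
 * Non-interrupt SPC freeze handling.
 *
 * This is a work-queue function outside of the triggering interrupt.
 */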
6868 void handle_freeze(struct work_struct *work)
6869 {
6870 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6871 freeze_work);
6872 struct hfi1_devdata *dd = ppd->dd;
6873
6874
6875 wait_for_freeze_status(dd, 1);
6879
6880 pio_freeze(dd);
6881
6882
6883 sdma_freeze(dd);
6887
6888 rxe_freeze(dd);
6893
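/*
 * Unfreeze the hardware - clear the freeze, wait for each
 * block's frozen bit to clear, then clear the frozen flag.
 */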
6894 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6895 wait_for_freeze_status(dd, 0);
6896
6897 if (is_ax(dd)) {
6898 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6899 wait_for_freeze_status(dd, 1);
6900 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6901 wait_for_freeze_status(dd, 0);
6902 }
6903
6904
6905 pio_kernel_unfreeze(dd);
6906
6907
6908 sdma_unfreeze(dd);
6912
6913 rxe_kernel_unfreeze(dd);
6927
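/*
 * The unfreeze procedure touches global device registers when
 * it disables and re-enables RXE.  Mark the device unfrozen
 * only after all of that is done so other parts of the driver
 * waiting for the device to unfreeze don't do things out of order.
 */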
6928 dd->flags &= ~HFI1_FROZEN;
6929 wake_up(&dd->event_queue);
6932 }
6942
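/**
 * update_xmit_counters - update PortXmitWait/PortVlXmitWait counters
 * @ppd: info of physical Hfi port
 * @link_width: new link width after link up or downgrade
 *
 * Update the PortXmitWait and PortVlXmitWait counters after
 * a link up or downgrade event to reflect a link width change.
 */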
6943 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6944 {
6945 int i;
6946 u16 tx_width;
6947 u16 link_speed;
6948
6949 tx_width = tx_link_width(link_width);
6950 link_speed = get_link_speed(ppd->link_speed_active);
6955
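/*
 * Update the xmit wait counters for each VL (C_VL_COUNT of them)
 * plus one more for the port as a whole.
 */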
6956 for (i = 0; i < C_VL_COUNT + 1; i++)
6957 get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6958 }
6964
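/*
 * Handle a link up interrupt from the 8051.
 *
 * This is a work-queue function outside of the interrupt.
 */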
6965 void handle_link_up(struct work_struct *work)
6966 {
6967 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6968 link_up_work);
6969 struct hfi1_devdata *dd = ppd->dd;
6970
6971 set_link_state(ppd, HLS_UP_INIT);
6972
6973
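/* cache the read of the round trip LTP count */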
6974 read_ltp_rtt(dd);
6978
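/*
 * OPA specifies that certain counters are cleared on a transition
 * to link up, so do that.
 */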
6979 clear_linkup_counters(dd);
6982
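/* and (re)set link up default values */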
6983 set_linkup_defaults(ppd);
6990
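/*
 * Set VL15 credits using the value cached from the VerifyCap
 * interrupt.  For quick linkup and the simulator that handler is
 * not called, and the vl15 credits are set elsewhere.
 */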
6991 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6992 set_up_vl15(dd, dd->vl15buf_cached);
6993
6994
6995 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6996
6997 dd_dev_err(dd,
6998 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6999 ppd->link_speed_active, ppd->link_speed_enabled);
7000 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
7001 OPA_LINKDOWN_REASON_SPEED_POLICY);
7002 set_link_state(ppd, HLS_DN_OFFLINE);
7003 start_link(ppd);
7004 }
7005 }
7010
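/*
 * Several pieces of LNI information were cached for SMA in ppd.
 * Reset these on link down.
 */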
7011 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7012 {
7013 ppd->neighbor_guid = 0;
7014 ppd->neighbor_port_number = 0;
7015 ppd->neighbor_type = 0;
7016 ppd->neighbor_fm_security = 0;
7017 }
7018
7019 static const char * const link_down_reason_strs[] = {
7020 [OPA_LINKDOWN_REASON_NONE] = "None",
7021 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7022 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7023 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7024 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7025 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7026 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7027 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7028 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7029 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7030 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7031 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7032 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7033 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7034 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7035 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7036 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7037 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7038 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7039 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7040 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7041 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7042 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7043 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7044 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7045 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7046 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7047 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7048 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7049 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7050 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7051 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7052 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7053 "Excessive buffer overrun",
7054 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7055 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7056 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7057 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7058 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7059 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7060 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7061 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7062 "Local media not installed",
7063 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7064 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7065 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7066 "End to end not installed",
7067 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7068 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7069 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7070 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7071 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7072 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7073 };
7074
7075
7076 static const char *link_down_reason_str(u8 reason)
7077 {
7078 const char *str = NULL;
7079
7080 if (reason < ARRAY_SIZE(link_down_reason_strs))
7081 str = link_down_reason_strs[reason];
7082 if (!str)
7083 str = "(invalid)";
7084
7085 return str;
7086 }
7087
7088
7089
7090
7091
7092
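/*
 * Handle a link-down event from the 8051: take the link offline, decode
 * and log the reason reported by the firmware, record the reason for the
 * SMA, and restart the link unless the QSFP module is no longer present.
 */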
7093 void handle_link_down(struct work_struct *work)
7094 {
7095 u8 lcl_reason, neigh_reason = 0;
7096 u8 link_down_reason;
7097 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7098 link_down_work);
7099 int was_up;
7100 static const char ldr_str[] = "Link down reason: ";
7101
7102 if ((ppd->host_link_state &
7103 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7104 ppd->port_type == PORT_TYPE_FIXED)
7105 ppd->offline_disabled_reason =
7106 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7107
7108
7109 was_up = !!(ppd->host_link_state & HLS_UP);
7110 set_link_state(ppd, HLS_DN_OFFLINE);
7111 xchg(&ppd->is_link_down_queued, 0);
7112
7113 if (was_up) {
7114 lcl_reason = 0;
7115
7116 read_link_down_reason(ppd->dd, &link_down_reason);
7117 switch (link_down_reason) {
7118 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7119
7120 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7121 ldr_str);
7122 break;
7123 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7124
7125
7126
7127
7128 read_planned_down_reason_code(ppd->dd, &neigh_reason);
7129 dd_dev_info(ppd->dd,
7130 "%sNeighbor link down message %d, %s\n",
7131 ldr_str, neigh_reason,
7132 link_down_reason_str(neigh_reason));
7133 break;
7134 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7135 dd_dev_info(ppd->dd,
7136 "%sHost requested link to go offline\n",
7137 ldr_str);
7138 break;
7139 default:
7140 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7141 ldr_str, link_down_reason);
7142 break;
7143 }
7144
7145
7146
7147
7148
7149 if (neigh_reason == 0)
7150 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7151 } else {
7152
7153 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7154 }
7155
7156 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7157
7158
7159 if (was_up && ppd->local_link_down_reason.sma == 0 &&
7160 ppd->neigh_link_down_reason.sma == 0) {
7161 ppd->local_link_down_reason.sma =
7162 ppd->local_link_down_reason.latest;
7163 ppd->neigh_link_down_reason.sma =
7164 ppd->neigh_link_down_reason.latest;
7165 }
7166
7167 reset_neighbor_info(ppd);
7168
7169
7170 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7171
7172
7173
7174
7175
7176 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7177 dc_shutdown(ppd->dd);
7178 else
7179 start_link(ppd);
7180 }
7181
7182 void handle_link_bounce(struct work_struct *work)
7183 {
7184 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7185 link_bounce_work);
7186
7187
7188
7189
7190 if (ppd->host_link_state & HLS_UP) {
7191 set_link_state(ppd, HLS_DN_OFFLINE);
7192 start_link(ppd);
7193 } else {
7194 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7195 __func__, link_state_name(ppd->host_link_state));
7196 }
7197 }
7198
7199
7200
7201
7202
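/*
 * Convert a CRC capability mask (CAP_CRC_*) into the matching
 * PORT_LTP_CRC_MODE_* bits.  16b CRC is always included.
 */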
7203 static int cap_to_port_ltp(int cap)
7204 {
7205 int port_ltp = PORT_LTP_CRC_MODE_16;
7206
7207 if (cap & CAP_CRC_14B)
7208 port_ltp |= PORT_LTP_CRC_MODE_14;
7209 if (cap & CAP_CRC_48B)
7210 port_ltp |= PORT_LTP_CRC_MODE_48;
7211 if (cap & CAP_CRC_12B_16B_PER_LANE)
7212 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7213
7214 return port_ltp;
7215 }
7216
7217
7218
7219
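/* Convert PORT_LTP_CRC_MODE_* bits back into a CAP_CRC_* capability mask. */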
7220 int port_ltp_to_cap(int port_ltp)
7221 {
7222 int cap_mask = 0;
7223
7224 if (port_ltp & PORT_LTP_CRC_MODE_14)
7225 cap_mask |= CAP_CRC_14B;
7226 if (port_ltp & PORT_LTP_CRC_MODE_48)
7227 cap_mask |= CAP_CRC_48B;
7228 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7229 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7230
7231 return cap_mask;
7232 }
7233
7234
7235
7236
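/* Convert an LCB CRC mode value into its PORT_LTP_CRC_MODE_* equivalent. */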
7237 static int lcb_to_port_ltp(int lcb_crc)
7238 {
7239 int port_ltp = 0;
7240
7241 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7242 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7243 else if (lcb_crc == LCB_CRC_48B)
7244 port_ltp = PORT_LTP_CRC_MODE_48;
7245 else if (lcb_crc == LCB_CRC_14B)
7246 port_ltp = PORT_LTP_CRC_MODE_14;
7247 else
7248 port_ltp = PORT_LTP_CRC_MODE_16;
7249
7250 return port_ltp;
7251 }
7252
7253 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7254 {
7255 if (ppd->pkeys[2] != 0) {
7256 ppd->pkeys[2] = 0;
7257 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7258 hfi1_event_pkey_change(ppd->dd, ppd->port);
7259 }
7260 }
7261
7262
7263
7264
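/*
 * Convert an 8051 lane count into an OPA_LINK_WIDTH_* value.  A count of
 * zero is treated as 4X under the functional simulator or quick linkup,
 * and as no width otherwise.
 */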
7265 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7266 {
7267 switch (width) {
7268 case 0:
7269
7270
7271
7272
7273 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7274 return OPA_LINK_WIDTH_4X;
7275 return 0;
7276 case 1: return OPA_LINK_WIDTH_1X;
7277 case 2: return OPA_LINK_WIDTH_2X;
7278 case 3: return OPA_LINK_WIDTH_3X;
7279 case 4: return OPA_LINK_WIDTH_4X;
7280 default:
7281 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7282 __func__, width);
7283 return OPA_LINK_WIDTH_4X;
7284 }
7285 }
7286
7287
7288
7289
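/* Number of set bits in each possible 4-bit nibble value. */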
7290 static const u8 bit_counts[16] = {
7291 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7292 };
7293
7294 static inline u8 nibble_to_count(u8 nibble)
7295 {
7296 return bit_counts[nibble & 0xf];
7297 }
7298
7299
7300
7301
7302
7303
7304
7305
7306
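/*
 * Read the active tx and rx lane enables from the 8051 and convert them
 * into OPA link width masks.  On RTL silicon with firmware older than
 * 0.19, the max rate read here also sets the active link speed.
 */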
7307 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7308 u16 *rx_width)
7309 {
7310 u16 tx, rx;
7311 u8 enable_lane_rx;
7312 u8 enable_lane_tx;
7313 u8 tx_polarity_inversion;
7314 u8 rx_polarity_inversion;
7315 u8 max_rate;
7316
7317
7318 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7319 &rx_polarity_inversion, &max_rate);
7320 read_local_lni(dd, &enable_lane_rx);
7321
7322
7323 tx = nibble_to_count(enable_lane_tx);
7324 rx = nibble_to_count(enable_lane_rx);
7325
7326
7327
7328
7329
7330
7331 if ((dd->icode == ICODE_RTL_SILICON) &&
7332 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7333
7334 switch (max_rate) {
7335 case 0:
7336 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7337 break;
7338 case 1:
7339 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7340 break;
7341 default:
7342 dd_dev_err(dd,
7343 "%s: unexpected max rate %d, using 25Gb\n",
7344 __func__, (int)max_rate);
7345 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7346 break;
7347 }
7348 }
7349
7350 dd_dev_info(dd,
7351 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7352 enable_lane_tx, tx, enable_lane_rx, rx);
7353 *tx_width = link_width_to_bits(dd, tx);
7354 *rx_width = link_width_to_bits(dd, rx);
7355 }
7356
7357
7358
7359
7360
7361
7362
7363
7364
7365
7366
7367
7368
7369
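/*
 * Extract the tx and rx lane counts from the verify cap local link mode
 * frame and convert them to OPA widths; also read (and log) the
 * currently active widths.
 */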
7370 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7371 u16 *rx_width)
7372 {
7373 u16 widths, tx, rx;
7374 u8 misc_bits, local_flags;
7375 u16 active_tx, active_rx;
7376
7377 read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7378 tx = widths >> 12;
7379 rx = (widths >> 8) & 0xf;
7380
7381 *tx_width = link_width_to_bits(dd, tx);
7382 *rx_width = link_width_to_bits(dd, rx);
7383
7384
7385 get_link_widths(dd, &active_tx, &active_rx);
7386 }
7387
7388
7389
7390
7391
7392
7393
7394
7395
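/*
 * Capture the link widths at link-up time: set the active width, seed
 * the downgrade tx/rx active values, enable downgrade to everything
 * supported, and recompute the current egress rate.
 */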
7396 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7397 {
7398 u16 tx_width, rx_width;
7399
7400
7401 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7402
7403
7404 ppd->link_width_active = tx_width;
7405
7406 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7407 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7408
7409 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7410
7411 ppd->current_egress_rate = active_egress_rate(ppd);
7412 }
7413
7414
7415
7416
7417
7418
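/*
 * Handle the 8051's VerifyCap notification: read the peer's PHY, fabric,
 * link width, and device id frames, choose the CRC mode and credit setup
 * to use, then move the link state to GoingUp.
 */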
7419 void handle_verify_cap(struct work_struct *work)
7420 {
7421 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7422 link_vc_work);
7423 struct hfi1_devdata *dd = ppd->dd;
7424 u64 reg;
7425 u8 power_management;
7426 u8 continuous;
7427 u8 vcu;
7428 u8 vau;
7429 u8 z;
7430 u16 vl15buf;
7431 u16 link_widths;
7432 u16 crc_mask;
7433 u16 crc_val;
7434 u16 device_id;
7435 u16 active_tx, active_rx;
7436 u8 partner_supported_crc;
7437 u8 remote_tx_rate;
7438 u8 device_rev;
7439
7440 set_link_state(ppd, HLS_VERIFY_CAP);
7441
7442 lcb_shutdown(dd, 0);
7443 adjust_lcb_for_fpga_serdes(dd);
7444
7445 read_vc_remote_phy(dd, &power_management, &continuous);
7446 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7447 &partner_supported_crc);
7448 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7449 read_remote_device_id(dd, &device_id, &device_rev);
7450
7451
7452 get_link_widths(dd, &active_tx, &active_rx);
7453 dd_dev_info(dd,
7454 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7455 (int)power_management, (int)continuous);
7456 dd_dev_info(dd,
7457 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7458 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7459 (int)partner_supported_crc);
7460 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7461 (u32)remote_tx_rate, (u32)link_widths);
7462 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7463 (u32)device_id, (u32)device_rev);
7464
7465
7466
7467
7468
7469
7470
7471
7472
7473 if (vau == 0)
7474 vau = 1;
7475 set_up_vau(dd, vau);
7476
7477
7478
7479
7480
7481 set_up_vl15(dd, 0);
7482 dd->vl15buf_cached = vl15buf;
7483
7484
7485 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7486
7487
7488 if (crc_mask & CAP_CRC_14B)
7489 crc_val = LCB_CRC_14B;
7490 else if (crc_mask & CAP_CRC_48B)
7491 crc_val = LCB_CRC_48B;
7492 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7493 crc_val = LCB_CRC_12B_16B_PER_LANE;
7494 else
7495 crc_val = LCB_CRC_16B;
7496
7497 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7498 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7499 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7500
7501
7502 reg = read_csr(dd, SEND_CM_CTRL);
7503 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7504 write_csr(dd, SEND_CM_CTRL,
7505 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7506 } else {
7507 write_csr(dd, SEND_CM_CTRL,
7508 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7509 }
7510
7511 ppd->link_speed_active = 0;
7512 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7513
7514 switch (remote_tx_rate) {
7515 case 0:
7516 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7517 break;
7518 case 1:
7519 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7520 break;
7521 }
7522 } else {
7523
7524 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7525
7526 if (rate & 2)
7527 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7528 else if (rate & 1)
7529 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7530 }
7531 if (ppd->link_speed_active == 0) {
7532 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7533 __func__, (int)remote_tx_rate);
7534 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7535 }
7536
7537
7538
7539
7540
7541
7542
7543
7544 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7545
7546 ppd->port_ltp_crc_mode |=
7547 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7548
7549 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7550
7551
7552
7553 assign_remote_cm_au_table(dd, vcu);
7554
7555
7556
7557
7558
7559
7560
7561
7562
7563
7564 if (is_ax(dd)) {
7565 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7566 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7567 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7568 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7569 }
7570
7571
7572 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7573
7574
7575 write_csr(dd, DC_LCB_ERR_EN, 0);
7576 set_8051_lcb_access(dd);
7577
7578
7579 set_link_state(ppd, HLS_GOING_UP);
7580 }
7581
7582
7583
7584
7585
7586
7587
7588
7589
7590
7591
7592
7593
7594
7595
7596
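/*
 * Apply the link width downgrade enabled policy against the active link
 * widths, optionally refreshing them from the 8051 first.  Bounces the
 * link if the active widths violate the policy.  Returns true if the
 * link is running downgraded, false otherwise.
 */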
7597 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7598 bool refresh_widths)
7599 {
7600 int do_bounce = 0;
7601 int tries;
7602 u16 lwde;
7603 u16 tx, rx;
7604 bool link_downgraded = refresh_widths;
7605
7606
7607 tries = 0;
7608 retry:
7609 mutex_lock(&ppd->hls_lock);
7610
7611 if (ppd->host_link_state & HLS_DOWN) {
7612
7613 if (ppd->host_link_state & HLS_GOING_UP) {
7614 if (++tries < 1000) {
7615 mutex_unlock(&ppd->hls_lock);
7616 usleep_range(100, 120);
7617 goto retry;
7618 }
7619 dd_dev_err(ppd->dd,
7620 "%s: giving up waiting for link state change\n",
7621 __func__);
7622 }
7623 goto done;
7624 }
7625
7626 lwde = ppd->link_width_downgrade_enabled;
7627
7628 if (refresh_widths) {
7629 get_link_widths(ppd->dd, &tx, &rx);
7630 ppd->link_width_downgrade_tx_active = tx;
7631 ppd->link_width_downgrade_rx_active = rx;
7632 }
7633
7634 if (ppd->link_width_downgrade_tx_active == 0 ||
7635 ppd->link_width_downgrade_rx_active == 0) {
7636
7637 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7638 link_downgraded = false;
7639 } else if (lwde == 0) {
7640
7641
7642
7643 if ((ppd->link_width_active !=
7644 ppd->link_width_downgrade_tx_active) ||
7645 (ppd->link_width_active !=
7646 ppd->link_width_downgrade_rx_active)) {
7647 dd_dev_err(ppd->dd,
7648 "Link downgrade is disabled and link has downgraded, downing link\n");
7649 dd_dev_err(ppd->dd,
7650 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7651 ppd->link_width_active,
7652 ppd->link_width_downgrade_tx_active,
7653 ppd->link_width_downgrade_rx_active);
7654 do_bounce = 1;
7655 link_downgraded = false;
7656 }
7657 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7658 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7659
7660 dd_dev_err(ppd->dd,
7661 "Link is outside of downgrade allowed, downing link\n");
7662 dd_dev_err(ppd->dd,
7663 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7664 lwde, ppd->link_width_downgrade_tx_active,
7665 ppd->link_width_downgrade_rx_active);
7666 do_bounce = 1;
7667 link_downgraded = false;
7668 }
7669
7670 done:
7671 mutex_unlock(&ppd->hls_lock);
7672
7673 if (do_bounce) {
7674 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7675 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7676 set_link_state(ppd, HLS_DN_OFFLINE);
7677 start_link(ppd);
7678 }
7679
7680 return link_downgraded;
7681 }
7682
7683
7684
7685
7686
7687
7688 void handle_link_downgrade(struct work_struct *work)
7689 {
7690 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7691 link_downgrade_work);
7692
7693 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7694 if (apply_link_downgrade_policy(ppd, true))
7695 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7696 }
7697
7698 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7699 {
7700 return flag_string(buf, buf_len, flags, dcc_err_flags,
7701 ARRAY_SIZE(dcc_err_flags));
7702 }
7703
7704 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7705 {
7706 return flag_string(buf, buf_len, flags, lcb_err_flags,
7707 ARRAY_SIZE(lcb_err_flags));
7708 }
7709
7710 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7711 {
7712 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7713 ARRAY_SIZE(dc8051_err_flags));
7714 }
7715
7716 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7717 {
7718 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7719 ARRAY_SIZE(dc8051_info_err_flags));
7720 }
7721
7722 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7723 {
7724 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7725 ARRAY_SIZE(dc8051_info_host_msg_flags));
7726 }
7727
7728 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7729 {
7730 struct hfi1_pportdata *ppd = dd->pport;
7731 u64 info, err, host_msg;
7732 int queue_link_down = 0;
7733 char buf[96];
7734
7735
7736 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7737
7738
7739 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7740 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7741 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7742 host_msg = (info >>
7743 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7744 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7745
7746
7747
7748
7749 if (err & FAILED_LNI) {
7750
7751
7752
7753
7754
7755
7756 if (ppd->host_link_state
7757 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7758 queue_link_down = 1;
7759 dd_dev_info(dd, "Link error: %s\n",
7760 dc8051_info_err_string(buf,
7761 sizeof(buf),
7762 err &
7763 FAILED_LNI));
7764 }
7765 err &= ~(u64)FAILED_LNI;
7766 }
7767
7768 if (err & UNKNOWN_FRAME) {
7769 ppd->unknown_frame_count++;
7770 err &= ~(u64)UNKNOWN_FRAME;
7771 }
7772 if (err) {
7773
7774 dd_dev_err(dd, "8051 info error: %s\n",
7775 dc8051_info_err_string(buf, sizeof(buf),
7776 err));
7777 }
7778
7779
7780
7781
7782 if (host_msg & HOST_REQ_DONE) {
7783
7784
7785
7786
7787
7788
7789
7790
7791
7792 host_msg &= ~(u64)HOST_REQ_DONE;
7793 }
7794 if (host_msg & BC_SMA_MSG) {
7795 queue_work(ppd->link_wq, &ppd->sma_message_work);
7796 host_msg &= ~(u64)BC_SMA_MSG;
7797 }
7798 if (host_msg & LINKUP_ACHIEVED) {
7799 dd_dev_info(dd, "8051: Link up\n");
7800 queue_work(ppd->link_wq, &ppd->link_up_work);
7801 host_msg &= ~(u64)LINKUP_ACHIEVED;
7802 }
7803 if (host_msg & EXT_DEVICE_CFG_REQ) {
7804 handle_8051_request(ppd);
7805 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7806 }
7807 if (host_msg & VERIFY_CAP_FRAME) {
7808 queue_work(ppd->link_wq, &ppd->link_vc_work);
7809 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7810 }
7811 if (host_msg & LINK_GOING_DOWN) {
7812 const char *extra = "";
7813
7814 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7815 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7816 extra = " (ignoring downgrade)";
7817 }
7818 dd_dev_info(dd, "8051: Link down%s\n", extra);
7819 queue_link_down = 1;
7820 host_msg &= ~(u64)LINK_GOING_DOWN;
7821 }
7822 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7823 queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7824 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7825 }
7826 if (host_msg) {
7827
7828 dd_dev_info(dd, "8051 info host message: %s\n",
7829 dc8051_info_host_msg_string(buf,
7830 sizeof(buf),
7831 host_msg));
7832 }
7833
7834 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7835 }
7836 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7837
7838
7839
7840
7841
7842 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7843 write_csr(dd, DC_DC8051_ERR_EN,
7844 read_csr(dd, DC_DC8051_ERR_EN) &
7845 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7846
7847 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7848 }
7849 if (reg) {
7850
7851 dd_dev_err(dd, "8051 error: %s\n",
7852 dc8051_err_string(buf, sizeof(buf), reg));
7853 }
7854
7855 if (queue_link_down) {
7856
7857
7858
7859
7860
7861 if ((ppd->host_link_state &
7862 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7863 ppd->link_enabled == 0) {
7864 dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7865 __func__, ppd->host_link_state,
7866 ppd->link_enabled);
7867 } else {
7868 if (xchg(&ppd->is_link_down_queued, 1) == 1)
7869 dd_dev_info(dd,
7870 "%s: link down request already queued\n",
7871 __func__);
7872 else
7873 queue_work(ppd->link_wq, &ppd->link_down_work);
7874 }
7875 }
7876 }
7877
7878 static const char * const fm_config_txt[] = {
7879 [0] =
7880 "BadHeadDist: Distance violation between two head flits",
7881 [1] =
7882 "BadTailDist: Distance violation between two tail flits",
7883 [2] =
7884 "BadCtrlDist: Distance violation between two credit control flits",
7885 [3] =
7886 "BadCrdAck: Credits return for unsupported VL",
7887 [4] =
7888 "UnsupportedVLMarker: Received VL Marker",
7889 [5] =
7890 "BadPreempt: Exceeded the preemption nesting level",
7891 [6] =
7892 "BadControlFlit: Received unsupported control flit",
7893
7894 [8] =
7895 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7896 };
7897
7898 static const char * const port_rcv_txt[] = {
7899 [1] =
7900 "BadPktLen: Illegal PktLen",
7901 [2] =
7902 "PktLenTooLong: Packet longer than PktLen",
7903 [3] =
7904 "PktLenTooShort: Packet shorter than PktLen",
7905 [4] =
7906 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7907 [5] =
7908 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7909 [6] =
7910 "BadL2: Illegal L2 opcode",
7911 [7] =
7912 "BadSC: Unsupported SC",
7913 [9] =
7914 "BadRC: Illegal RC",
7915 [11] =
7916 "PreemptError: Preempting with same VL",
7917 [12] =
7918 "PreemptVL15: Preempting a VL15 packet",
7919 };
7920
7921 #define OPA_LDR_FMCONFIG_OFFSET 16
7922 #define OPA_LDR_PORTRCV_OFFSET 0
7923 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7924 {
7925 u64 info, hdr0, hdr1;
7926 const char *extra;
7927 char buf[96];
7928 struct hfi1_pportdata *ppd = dd->pport;
7929 u8 lcl_reason = 0;
7930 int do_bounce = 0;
7931
7932 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7933 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7934 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7935 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7936
7937 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7938 }
7939 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7940 }
7941
7942 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7943 struct hfi1_pportdata *ppd = dd->pport;
7944
7945 if (ppd->link_downed < (u32)UINT_MAX)
7946 ppd->link_downed++;
7947 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7948 }
7949
7950 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7951 u8 reason_valid = 1;
7952
7953 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7954 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7955 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7956
7957 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7958 }
7959 switch (info) {
7960 case 0:
7961 case 1:
7962 case 2:
7963 case 3:
7964 case 4:
7965 case 5:
7966 case 6:
7967 extra = fm_config_txt[info];
7968 break;
7969 case 8:
7970 extra = fm_config_txt[info];
7971 if (ppd->port_error_action &
7972 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7973 do_bounce = 1;
7974
7975
7976
7977
7978 lcl_reason =
7979 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7980 }
7981 break;
7982 default:
7983 reason_valid = 0;
7984 snprintf(buf, sizeof(buf), "reserved%lld", info);
7985 extra = buf;
7986 break;
7987 }
7988
7989 if (reason_valid && !do_bounce) {
7990 do_bounce = ppd->port_error_action &
7991 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7992 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7993 }
7994
7995
7996 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7997 extra);
7998 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7999 }
8000
8001 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8002 u8 reason_valid = 1;
8003
8004 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8005 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8006 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8007 if (!(dd->err_info_rcvport.status_and_code &
8008 OPA_EI_STATUS_SMASK)) {
8009 dd->err_info_rcvport.status_and_code =
8010 info & OPA_EI_CODE_SMASK;
8011
8012 dd->err_info_rcvport.status_and_code |=
8013 OPA_EI_STATUS_SMASK;
8014
8015
8016
8017
8018 dd->err_info_rcvport.packet_flit1 = hdr0;
8019 dd->err_info_rcvport.packet_flit2 = hdr1;
8020 }
8021 switch (info) {
8022 case 1:
8023 case 2:
8024 case 3:
8025 case 4:
8026 case 5:
8027 case 6:
8028 case 7:
8029 case 9:
8030 case 11:
8031 case 12:
8032 extra = port_rcv_txt[info];
8033 break;
8034 default:
8035 reason_valid = 0;
8036 snprintf(buf, sizeof(buf), "reserved%lld", info);
8037 extra = buf;
8038 break;
8039 }
8040
8041 if (reason_valid && !do_bounce) {
8042 do_bounce = ppd->port_error_action &
8043 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8044 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8045 }
8046
8047
8048 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8049 " hdr0 0x%llx, hdr1 0x%llx\n",
8050 extra, hdr0, hdr1);
8051
8052 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8053 }
8054
8055 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8056
8057 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8058 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8059 }
8060 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8061
8062 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8063 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8064 }
8065
8066 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8067 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8068
8069
8070 if (reg)
8071 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8072 dcc_err_string(buf, sizeof(buf), reg));
8073
8074 if (lcl_reason == 0)
8075 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8076
8077 if (do_bounce) {
8078 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8079 __func__);
8080 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8081 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8082 }
8083 }
8084
8085 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8086 {
8087 char buf[96];
8088
8089 dd_dev_info(dd, "LCB Error: %s\n",
8090 lcb_err_string(buf, sizeof(buf), reg));
8091 }
8092
8093
8094
8095
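/* Dispatch a DC interrupt source; source 3 reports an LBM parity error. */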
8096 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8097 {
8098 const struct err_reg_info *eri = &dc_errs[source];
8099
8100 if (eri->handler) {
8101 interrupt_clear_down(dd, 0, eri);
8102 } else if (source == 3) {
8103
8104
8105
8106
8107
8108
8109
8110
8111
8112 dd_dev_err(dd, "Parity error in DC LBM block\n");
8113 } else {
8114 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8115 }
8116 }
8117
8118
8119
8120
8121 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8122 {
8123 sc_group_release_update(dd, source);
8124 }
8125
8126
8127
8128
8129
8130
8131
8132
8133
8134
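/*
 * SDMA engine interrupt sources are grouped by type, with
 * TXE_NUM_SDMA_ENGINES sources per group: "what" selects the group and
 * "which" selects the engine within it.
 */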
8135 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8136 {
8137
8138 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8139
8140 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8141
8142 #ifdef CONFIG_SDMA_VERBOSITY
8143 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8144 slashstrip(__FILE__), __LINE__, __func__);
8145 sdma_dumpstate(&dd->per_sdma[which]);
8146 #endif
8147
8148 if (likely(what < 3 && which < dd->num_sdma)) {
8149 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8150 } else {
8151
8152 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8153 }
8154 }
8155
8156
8157
8158
8159
8160
8161
8162
8163
8164
8165
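/* Receive-available interrupt: forward to the matching receive context. */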
8166 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8167 {
8168 struct hfi1_ctxtdata *rcd;
8169 char *err_detail;
8170
8171 if (likely(source < dd->num_rcv_contexts)) {
8172 rcd = hfi1_rcd_get_by_index(dd, source);
8173 if (rcd) {
8174 handle_user_interrupt(rcd);
8175 hfi1_rcd_put(rcd);
8176 return;
8177 }
8178
8179 err_detail = "dataless";
8180 } else {
8181
8182 err_detail = "out of range";
8183 }
8184 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8185 err_detail, source);
8186 }
8187
8188
8189
8190
8191
8192
8193
8194
8195
8196
8197 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8198 {
8199 struct hfi1_ctxtdata *rcd;
8200 char *err_detail;
8201
8202 if (likely(source < dd->num_rcv_contexts)) {
8203 rcd = hfi1_rcd_get_by_index(dd, source);
8204 if (rcd) {
8205 handle_user_interrupt(rcd);
8206 hfi1_rcd_put(rcd);
8207 return;
8208 }
8209
8210 err_detail = "dataless";
8211 } else {
8212
8213 err_detail = "out of range";
8214 }
8215 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8216 err_detail, source);
8217 }
8218
8219
8220
8221
8222 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8223 {
8224 char name[64];
8225
8226 dd_dev_err(dd, "unexpected %s interrupt\n",
8227 is_reserved_name(name, sizeof(name), source));
8228 }
8229
8230 static const struct is_table is_table[] = {
8231
8232
8233
8234
8235 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8236 is_misc_err_name, is_misc_err_int },
8237 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8238 is_sdma_eng_err_name, is_sdma_eng_err_int },
8239 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8240 is_sendctxt_err_name, is_sendctxt_err_int },
8241 { IS_SDMA_START, IS_SDMA_IDLE_END,
8242 is_sdma_eng_name, is_sdma_eng_int },
8243 { IS_VARIOUS_START, IS_VARIOUS_END,
8244 is_various_name, is_various_int },
8245 { IS_DC_START, IS_DC_END,
8246 is_dc_name, is_dc_int },
8247 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8248 is_rcv_avail_name, is_rcv_avail_int },
8249 { IS_RCVURGENT_START, IS_RCVURGENT_END,
8250 is_rcv_urgent_name, is_rcv_urgent_int },
8251 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8252 is_send_credit_name, is_send_credit_int},
8253 { IS_RESERVED_START, IS_RESERVED_END,
8254 is_reserved_name, is_reserved_int},
8255 };
8256
8257
8258
8259
8260
8261 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8262 {
8263 const struct is_table *entry;
8264
8265
8266 for (entry = &is_table[0]; entry->is_name; entry++) {
8267 if (source <= entry->end) {
8268 trace_hfi1_interrupt(dd, entry, source);
8269 entry->is_int(dd, source - entry->start);
8270 return;
8271 }
8272 }
8273
8274 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8275 }
8276
8277
8278
8279
8280
8281
8282
8283
8284
8285
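/*
 * General interrupt handler: read and clear all enabled interrupt status
 * bits, then dispatch each set bit to its source handler.
 */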
8286 irqreturn_t general_interrupt(int irq, void *data)
8287 {
8288 struct hfi1_devdata *dd = data;
8289 u64 regs[CCE_NUM_INT_CSRS];
8290 u32 bit;
8291 int i;
8292 irqreturn_t handled = IRQ_NONE;
8293
8294 this_cpu_inc(*dd->int_counter);
8295
8296
8297 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8298 if (dd->gi_mask[i] == 0) {
8299 regs[i] = 0;
8300 continue;
8301 }
8302 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8303 dd->gi_mask[i];
8304
8305 if (regs[i])
8306 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8307 }
8308
8309
8310 for_each_set_bit(bit, (unsigned long *)&regs[0],
8311 CCE_NUM_INT_CSRS * 64) {
8312 is_interrupt(dd, bit);
8313 handled = IRQ_HANDLED;
8314 }
8315
8316 return handled;
8317 }
8318
8319 irqreturn_t sdma_interrupt(int irq, void *data)
8320 {
8321 struct sdma_engine *sde = data;
8322 struct hfi1_devdata *dd = sde->dd;
8323 u64 status;
8324
8325 #ifdef CONFIG_SDMA_VERBOSITY
8326 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8327 slashstrip(__FILE__), __LINE__, __func__);
8328 sdma_dumpstate(sde);
8329 #endif
8330
8331 this_cpu_inc(*dd->int_counter);
8332
8333
8334 status = read_csr(dd,
8335 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8336 & sde->imask;
8337 if (likely(status)) {
8338
8339 write_csr(dd,
8340 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8341 status);
8342
8343
8344 sdma_engine_interrupt(sde, status);
8345 } else {
8346 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8347 sde->this_idx);
8348 }
8349 return IRQ_HANDLED;
8350 }
8351
8352
8353
8354
8355
8356
8357 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8358 {
8359 struct hfi1_devdata *dd = rcd->dd;
8360 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8361
8362 write_csr(dd, addr, rcd->imask);
8363
8364 (void)read_csr(dd, addr);
8365 }
8366
8367
8368 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8369 {
8370 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8371 }
8372
8373
8374
8375
8376
8377
8378
8379
8380
8381
8382
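/*
 * Return 1 if a packet is pending, re-checking the hardware tail in case
 * the cached software state says the queue is empty.
 */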
8383 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8384 {
8385 u32 tail;
8386
8387 if (hfi1_packet_present(rcd))
8388 return 1;
8389
8390
8391 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8392 return hfi1_rcd_head(rcd) != tail;
8393 }
8394
8395
8396
8397
8398
8399
8400 static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
8401 {
8402 struct hfi1_devdata *dd = rcd->dd;
8403
8404 trace_hfi1_receive_interrupt(dd, rcd);
8405 this_cpu_inc(*dd->int_counter);
8406 aspm_ctx_disable(rcd);
8407 }
8408
8409
8410
8411
8412
8413
8414
8415
8416 static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
8417 {
8418 if (!rcd->rcvhdrq)
8419 return;
8420 clear_recv_intr(rcd);
8421 if (check_packet_present(rcd))
8422 force_recv_intr(rcd);
8423 }
8424
8425
8426
8427
8428
8429
8430
8431
8432
8433
8434
8435
8436
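/*
 * End-of-interrupt processing with local interrupts disabled: clear the
 * context's interrupt and force it again if packets are still pending.
 */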
8437 static void hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
8438 {
8439 unsigned long flags;
8440
8441 local_irq_save(flags);
8442 __hfi1_rcd_eoi_intr(rcd);
8443 local_irq_restore(flags);
8444 }
8445
8446
8447
8448
8449
8450
8451 int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget)
8452 {
8453 struct hfi1_netdev_rxq *rxq = container_of(napi,
8454 struct hfi1_netdev_rxq, napi);
8455 struct hfi1_ctxtdata *rcd = rxq->rcd;
8456 int work_done = 0;
8457
8458 work_done = rcd->do_interrupt(rcd, budget);
8459
8460 if (work_done < budget) {
8461 napi_complete_done(napi, work_done);
8462 hfi1_rcd_eoi_intr(rcd);
8463 }
8464
8465 return work_done;
8466 }
8467
8468
8469 irqreturn_t receive_context_interrupt_napi(int irq, void *data)
8470 {
8471 struct hfi1_ctxtdata *rcd = data;
8472
8473 receive_interrupt_common(rcd);
8474
8475 if (likely(rcd->napi)) {
8476 if (likely(napi_schedule_prep(rcd->napi)))
8477 __napi_schedule_irqoff(rcd->napi);
8478 else
8479 __hfi1_rcd_eoi_intr(rcd);
8480 } else {
8481 WARN_ONCE(1, "Napi IRQ handler without napi set up ctxt=%d\n",
8482 rcd->ctxt);
8483 __hfi1_rcd_eoi_intr(rcd);
8484 }
8485
8486 return IRQ_HANDLED;
8487 }
8488
8489
8490
8491
8492
8493
8494
8495
8496
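/*
 * Receive IRQ handler: process packets inline; if the packet limit is
 * reached, return IRQ_WAKE_THREAD so the threaded handler continues.
 */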
8497 irqreturn_t receive_context_interrupt(int irq, void *data)
8498 {
8499 struct hfi1_ctxtdata *rcd = data;
8500 int disposition;
8501
8502 receive_interrupt_common(rcd);
8503
8504
8505 disposition = rcd->do_interrupt(rcd, 0);
8506
8507
8508
8509
8510
8511
8512 if (disposition == RCV_PKT_LIMIT)
8513 return IRQ_WAKE_THREAD;
8514
8515 __hfi1_rcd_eoi_intr(rcd);
8516 return IRQ_HANDLED;
8517 }
8518
8519
8520
8521
8522
8523 irqreturn_t receive_context_thread(int irq, void *data)
8524 {
8525 struct hfi1_ctxtdata *rcd = data;
8526
8527
8528 (void)rcd->do_interrupt(rcd, 1);
8529
8530 hfi1_rcd_eoi_intr(rcd);
8531
8532 return IRQ_HANDLED;
8533 }
8534
8535
8536
8537 u32 read_physical_state(struct hfi1_devdata *dd)
8538 {
8539 u64 reg;
8540
8541 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8542 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8543 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8544 }
8545
8546 u32 read_logical_state(struct hfi1_devdata *dd)
8547 {
8548 u64 reg;
8549
8550 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8551 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8552 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8553 }
8554
8555 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8556 {
8557 u64 reg;
8558
8559 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8560
8561 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8562 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8563 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8564 }
8565
8566
8567
8568
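/*
 * Read an LCB CSR.  Under the functional simulator the register is read
 * directly after taking LCB access; otherwise the read goes through an
 * 8051 host command.
 */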
8569 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8570 {
8571 u32 regno;
8572 int ret;
8573
8574 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8575 if (acquire_lcb_access(dd, 0) == 0) {
8576 *data = read_csr(dd, addr);
8577 release_lcb_access(dd, 0);
8578 return 0;
8579 }
8580 return -EBUSY;
8581 }
8582
8583
8584 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8585 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8586 if (ret != HCMD_SUCCESS)
8587 return -EBUSY;
8588 return 0;
8589 }
8590
8591
8592
8593
8594
8595
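/*
 * Cache of LCB error counters, refreshed while the registers are
 * readable so values can still be reported during link transitions.
 */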
8596 struct lcb_datum {
8597 u32 off;
8598 u64 val;
8599 };
8600
8601 static struct lcb_datum lcb_cache[] = {
8602 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8603 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8604 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8605 };
8606
8607 static void update_lcb_cache(struct hfi1_devdata *dd)
8608 {
8609 int i;
8610 int ret;
8611 u64 val;
8612
8613 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8614 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8615
8616
8617 if (likely(ret != -EBUSY))
8618 lcb_cache[i].val = val;
8619 }
8620 }
8621
8622 static int read_lcb_cache(u32 off, u64 *val)
8623 {
8624 int i;
8625
8626 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8627 if (lcb_cache[i].off == off) {
8628 *val = lcb_cache[i].val;
8629 return 0;
8630 }
8631 }
8632
8633 pr_warn("%s bad offset 0x%x\n", __func__, off);
8634 return -1;
8635 }
8636
8637
8638
8639
8640
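/*
 * Read an LCB CSR, choosing the access method by link state: through the
 * 8051 while the link is up, from the cache while the link is in
 * transition, and directly otherwise.
 */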
8641 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8642 {
8643 struct hfi1_pportdata *ppd = dd->pport;
8644
8645
8646 if (ppd->host_link_state & HLS_UP)
8647 return read_lcb_via_8051(dd, addr, data);
8648
8649 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8650 if (read_lcb_cache(addr, data))
8651 return -EBUSY;
8652 return 0;
8653 }
8654
8655
8656 *data = read_csr(dd, addr);
8657 return 0;
8658 }
8659
8660
8661
8662
8663 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8664 {
8665 u32 regno;
8666 int ret;
8667
8668 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8669 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8670 if (acquire_lcb_access(dd, 0) == 0) {
8671 write_csr(dd, addr, data);
8672 release_lcb_access(dd, 0);
8673 return 0;
8674 }
8675 return -EBUSY;
8676 }
8677
8678
8679 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8680 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8681 if (ret != HCMD_SUCCESS)
8682 return -EBUSY;
8683 return 0;
8684 }
8685
8686
8687
8688
8689
8690 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8691 {
8692 struct hfi1_pportdata *ppd = dd->pport;
8693
8694
8695 if (ppd->host_link_state & HLS_UP)
8696 return write_lcb_via_8051(dd, addr, data);
8697
8698 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8699 return -EBUSY;
8700
8701 write_csr(dd, addr, data);
8702 return 0;
8703 }
8704
8705
8706
8707
8708
8709
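/*
 * Issue a host command to the 8051 and wait for completion.  Returns a
 * negative errno if the command could not be issued or timed out,
 * otherwise the 8051 RETURN_CODE.
 */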
8710 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8711 u64 *out_data)
8712 {
8713 u64 reg, completed;
8714 int return_code;
8715 unsigned long timeout;
8716
8717 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8718
8719 mutex_lock(&dd->dc8051_lock);
8720
8721
8722 if (dd->dc_shutdown) {
8723 return_code = -ENODEV;
8724 goto fail;
8725 }
8726
8727
8728
8729
8730
8731
8732
8733
8734
8735
8736
8737 if (dd->dc8051_timed_out) {
8738 if (dd->dc8051_timed_out > 1) {
8739 dd_dev_err(dd,
8740 "Previous 8051 host command timed out, skipping command %u\n",
8741 type);
8742 return_code = -ENXIO;
8743 goto fail;
8744 }
8745 _dc_shutdown(dd);
8746 _dc_start(dd);
8747 }
8748
8749
8750
8751
8752
8753
8754
8755
8756
8757
8758
8759
8760
8761
8762
8763
8764
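/*
 * HCMD_WRITE_LCB_CSR splits the 64-bit write data across the command:
 * the low 40 bits travel in the command's data field, and the upper
 * 24 bits are staged in DC_DC8051_CFG_EXT_DEV_0.  For this command,
 * *out_data supplies the write data on input.
 */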
8765 if (type == HCMD_WRITE_LCB_CSR) {
8766 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8767
8768 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8769 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8770 reg |= ((((*out_data) >> 40) & 0xff) <<
8771 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8772 | ((((*out_data) >> 48) & 0xffff) <<
8773 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8774 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8775 }
8776
8777
8778
8779
8780
8781 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8782 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8783 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8784 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8785 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8786 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8787 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8788
8789
8790 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8791 while (1) {
8792 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8793 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8794 if (completed)
8795 break;
8796 if (time_after(jiffies, timeout)) {
8797 dd->dc8051_timed_out++;
8798 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8799 if (out_data)
8800 *out_data = 0;
8801 return_code = -ETIMEDOUT;
8802 goto fail;
8803 }
8804 udelay(2);
8805 }
8806
8807 if (out_data) {
8808 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8809 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8810 if (type == HCMD_READ_LCB_CSR) {
8811
8812 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8813 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8814 << (48
8815 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8816 }
8817 }
8818 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8819 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8820 dd->dc8051_timed_out = 0;
8821
8822
8823
8824 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8825
8826 fail:
8827 mutex_unlock(&dd->dc8051_lock);
8828 return return_code;
8829 }
8830
8831 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8832 {
8833 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8834 }
8835
8836 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8837 u8 lane_id, u32 config_data)
8838 {
8839 u64 data;
8840 int ret;
8841
8842 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8843 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8844 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8845 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8846 if (ret != HCMD_SUCCESS) {
8847 dd_dev_err(dd,
8848 "load 8051 config: field id %d, lane %d, err %d\n",
8849 (int)field_id, (int)lane_id, ret);
8850 }
8851 return ret;
8852 }
8853
8854
8855
8856
8857
8858
8859 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8860 u32 *result)
8861 {
8862 u64 big_data;
8863 u32 addr;
8864 int ret;
8865
8866
8867 if (lane_id < 4)
8868 addr = (4 * NUM_GENERAL_FIELDS)
8869 + (lane_id * 4 * NUM_LANE_FIELDS);
8870 else
8871 addr = 0;
8872 addr += field_id * 4;
8873
8874
8875 ret = read_8051_data(dd, addr, 8, &big_data);
8876
8877 if (ret == 0) {
8878
8879 if (addr & 0x4)
8880 *result = (u32)(big_data >> 32);
8881 else
8882 *result = (u32)big_data;
8883 } else {
8884 *result = 0;
8885 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8886 __func__, lane_id, field_id);
8887 }
8888
8889 return ret;
8890 }
8891
8892 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8893 u8 continuous)
8894 {
8895 u32 frame;
8896
8897 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8898 | power_management << POWER_MANAGEMENT_SHIFT;
8899 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8900 GENERAL_CONFIG, frame);
8901 }
8902
8903 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8904 u16 vl15buf, u8 crc_sizes)
8905 {
8906 u32 frame;
8907
8908 frame = (u32)vau << VAU_SHIFT
8909 | (u32)z << Z_SHIFT
8910 | (u32)vcu << VCU_SHIFT
8911 | (u32)vl15buf << VL15BUF_SHIFT
8912 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8913 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8914 GENERAL_CONFIG, frame);
8915 }
8916
8917 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8918 u8 *flag_bits, u16 *link_widths)
8919 {
8920 u32 frame;
8921
8922 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8923 &frame);
8924 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8925 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8926 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8927 }
8928
8929 static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8930 u8 misc_bits,
8931 u8 flag_bits,
8932 u16 link_widths)
8933 {
8934 u32 frame;
8935
8936 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8937 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8938 | (u32)link_widths << LINK_WIDTH_SHIFT;
8939 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8940 frame);
8941 }
8942
8943 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8944 u8 device_rev)
8945 {
8946 u32 frame;
8947
8948 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8949 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8950 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8951 }
8952
8953 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8954 u8 *device_rev)
8955 {
8956 u32 frame;
8957
8958 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8959 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8960 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8961 & REMOTE_DEVICE_REV_MASK;
8962 }
8963
8964 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8965 {
8966 u32 frame;
8967 u32 mask;
8968
8969 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8970 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8971
8972 frame &= ~mask;
8973 frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8974 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8975 frame);
8976 }
8977
8978 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8979 u8 *ver_patch)
8980 {
8981 u32 frame;
8982
8983 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8984 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8985 STS_FM_VERSION_MAJOR_MASK;
8986 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8987 STS_FM_VERSION_MINOR_MASK;
8988
8989 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8990 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8991 STS_FM_VERSION_PATCH_MASK;
8992 }
8993
8994 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8995 u8 *continuous)
8996 {
8997 u32 frame;
8998
8999 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
9000 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
9001 & POWER_MANAGEMENT_MASK;
9002 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
9003 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
9004 }
9005
9006 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
9007 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
9008 {
9009 u32 frame;
9010
9011 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
9012 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
9013 *z = (frame >> Z_SHIFT) & Z_MASK;
9014 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
9015 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
9016 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
9017 }
9018
9019 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
9020 u8 *remote_tx_rate,
9021 u16 *link_widths)
9022 {
9023 u32 frame;
9024
9025 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
9026 &frame);
9027 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
9028 & REMOTE_TX_RATE_MASK;
9029 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
9030 }
9031
9032 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
9033 {
9034 u32 frame;
9035
9036 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
9037 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
9038 }
9039
9040 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
9041 {
9042 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
9043 }
9044
9045 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
9046 {
9047 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
9048 }
9049
9050 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
9051 {
9052 u32 frame;
9053 int ret;
9054
9055 *link_quality = 0;
9056 if (dd->pport->host_link_state & HLS_UP) {
9057 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9058 &frame);
9059 if (ret == 0)
9060 *link_quality = (frame >> LINK_QUALITY_SHIFT)
9061 & LINK_QUALITY_MASK;
9062 }
9063 }
9064
9065 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9066 {
9067 u32 frame;
9068
9069 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9070 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9071 }
9072
9073 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9074 {
9075 u32 frame;
9076
9077 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9078 *ldr = (frame & 0xff);
9079 }
9080
9081 static int read_tx_settings(struct hfi1_devdata *dd,
9082 u8 *enable_lane_tx,
9083 u8 *tx_polarity_inversion,
9084 u8 *rx_polarity_inversion,
9085 u8 *max_rate)
9086 {
9087 u32 frame;
9088 int ret;
9089
9090 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9091 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9092 & ENABLE_LANE_TX_MASK;
9093 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9094 & TX_POLARITY_INVERSION_MASK;
9095 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9096 & RX_POLARITY_INVERSION_MASK;
9097 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9098 return ret;
9099 }
9100
9101 static int write_tx_settings(struct hfi1_devdata *dd,
9102 u8 enable_lane_tx,
9103 u8 tx_polarity_inversion,
9104 u8 rx_polarity_inversion,
9105 u8 max_rate)
9106 {
9107 u32 frame;
9108
9109
9110 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9111 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9112 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9113 | max_rate << MAX_RATE_SHIFT;
9114 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9115 }
9116
9117
9118
9119
9120
9121
9122 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9123 {
9124 int ret;
9125
9126 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9127 if (ret != HCMD_SUCCESS) {
9128 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9129 (u32)type, ret);
9130 return -EINVAL;
9131 }
9132 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9133
9134 *data_out >>= IDLE_PAYLOAD_SHIFT;
9135 return 0;
9136 }
9137
9138
9139
9140
9141
9142
9143
9144 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9145 {
9146 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9147 data);
9148 }
9149
9150
9151
9152
9153
9154
9155 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9156 {
9157 int ret;
9158
9159 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9160 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9161 if (ret != HCMD_SUCCESS) {
9162 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9163 data, ret);
9164 return -EINVAL;
9165 }
9166 return 0;
9167 }
9168
9169
9170
9171
9172
9173
9174 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9175 {
9176 u64 data;
9177
9178 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9179 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9180 return send_idle_message(dd, data);
9181 }
9182
9183
9184
9185
9186
9187
9188
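/*
 * Perform the quick linkup sequence: set up the LCB directly and move
 * the physical link state to PLS_QUICK_LINKUP.  Used for loopback and
 * the functional simulator.
 */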
9189 static int do_quick_linkup(struct hfi1_devdata *dd)
9190 {
9191 int ret;
9192
9193 lcb_shutdown(dd, 0);
9194
9195 if (loopback) {
9196
9197
9198 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9199 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9200 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9201 }
9202
9203
9204
9205 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9206
9207
9208 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9209
9210 write_csr(dd, DC_LCB_CFG_RUN,
9211 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9212
9213 ret = wait_link_transfer_active(dd, 10);
9214 if (ret)
9215 return ret;
9216
9217 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9218 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9219 }
9220
9221 if (!loopback) {
9222
9223
9224
9225
9226
9227
9228
9229 dd_dev_err(dd,
9230 "Pausing for peer to be finished with LCB set up\n");
9231 msleep(5000);
9232 dd_dev_err(dd, "Continuing with quick linkup\n");
9233 }
9234
9235 write_csr(dd, DC_LCB_ERR_EN, 0);
9236 set_8051_lcb_access(dd);
9237
9238
9239
9240
9241
9242
9243 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9244 if (ret != HCMD_SUCCESS) {
9245 dd_dev_err(dd,
9246 "%s: set physical link state to quick LinkUp failed with return %d\n",
9247 __func__, ret);
9248
9249 set_host_lcb_access(dd);
9250 write_csr(dd, DC_LCB_ERR_EN, ~0ull);
9251
9252 if (ret >= 0)
9253 ret = -EINVAL;
9254 return ret;
9255 }
9256
9257 return 0;
9258 }
9259
9260
9261
9262
9263 static int init_loopback(struct hfi1_devdata *dd)
9264 {
9265 dd_dev_info(dd, "Entering loopback mode\n");
9266
9267
9268 write_csr(dd, DC_DC8051_CFG_MODE,
9269 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9270
9271
9272
9273
9274
9275
9276
9277 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9278 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9279 loopback == LOOPBACK_CABLE)) {
9280 loopback = LOOPBACK_LCB;
9281 quick_linkup = 1;
9282 return 0;
9283 }
9284
9285
9286
9287
9288 if (loopback == LOOPBACK_SERDES)
9289 return 0;
9290
9291
9292 if (loopback == LOOPBACK_LCB) {
9293 quick_linkup = 1;
9294
9295
9296 if (dd->icode == ICODE_FPGA_EMULATION) {
9297 dd_dev_err(dd,
9298 "LCB loopback not supported in emulation\n");
9299 return -EINVAL;
9300 }
9301 return 0;
9302 }
9303
9304
9305 if (loopback == LOOPBACK_CABLE)
9306 return 0;
9307
9308 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9309 return -EINVAL;
9310 }
9311
9312
9313
9314
9315
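/*
 * Translate OPA_LINK_WIDTH_* bits into the lane-count bit mask form used
 * by the 8051 (bit N-1 set for width NX).
 */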
9316 static u16 opa_to_vc_link_widths(u16 opa_widths)
9317 {
9318 int i;
9319 u16 result = 0;
9320
9321 static const struct link_bits {
9322 u16 from;
9323 u16 to;
9324 } opa_link_xlate[] = {
9325 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9326 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9327 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9328 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9329 };
9330
9331 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9332 if (opa_widths & opa_link_xlate[i].from)
9333 result |= opa_link_xlate[i].to;
9334 }
9335 return result;
9336 }
9337
9338
9339
9340
9341 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9342 {
9343 struct hfi1_devdata *dd = ppd->dd;
9344 u8 enable_lane_tx;
9345 u8 tx_polarity_inversion;
9346 u8 rx_polarity_inversion;
9347 int ret;
9348 u32 misc_bits = 0;
9349
9350 fabric_serdes_reset(dd);
9351
9352
9353 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9354 &rx_polarity_inversion, &ppd->local_tx_rate);
9355 if (ret)
9356 goto set_local_link_attributes_fail;
9357
9358 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9359
9360 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9361 ppd->local_tx_rate = 1;
9362 else
9363 ppd->local_tx_rate = 0;
9364 } else {
9365
9366 ppd->local_tx_rate = 0;
9367 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9368 ppd->local_tx_rate |= 2;
9369 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9370 ppd->local_tx_rate |= 1;
9371 }
9372
9373 enable_lane_tx = 0xF;
9374 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9375 rx_polarity_inversion, ppd->local_tx_rate);
9376 if (ret != HCMD_SUCCESS)
9377 goto set_local_link_attributes_fail;
9378
9379 ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9380 if (ret != HCMD_SUCCESS) {
9381 dd_dev_err(dd,
9382 "Failed to set host interface version, return 0x%x\n",
9383 ret);
9384 goto set_local_link_attributes_fail;
9385 }
9386
9387
9388
9389
9390 ret = write_vc_local_phy(dd,
9391 0 /* no power management */,
9392 1 /* continuous updates */);
9393 if (ret != HCMD_SUCCESS)
9394 goto set_local_link_attributes_fail;
9395
9396
9397 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9398 ppd->port_crc_mode_enabled);
9399 if (ret != HCMD_SUCCESS)
9400 goto set_local_link_attributes_fail;
9401
9402
9403
9404
9405
9406 if (loopback == LOOPBACK_SERDES)
9407 misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9408
9409
9410
9411
9412
9413
9414 if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9415 misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9416
9417 ret = write_vc_local_link_mode(dd, misc_bits, 0,
9418 opa_to_vc_link_widths(
9419 ppd->link_width_enabled));
9420 if (ret != HCMD_SUCCESS)
9421 goto set_local_link_attributes_fail;
9422
9423
9424 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9425 if (ret == HCMD_SUCCESS)
9426 return 0;
9427
9428 set_local_link_attributes_fail:
9429 dd_dev_err(dd,
9430 "Failed to set local link attributes, return 0x%x\n",
9431 ret);
9432 return ret;
9433 }
9434
9435
9436
9437
9438
9439
9440 int start_link(struct hfi1_pportdata *ppd)
9441 {
9442
9443
9444
9445
9446 tune_serdes(ppd);
9447
9448 if (!ppd->driver_link_ready) {
9449 dd_dev_info(ppd->dd,
9450 "%s: stopping link start because driver is not ready\n",
9451 __func__);
9452 return 0;
9453 }
9454
9455
9456
9457
9458
9459
9460 clear_full_mgmt_pkey(ppd);
9461
9462 return set_link_state(ppd, HLS_DN_POLL);
9463 }
9464
9465 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9466 {
9467 struct hfi1_devdata *dd = ppd->dd;
9468 u64 mask;
9469 unsigned long timeout;
9470
9471
9472
9473
9474
9475
9476
9477
9478
9479 msleep(500);
9480
9481
9482
9483
9484 timeout = jiffies + msecs_to_jiffies(2000);
9485 while (1) {
9486 mask = read_csr(dd, dd->hfi1_id ?
9487 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9488 if (!(mask & QSFP_HFI0_INT_N))
9489 break;
9490 if (time_after(jiffies, timeout)) {
9491 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9492 __func__);
9493 break;
9494 }
9495 udelay(2);
9496 }
9497 }
9498
9499 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9500 {
9501 struct hfi1_devdata *dd = ppd->dd;
9502 u64 mask;
9503
9504 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9505 if (enable) {
9506
9507
9508
9509
9510 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9511 QSFP_HFI0_INT_N);
9512 mask |= (u64)QSFP_HFI0_INT_N;
9513 } else {
9514 mask &= ~(u64)QSFP_HFI0_INT_N;
9515 }
9516 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9517 }
9518
9519 int reset_qsfp(struct hfi1_pportdata *ppd)
9520 {
9521 struct hfi1_devdata *dd = ppd->dd;
9522 u64 mask, qsfp_mask;
9523
9524
9525 set_qsfp_int_n(ppd, 0);
9526
9527
9528 mask = (u64)QSFP_HFI0_RESET_N;
9529
9530 qsfp_mask = read_csr(dd,
9531 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9532 qsfp_mask &= ~mask;
9533 write_csr(dd,
9534 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9535
9536 udelay(10);
9537
9538 qsfp_mask |= mask;
9539 write_csr(dd,
9540 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9541
9542 wait_for_qsfp_init(ppd);
9543
9544
9545
9546
9547
9548 set_qsfp_int_n(ppd, 1);
9549
9550
9551
9552
9553
9554
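/*
 * The module's transmitters are expected to come back enabled after
 * the reset; turn them off so QSFP setup can complete before they
 * are re-enabled.
 */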
9555 return set_qsfp_tx(ppd, 0);
9556 }
9557
9558 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9559 u8 *qsfp_interrupt_status)
9560 {
9561 struct hfi1_devdata *dd = ppd->dd;
9562
9563 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9564 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9565 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9566 __func__);
9567
9568 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9569 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9570 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9571 __func__);
9572
9573
9574
9575
9576 if (ppd->host_link_state & HLS_DOWN)
9577 return 0;
9578
9579 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9580 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9581 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9582 __func__);
9583
9584 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9585 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9586 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9587 __func__);
9588
9589
9590
9591 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9592 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9593 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9594 __func__);
9595
9596 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9597 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9598 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9599 __func__);
9600
9601 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9602 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9603 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9604 __func__);
9605
9606 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9607 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9608 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9609 __func__);
9610
9611 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9612 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9613 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9614 __func__);
9615
9616 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9617 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9618 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9619 __func__);
9620
9621 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9622 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9623 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9624 __func__);
9625
9626 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9627 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9628 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9629 __func__);
9630
9631 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9632 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9633 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9634 __func__);
9635
9636 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9637 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9638 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9639 __func__);
9640
9641 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9642 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9643 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9644 __func__);
9645
9646 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9647 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9648 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9649 __func__);
9650
9651
9652
9653
9654 return 0;
9655 }
9656
9657
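/*
 * Workqueue handler for QSFP events.  Refreshes the module state and
 * restarts the link, and/or reads and reports the module's
 * alarm/warning flags, depending on what the interrupt indicated.
 */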
9658 void qsfp_event(struct work_struct *work)
9659 {
9660 struct qsfp_data *qd;
9661 struct hfi1_pportdata *ppd;
9662 struct hfi1_devdata *dd;
9663
9664 qd = container_of(work, struct qsfp_data, qsfp_work);
9665 ppd = qd->ppd;
9666 dd = ppd->dd;
9667
9668
9669 if (!qsfp_mod_present(ppd))
9670 return;
9671
9672 if (ppd->host_link_state == HLS_DN_DISABLE) {
9673 dd_dev_info(ppd->dd,
9674 "%s: stopping link start because link is disabled\n",
9675 __func__);
9676 return;
9677 }
9678
9679
9680
9681
9682
9683 dc_start(dd);
9684
9685 if (qd->cache_refresh_required) {
9686 set_qsfp_int_n(ppd, 0);
9687
9688 wait_for_qsfp_init(ppd);
9689
9690
9691
9692
9693
9694 set_qsfp_int_n(ppd, 1);
9695
9696 start_link(ppd);
9697 }
9698
9699 if (qd->check_interrupt_flags) {
9700 u8 qsfp_interrupt_status[16] = {0,};
9701
9702 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9703 &qsfp_interrupt_status[0], 16) != 16) {
9704 dd_dev_info(dd,
9705 "%s: Failed to read status of QSFP module\n",
9706 __func__);
9707 } else {
9708 unsigned long flags;
9709
9710 handle_qsfp_error_conditions(
9711 ppd, qsfp_interrupt_status);
9712 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9713 ppd->qsfp_info.check_interrupt_flags = 0;
9714 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9715 flags);
9716 }
9717 }
9718 }
9719
9720 void init_qsfp_int(struct hfi1_devdata *dd)
9721 {
9722 struct hfi1_pportdata *ppd = dd->pport;
9723 u64 qsfp_mask;
9724
9725 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9726
9727 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9728 qsfp_mask);
9729 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9730 qsfp_mask);
9731
9732 set_qsfp_int_n(ppd, 0);
9733
9734
9735 if (qsfp_mod_present(ppd))
9736 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9737 write_csr(dd,
9738 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9739 qsfp_mask);
9740
9741
9742 if (!dd->hfi1_id)
9743 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
9744 else
9745 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
9746 }
9747
9748
9749
9750
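/*
 * Do a one-time initialize of the LCB block.
 */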
9751 static void init_lcb(struct hfi1_devdata *dd)
9752 {
9753
9754 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9755 return;
9756
9757
9758
9759
9760 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9761 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9762 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9763 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9764 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9765 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9766 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9767 }
9768
9769
9770
9771
9772
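/*
 * Perform a test read on the QSFP.  Return 0 on success or if there
 * is nothing to read (non-QSFP port, or module not present), -errno
 * on error.
 */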
9773 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9774 {
9775 int ret;
9776 u8 status;
9777
9778
9779
9780
9781
9782 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9783 return 0;
9784
9785
9786 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9787 if (ret < 0)
9788 return ret;
9789 if (ret != 1)
9790 return -EIO;
9791
9792 return 0;
9793 }
9794
9795
9796
9797
9798
9799
9800
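/*
 * Values for QSFP retry.
 *
 * Give up after MAX_QSFP_RETRIES attempts, waiting QSFP_RETRY_WAIT ms
 * between each attempt.
 */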
9801 #define MAX_QSFP_RETRIES 20
9802 #define QSFP_RETRY_WAIT 500
9803
9804
9805
9806
9807
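/*
 * Try to start the link: if the QSFP read test fails, schedule a
 * delayed retry (up to MAX_QSFP_RETRIES); otherwise start the link.
 */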
9808 static void try_start_link(struct hfi1_pportdata *ppd)
9809 {
9810 if (test_qsfp_read(ppd)) {
9811
9812 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9813 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9814 return;
9815 }
9816 dd_dev_info(ppd->dd,
9817 "QSFP not responding, waiting and retrying %d\n",
9818 (int)ppd->qsfp_retry_count);
9819 ppd->qsfp_retry_count++;
9820 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9821 msecs_to_jiffies(QSFP_RETRY_WAIT));
9822 return;
9823 }
9824 ppd->qsfp_retry_count = 0;
9825
9826 start_link(ppd);
9827 }
9828
9829
9830
9831
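/*
 * Workqueue function to start the link after a delay.
 */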
9832 void handle_start_link(struct work_struct *work)
9833 {
9834 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9835 start_link_work.work);
9836 try_start_link(ppd);
9837 }
9838
9839 int bringup_serdes(struct hfi1_pportdata *ppd)
9840 {
9841 struct hfi1_devdata *dd = ppd->dd;
9842 u64 guid;
9843 int ret;
9844
9845 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9846 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9847
9848 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9849 if (!guid) {
9850 if (dd->base_guid)
9851 guid = dd->base_guid + ppd->port - 1;
9852 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9853 }
9854
9855
9856 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9857
9858
9859 init_lcb(dd);
9860
9861 if (loopback) {
9862 ret = init_loopback(dd);
9863 if (ret < 0)
9864 return ret;
9865 }
9866
9867 get_port_type(ppd);
9868 if (ppd->port_type == PORT_TYPE_QSFP) {
9869 set_qsfp_int_n(ppd, 0);
9870 wait_for_qsfp_init(ppd);
9871 set_qsfp_int_n(ppd, 1);
9872 }
9873
9874 try_start_link(ppd);
9875 return 0;
9876 }
9877
9878 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9879 {
9880 struct hfi1_devdata *dd = ppd->dd;
9881
9882
9883
9884
9885
9886
9887
9888
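/*
 * Shut down the link and keep it down.  First turn off the driver's
 * willingness to bring the link up (driver_link_ready), then make
 * sure the link is not automatically restarted (link_enabled), cancel
 * any pending restart, and finally go offline.
 */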
9889 ppd->driver_link_ready = 0;
9890 ppd->link_enabled = 0;
9891
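/* prevent any further QSFP retries from starting the link */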
9892 ppd->qsfp_retry_count = MAX_QSFP_RETRIES;
9893 flush_delayed_work(&ppd->start_link_work);
9894 cancel_delayed_work_sync(&ppd->start_link_work);
9895
9896 ppd->offline_disabled_reason =
9897 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9898 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9899 OPA_LINKDOWN_REASON_REBOOT);
9900 set_link_state(ppd, HLS_DN_OFFLINE);
9901
9902
9903 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9904 cancel_work_sync(&ppd->freeze_work);
9905 }
9906
9907 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9908 {
9909 struct hfi1_pportdata *ppd;
9910 int i;
9911
9912 ppd = (struct hfi1_pportdata *)(dd + 1);
9913 for (i = 0; i < dd->num_pports; i++, ppd++) {
9914 ppd->ibport_data.rvp.rc_acks = NULL;
9915 ppd->ibport_data.rvp.rc_qacks = NULL;
9916 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9917 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9918 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9919 if (!ppd->ibport_data.rvp.rc_acks ||
9920 !ppd->ibport_data.rvp.rc_delayed_comp ||
9921 !ppd->ibport_data.rvp.rc_qacks)
9922 return -ENOMEM;
9923 }
9924
9925 return 0;
9926 }
9927
9928
9929
9930
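/*
 * hfi1_put_tid - write an entry into the receive array
 * @dd: the device data
 * @index: the index into the receive array
 * @type: the entry type (eager, expected, or invalid)
 * @pa: physical address of the receive buffer
 * @order: buffer size encoding
 */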
9931 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9932 u32 type, unsigned long pa, u16 order)
9933 {
9934 u64 reg;
9935
9936 if (!(dd->flags & HFI1_PRESENT))
9937 goto done;
9938
9939 if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9940 pa = 0;
9941 order = 0;
9942 } else if (type > PT_INVALID) {
9943 dd_dev_err(dd,
9944 "unexpected receive array type %u for index %u, not handled\n",
9945 type, index);
9946 goto done;
9947 }
9948 trace_hfi1_put_tid(dd, index, type, pa, order);
9949
9950 #define RT_ADDR_SHIFT 12
9951 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9952 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9953 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9954 << RCV_ARRAY_RT_ADDR_SHIFT;
9955 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9956 writeq(reg, dd->rcvarray_wc + (index * 8));
9957
9958 if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9959
9960
9961
9962
9963
9964 flush_wc();
9965 done:
9966 return;
9967 }
9968
9969 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9970 {
9971 struct hfi1_devdata *dd = rcd->dd;
9972 u32 i;
9973
9974
9975 for (i = rcd->eager_base; i < rcd->eager_base +
9976 rcd->egrbufs.alloced; i++)
9977 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9978
9979 for (i = rcd->expected_base;
9980 i < rcd->expected_base + rcd->expected_count; i++)
9981 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9982 }
9983
9984 static const char * const ib_cfg_name_strings[] = {
9985 "HFI1_IB_CFG_LIDLMC",
9986 "HFI1_IB_CFG_LWID_DG_ENB",
9987 "HFI1_IB_CFG_LWID_ENB",
9988 "HFI1_IB_CFG_LWID",
9989 "HFI1_IB_CFG_SPD_ENB",
9990 "HFI1_IB_CFG_SPD",
9991 "HFI1_IB_CFG_RXPOL_ENB",
9992 "HFI1_IB_CFG_LREV_ENB",
9993 "HFI1_IB_CFG_LINKLATENCY",
9994 "HFI1_IB_CFG_HRTBT",
9995 "HFI1_IB_CFG_OP_VLS",
9996 "HFI1_IB_CFG_VL_HIGH_CAP",
9997 "HFI1_IB_CFG_VL_LOW_CAP",
9998 "HFI1_IB_CFG_OVERRUN_THRESH",
9999 "HFI1_IB_CFG_PHYERR_THRESH",
10000 "HFI1_IB_CFG_LINKDEFAULT",
10001 "HFI1_IB_CFG_PKEYS",
10002 "HFI1_IB_CFG_MTU",
10003 "HFI1_IB_CFG_LSTATE",
10004 "HFI1_IB_CFG_VL_HIGH_LIMIT",
10005 "HFI1_IB_CFG_PMA_TICKS",
10006 "HFI1_IB_CFG_PORT"
10007 };
10008
10009 static const char *ib_cfg_name(int which)
10010 {
10011 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
10012 return "invalid";
10013 return ib_cfg_name_strings[which];
10014 }
10015
10016 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
10017 {
10018 struct hfi1_devdata *dd = ppd->dd;
10019 int val = 0;
10020
10021 switch (which) {
10022 case HFI1_IB_CFG_LWID_ENB:
10023 val = ppd->link_width_enabled;
10024 break;
10025 case HFI1_IB_CFG_LWID:
10026 val = ppd->link_width_active;
10027 break;
10028 case HFI1_IB_CFG_SPD_ENB:
10029 val = ppd->link_speed_enabled;
10030 break;
10031 case HFI1_IB_CFG_SPD:
10032 val = ppd->link_speed_active;
10033 break;
10034
10035 case HFI1_IB_CFG_RXPOL_ENB:
10036 case HFI1_IB_CFG_LREV_ENB:
10037 case HFI1_IB_CFG_LINKLATENCY:
10038 goto unimplemented;
10039
10040 case HFI1_IB_CFG_OP_VLS:
10041 val = ppd->actual_vls_operational;
10042 break;
10043 case HFI1_IB_CFG_VL_HIGH_CAP:
10044 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
10045 break;
10046 case HFI1_IB_CFG_VL_LOW_CAP:
10047 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
10048 break;
10049 case HFI1_IB_CFG_OVERRUN_THRESH:
10050 val = ppd->overrun_threshold;
10051 break;
10052 case HFI1_IB_CFG_PHYERR_THRESH:
10053 val = ppd->phy_error_threshold;
10054 break;
10055 case HFI1_IB_CFG_LINKDEFAULT:
10056 val = HLS_DEFAULT;
10057 break;
10058
10059 case HFI1_IB_CFG_HRTBT:
10060 case HFI1_IB_CFG_PMA_TICKS:
10061 default:
10062 unimplemented:
10063 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10064 dd_dev_info(
10065 dd,
10066 "%s: which %s: not implemented\n",
10067 __func__,
10068 ib_cfg_name(which));
10069 break;
10070 }
10071
10072 return val;
10073 }
10074
10075
10076
10077
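/*
 * The largest MAD packet size.
 */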
10078 #define MAX_MAD_PACKET 2048
10079
10080
10081
10082
10083
10084
10085
10086
10087
10088
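/*
 * Return the maximum header bytes that can go on the _wire_
 * for this device.  This count includes the ICRC which is
 * not part of an LRH length field.
 */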
10089 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10090 {
10091
10092
10093
10094
10095
10096
10097
10098
10099
10100
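/*
 * The maximum non-payload (MTU) bytes in LRH.PktLen are
 * the Receive Header Entry Size minus the PBC (or RHF) size
 * plus one DW for the ICRC appended by hardware.
 *
 * We use rcd[0] as all contexts will have the same value, and the
 * first kernel context is guaranteed to be allocated by now.
 */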
10101 return (get_hdrqentsize(dd->rcd[0]) - 2 + 1) << 2;
10102 }
10103
10104
10105
10106
10107
10108
10109
10110
10111
10112
10113
10114
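/*
 * Set Send Length
 * @ppd: per port data
 *
 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
 * registers compare against LRH.PktLen, so use the max bytes included
 * in the LRH.
 */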
10115 static void set_send_length(struct hfi1_pportdata *ppd)
10116 {
10117 struct hfi1_devdata *dd = ppd->dd;
10118 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10119 u32 maxvlmtu = dd->vld[15].mtu;
10120 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10121 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10122 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10123 int i, j;
10124 u32 thres;
10125
10126 for (i = 0; i < ppd->vls_supported; i++) {
10127 if (dd->vld[i].mtu > maxvlmtu)
10128 maxvlmtu = dd->vld[i].mtu;
10129 if (i <= 3)
10130 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10131 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10132 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10133 else
10134 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10135 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10136 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10137 }
10138 write_csr(dd, SEND_LEN_CHECK0, len1);
10139 write_csr(dd, SEND_LEN_CHECK1, len2);
10140
10141
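/* adjust kernel credit return thresholds based on new MTUs */
/* all kernel receive contexts have the same hdrqentsize */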
10142 for (i = 0; i < ppd->vls_supported; i++) {
10143 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10144 sc_mtu_to_threshold(dd->vld[i].sc,
10145 dd->vld[i].mtu,
10146 get_hdrqentsize(dd->rcd[0])));
10147 for (j = 0; j < INIT_SC_PER_VL; j++)
10148 sc_set_cr_threshold(
10149 pio_select_send_context_vl(dd, j, i),
10150 thres);
10151 }
10152 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10153 sc_mtu_to_threshold(dd->vld[15].sc,
10154 dd->vld[15].mtu,
10155 dd->rcd[0]->rcvhdrqentsize));
10156 sc_set_cr_threshold(dd->vld[15].sc, thres);
10157
10158
10159 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10160 (ilog2(maxvlmtu >> 8) + 1);
10161 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10162 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10163 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10164 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10165 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10166 }
10167
10168 static void set_lidlmc(struct hfi1_pportdata *ppd)
10169 {
10170 int i;
10171 u64 sreg = 0;
10172 struct hfi1_devdata *dd = ppd->dd;
10173 u32 mask = ~((1U << ppd->lmc) - 1);
10174 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10175 u32 lid;
10176
10177
10178
10179
10180
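/*
 * Program 0 into the CSR if the port LID is in the multicast/extended
 * range, preventing 9B packets from being sent out for large LIDs.
 */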
10181 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10182 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10183 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10184 c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10185 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10186 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10187 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10188 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10189
10190
10191
10192
10193 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10194 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10195 (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10196 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10197
10198 for (i = 0; i < chip_send_contexts(dd); i++) {
10199 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10200 i, (u32)sreg);
10201 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10202 }
10203
10204
10205 sdma_update_lmc(dd, mask, lid);
10206 }
10207
10208 static const char *state_completed_string(u32 completed)
10209 {
10210 static const char * const state_completed[] = {
10211 "EstablishComm",
10212 "OptimizeEQ",
10213 "VerifyCap"
10214 };
10215
10216 if (completed < ARRAY_SIZE(state_completed))
10217 return state_completed[completed];
10218
10219 return "unknown";
10220 }
10221
10222 static const char all_lanes_dead_timeout_expired[] =
10223 "All lanes were inactive – was the interconnect media removed?";
10224 static const char tx_out_of_policy[] =
10225 "Passing lanes on local port do not meet the local link width policy";
10226 static const char no_state_complete[] =
10227 "State timeout occurred before link partner completed the state";
10228 static const char * const state_complete_reasons[] = {
10229 [0x00] = "Reason unknown",
10230 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10231 [0x02] = "Link partner reported failure",
10232 [0x10] = "Unable to achieve frame sync on any lane",
10233 [0x11] =
10234 "Unable to find a common bit rate with the link partner",
10235 [0x12] =
10236 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10237 [0x13] =
10238 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10239 [0x14] = no_state_complete,
10240 [0x15] =
10241 "State timeout occurred before link partner identified equalization presets",
10242 [0x16] =
10243 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10244 [0x17] = tx_out_of_policy,
10245 [0x20] = all_lanes_dead_timeout_expired,
10246 [0x21] =
10247 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10248 [0x22] = no_state_complete,
10249 [0x23] =
10250 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10251 [0x24] = tx_out_of_policy,
10252 [0x30] = all_lanes_dead_timeout_expired,
10253 [0x31] =
10254 "State timeout occurred waiting for host to process received frames",
10255 [0x32] = no_state_complete,
10256 [0x33] =
10257 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10258 [0x34] = tx_out_of_policy,
10259 [0x35] = "Negotiated link width is mutually exclusive",
10260 [0x36] =
10261 "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10262 [0x37] = "Unable to resolve secure data exchange",
10263 };
10264
10265 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10266 u32 code)
10267 {
10268 const char *str = NULL;
10269
10270 if (code < ARRAY_SIZE(state_complete_reasons))
10271 str = state_complete_reasons[code];
10272
10273 if (str)
10274 return str;
10275 return "Reserved";
10276 }
10277
10278
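/* describe the given last state complete frame */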
10279 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10280 const char *prefix)
10281 {
10282 struct hfi1_devdata *dd = ppd->dd;
10283 u32 success;
10284 u32 state;
10285 u32 reason;
10286 u32 lanes;
10287
10288
10289
10290
10291
10292
10293
10294
10295
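/*
 * Decode frame:
 *	[ 0: 0] - success
 *	[ 3: 1] - state
 *	[15: 8] - reason code
 *	[31:16] - lanes
 */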
10296 success = frame & 0x1;
10297 state = (frame >> 1) & 0x7;
10298 reason = (frame >> 8) & 0xff;
10299 lanes = (frame >> 16) & 0xffff;
10300
10301 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10302 prefix, frame);
10303 dd_dev_err(dd, " last reported state: %s (0x%x)\n",
10304 state_completed_string(state), state);
10305 dd_dev_err(dd, " state successfully completed: %s\n",
10306 success ? "yes" : "no");
10307 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10308 reason, state_complete_reason_code_string(ppd, reason));
10309 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
10310 }
10311
10312
10313
10314
10315
10316
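/*
 * Read the last state complete frames and explain them.  This routine
 * expects to be called if the link went down during link negotiation
 * and initialization (LNI).  That is, anywhere between polling and link
 * up.
 */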
10317 static void check_lni_states(struct hfi1_pportdata *ppd)
10318 {
10319 u32 last_local_state;
10320 u32 last_remote_state;
10321
10322 read_last_local_state(ppd->dd, &last_local_state);
10323 read_last_remote_state(ppd->dd, &last_remote_state);
10324
10325
10326
10327
10328
10329
10330 if (last_local_state == 0 && last_remote_state == 0)
10331 return;
10332
10333 decode_state_complete(ppd, last_local_state, "transmitted");
10334 decode_state_complete(ppd, last_remote_state, "received");
10335 }
10336
10337
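/* wait, up to wait_ms, for LINK_TRANSFER_ACTIVE to go to 1 */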
10338 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10339 {
10340 u64 reg;
10341 unsigned long timeout;
10342
10343
10344 timeout = jiffies + msecs_to_jiffies(wait_ms);
10345 while (1) {
10346 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10347 if (reg)
10348 break;
10349 if (time_after(jiffies, timeout)) {
10350 dd_dev_err(dd,
10351 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10352 return -ETIMEDOUT;
10353 }
10354 udelay(2);
10355 }
10356 return 0;
10357 }
10358
10359
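/*
 * Force the logical link state down by bouncing the LCB in loopback.
 * Called when the logical link state did not go down on its own.
 */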
10360 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10361 {
10362 struct hfi1_devdata *dd = ppd->dd;
10363
10364
10365
10366
10367 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10368 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10369 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10370
10371 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10372 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10373 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10374 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10375
10376 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10377 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10378 udelay(3);
10379 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10380 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10381
10382 wait_link_transfer_active(dd, 100);
10383
10384
10385
10386
10387 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10388 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10389 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10390
10391 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10392 }
10393
10394
10395
10396
10397
10398
10399
10400
10401
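/*
 * Helper for set_link_state().  Do not call except from that routine.
 * Expects ppd->hls_lock to be held.
 *
 * @rem_reason value to be sent to the neighbor
 *
 * LinkDownReasons only set if transition succeeds.
 */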
10402 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10403 {
10404 struct hfi1_devdata *dd = ppd->dd;
10405 u32 previous_state;
10406 int offline_state_ret;
10407 int ret;
10408
10409 update_lcb_cache(dd);
10410
10411 previous_state = ppd->host_link_state;
10412 ppd->host_link_state = HLS_GOING_OFFLINE;
10413
10414
10415 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10416
10417 if (ret != HCMD_SUCCESS) {
10418 dd_dev_err(dd,
10419 "Failed to transition to Offline link state, return %d\n",
10420 ret);
10421 return -EINVAL;
10422 }
10423 if (ppd->offline_disabled_reason ==
10424 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10425 ppd->offline_disabled_reason =
10426 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10427
10428 offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10429 if (offline_state_ret < 0)
10430 return offline_state_ret;
10431
10432
10433 if (ppd->port_type == PORT_TYPE_QSFP &&
10434 ppd->qsfp_info.limiting_active &&
10435 qsfp_mod_present(ppd)) {
10436 int ret;
10437
10438 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10439 if (ret == 0) {
10440 set_qsfp_tx(ppd, 0);
10441 release_chip_resource(dd, qsfp_resource(dd));
10442 } else {
10443
10444 dd_dev_err(dd,
10445 "Unable to acquire lock to turn off QSFP TX\n");
10446 }
10447 }
10448
10449
10450
10451
10452
10453 if (offline_state_ret != PLS_OFFLINE_QUIET) {
10454 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10455 if (ret < 0)
10456 return ret;
10457 }
10458
10459
10460
10461
10462
10463 set_host_lcb_access(dd);
10464 write_csr(dd, DC_LCB_ERR_EN, ~0ull);
10465
10466
10467 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10468 if (ret)
10469 force_logical_link_state_down(ppd);
10470
10471 ppd->host_link_state = HLS_LINK_COOLDOWN;
10472 update_statusp(ppd, IB_PORT_DOWN);
10473
10474
10475
10476
10477
10478
10479
10480
10481
10482
10483 ret = wait_fm_ready(dd, 7000);
10484 if (ret) {
10485 dd_dev_err(dd,
10486 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10487
10488 ppd->host_link_state = HLS_DN_OFFLINE;
10489 return ret;
10490 }
10491
10492
10493
10494
10495
10496
10497
10498 ppd->host_link_state = HLS_DN_OFFLINE;
10499 if (previous_state & HLS_UP) {
10500
10501 handle_linkup_change(dd, 0);
10502 } else if (previous_state
10503 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10504
10505 check_lni_states(ppd);
10506
10507
10508 ppd->qsfp_info.reset_needed = 0;
10509 }
10510
10511
10512 ppd->link_width_active = 0;
10513 ppd->link_width_downgrade_tx_active = 0;
10514 ppd->link_width_downgrade_rx_active = 0;
10515 ppd->current_egress_rate = 0;
10516 return 0;
10517 }
10518
10519
10520 static const char *link_state_name(u32 state)
10521 {
10522 const char *name;
10523 int n = ilog2(state);
10524 static const char * const names[] = {
10525 [__HLS_UP_INIT_BP] = "INIT",
10526 [__HLS_UP_ARMED_BP] = "ARMED",
10527 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10528 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10529 [__HLS_DN_POLL_BP] = "POLL",
10530 [__HLS_DN_DISABLE_BP] = "DISABLE",
10531 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10532 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10533 [__HLS_GOING_UP_BP] = "GOING_UP",
10534 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10535 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10536 };
10537
10538 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10539 return name ? name : "unknown";
10540 }
10541
10542
10543 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10544 {
10545 if (state == HLS_UP_INIT) {
10546 switch (ppd->linkinit_reason) {
10547 case OPA_LINKINIT_REASON_LINKUP:
10548 return "(LINKUP)";
10549 case OPA_LINKINIT_REASON_FLAPPING:
10550 return "(FLAPPING)";
10551 case OPA_LINKINIT_OUTSIDE_POLICY:
10552 return "(OUTSIDE_POLICY)";
10553 case OPA_LINKINIT_QUARANTINED:
10554 return "(QUARANTINED)";
10555 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10556 return "(INSUFIC_CAPABILITY)";
10557 default:
10558 break;
10559 }
10560 }
10561 return "";
10562 }
10563
10564
10565
10566
10567
10568
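/*
 * driver_pstate - convert the driver's notion of a port's
 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE).
 * Return -1 (converted to a u32) to indicate error.
 */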
10569 u32 driver_pstate(struct hfi1_pportdata *ppd)
10570 {
10571 switch (ppd->host_link_state) {
10572 case HLS_UP_INIT:
10573 case HLS_UP_ARMED:
10574 case HLS_UP_ACTIVE:
10575 return IB_PORTPHYSSTATE_LINKUP;
10576 case HLS_DN_POLL:
10577 return IB_PORTPHYSSTATE_POLLING;
10578 case HLS_DN_DISABLE:
10579 return IB_PORTPHYSSTATE_DISABLED;
10580 case HLS_DN_OFFLINE:
10581 return OPA_PORTPHYSSTATE_OFFLINE;
10582 case HLS_VERIFY_CAP:
10583 return IB_PORTPHYSSTATE_TRAINING;
10584 case HLS_GOING_UP:
10585 return IB_PORTPHYSSTATE_TRAINING;
10586 case HLS_GOING_OFFLINE:
10587 return OPA_PORTPHYSSTATE_OFFLINE;
10588 case HLS_LINK_COOLDOWN:
10589 return OPA_PORTPHYSSTATE_OFFLINE;
10590 case HLS_DN_DOWNDEF:
10591 default:
10592 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10593 ppd->host_link_state);
10594 return -1;
10595 }
10596 }
10597
10598
10599
10600
10601
10602
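/*
 * driver_lstate - convert the driver's notion of a port's
 * state (an HLS_*) into a logical state (an IB_PORT_*).
 * Return -1 (converted to a u32) to indicate error.
 */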
10603 u32 driver_lstate(struct hfi1_pportdata *ppd)
10604 {
10605 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10606 return IB_PORT_DOWN;
10607
10608 switch (ppd->host_link_state & HLS_UP) {
10609 case HLS_UP_INIT:
10610 return IB_PORT_INIT;
10611 case HLS_UP_ARMED:
10612 return IB_PORT_ARMED;
10613 case HLS_UP_ACTIVE:
10614 return IB_PORT_ACTIVE;
10615 default:
10616 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10617 ppd->host_link_state);
10618 return -1;
10619 }
10620 }
10621
10622 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10623 u8 neigh_reason, u8 rem_reason)
10624 {
10625 if (ppd->local_link_down_reason.latest == 0 &&
10626 ppd->neigh_link_down_reason.latest == 0) {
10627 ppd->local_link_down_reason.latest = lcl_reason;
10628 ppd->neigh_link_down_reason.latest = neigh_reason;
10629 ppd->remote_link_down_reason = rem_reason;
10630 }
10631 }
10632
10633
10634
10635
10636
10637
10638
10639
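/*
 * data_vls_operational() - verify that each supported data VL has its
 * BCT credits and MTU configured consistently: either both set or both
 * clear.  Returns true if OK, false otherwise.
 */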
10640 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10641 {
10642 int i;
10643 u64 reg;
10644
10645 if (!ppd->actual_vls_operational)
10646 return false;
10647
10648 for (i = 0; i < ppd->vls_supported; i++) {
10649 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10650 if ((reg && !ppd->dd->vld[i].mtu) ||
10651 (!reg && ppd->dd->vld[i].mtu))
10652 return false;
10653 }
10654
10655 return true;
10656 }
10657
10658
10659
10660
10661
10662
10663
10664
10665
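/*
 * Change the physical and/or logical link state.
 *
 * Do not call this routine while inside an interrupt.  It contains
 * calls to routines that can take multiple seconds to finish.
 *
 * Returns 0 on success, -errno on failure.
 */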
10666 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10667 {
10668 struct hfi1_devdata *dd = ppd->dd;
10669 struct ib_event event = {.device = NULL};
10670 int ret1, ret = 0;
10671 int orig_new_state, poll_bounce;
10672
10673 mutex_lock(&ppd->hls_lock);
10674
10675 orig_new_state = state;
10676 if (state == HLS_DN_DOWNDEF)
10677 state = HLS_DEFAULT;
10678
10679
10680 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10681 state == HLS_DN_POLL;
10682
10683 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10684 link_state_name(ppd->host_link_state),
10685 link_state_name(orig_new_state),
10686 poll_bounce ? "(bounce) " : "",
10687 link_state_reason_name(ppd, state));
10688
10689
10690
10691
10692
10693
10694 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10695 ppd->is_sm_config_started = 0;
10696
10697
10698
10699
10700
10701 if (ppd->host_link_state == state && !poll_bounce)
10702 goto done;
10703
10704 switch (state) {
10705 case HLS_UP_INIT:
10706 if (ppd->host_link_state == HLS_DN_POLL &&
10707 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10708
10709
10710
10711
10712
10713
10714
10715
10716 } else if (ppd->host_link_state != HLS_GOING_UP) {
10717 goto unexpected;
10718 }
10719
10720
10721
10722
10723
10724
10725 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10726 if (ret) {
10727 dd_dev_err(dd,
10728 "%s: physical state did not change to LINK-UP\n",
10729 __func__);
10730 break;
10731 }
10732
10733 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10734 if (ret) {
10735 dd_dev_err(dd,
10736 "%s: logical state did not change to INIT\n",
10737 __func__);
10738 break;
10739 }
10740
10741
10742 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10743 ppd->linkinit_reason =
10744 OPA_LINKINIT_REASON_LINKUP;
10745
10746
10747 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10748
10749 handle_linkup_change(dd, 1);
10750 pio_kernel_linkup(dd);
10751
10752
10753
10754
10755
10756
10757 update_xmit_counters(ppd, ppd->link_width_active);
10758
10759 ppd->host_link_state = HLS_UP_INIT;
10760 update_statusp(ppd, IB_PORT_INIT);
10761 break;
10762 case HLS_UP_ARMED:
10763 if (ppd->host_link_state != HLS_UP_INIT)
10764 goto unexpected;
10765
10766 if (!data_vls_operational(ppd)) {
10767 dd_dev_err(dd,
10768 "%s: Invalid data VL credits or mtu\n",
10769 __func__);
10770 ret = -EINVAL;
10771 break;
10772 }
10773
10774 set_logical_state(dd, LSTATE_ARMED);
10775 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10776 if (ret) {
10777 dd_dev_err(dd,
10778 "%s: logical state did not change to ARMED\n",
10779 __func__);
10780 break;
10781 }
10782 ppd->host_link_state = HLS_UP_ARMED;
10783 update_statusp(ppd, IB_PORT_ARMED);
10784
10785
10786
10787
10788
10789 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10790 ppd->neighbor_normal = 1;
10791 break;
10792 case HLS_UP_ACTIVE:
10793 if (ppd->host_link_state != HLS_UP_ARMED)
10794 goto unexpected;
10795
10796 set_logical_state(dd, LSTATE_ACTIVE);
10797 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10798 if (ret) {
10799 dd_dev_err(dd,
10800 "%s: logical state did not change to ACTIVE\n",
10801 __func__);
10802 } else {
10803
10804 sdma_all_running(dd);
10805 ppd->host_link_state = HLS_UP_ACTIVE;
10806 update_statusp(ppd, IB_PORT_ACTIVE);
10807
10808
10809 event.device = &dd->verbs_dev.rdi.ibdev;
10810 event.element.port_num = ppd->port;
10811 event.event = IB_EVENT_PORT_ACTIVE;
10812 }
10813 break;
10814 case HLS_DN_POLL:
10815 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10816 ppd->host_link_state == HLS_DN_OFFLINE) &&
10817 dd->dc_shutdown)
10818 dc_start(dd);
10819
10820 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10821
10822 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10823 u8 tmp = ppd->link_enabled;
10824
10825 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10826 if (ret) {
10827 ppd->link_enabled = tmp;
10828 break;
10829 }
10830 ppd->remote_link_down_reason = 0;
10831
10832 if (ppd->driver_link_ready)
10833 ppd->link_enabled = 1;
10834 }
10835
10836 set_all_slowpath(ppd->dd);
10837 ret = set_local_link_attributes(ppd);
10838 if (ret)
10839 break;
10840
10841 ppd->port_error_action = 0;
10842
10843 if (quick_linkup) {
10844
10845 ret = do_quick_linkup(dd);
10846 } else {
10847 ret1 = set_physical_link_state(dd, PLS_POLLING);
10848 if (!ret1)
10849 ret1 = wait_phys_link_out_of_offline(ppd,
10850 3000);
10851 if (ret1 != HCMD_SUCCESS) {
10852 dd_dev_err(dd,
10853 "Failed to transition to Polling link state, return 0x%x\n",
10854 ret1);
10855 ret = -EINVAL;
10856 }
10857 }
10858
10859
10860
10861
10862
10863
10864
10865 ppd->host_link_state = HLS_DN_POLL;
10866 ppd->offline_disabled_reason =
10867 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10868
10869
10870
10871
10872 if (ret)
10873 goto_offline(ppd, 0);
10874 else
10875 log_physical_state(ppd, PLS_POLLING);
10876 break;
10877 case HLS_DN_DISABLE:
10878
10879 ppd->link_enabled = 0;
10880
10881
10882
10883
10884 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10885 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10886 if (ret)
10887 break;
10888 ppd->remote_link_down_reason = 0;
10889 }
10890
10891 if (!dd->dc_shutdown) {
10892 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10893 if (ret1 != HCMD_SUCCESS) {
10894 dd_dev_err(dd,
10895 "Failed to transition to Disabled link state, return 0x%x\n",
10896 ret1);
10897 ret = -EINVAL;
10898 break;
10899 }
10900 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10901 if (ret) {
10902 dd_dev_err(dd,
10903 "%s: physical state did not change to DISABLED\n",
10904 __func__);
10905 break;
10906 }
10907 dc_shutdown(dd);
10908 }
10909 ppd->host_link_state = HLS_DN_DISABLE;
10910 break;
10911 case HLS_DN_OFFLINE:
10912 if (ppd->host_link_state == HLS_DN_DISABLE)
10913 dc_start(dd);
10914
10915
10916 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10917 if (!ret)
10918 ppd->remote_link_down_reason = 0;
10919 break;
10920 case HLS_VERIFY_CAP:
10921 if (ppd->host_link_state != HLS_DN_POLL)
10922 goto unexpected;
10923 ppd->host_link_state = HLS_VERIFY_CAP;
10924 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10925 break;
10926 case HLS_GOING_UP:
10927 if (ppd->host_link_state != HLS_VERIFY_CAP)
10928 goto unexpected;
10929
10930 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10931 if (ret1 != HCMD_SUCCESS) {
10932 dd_dev_err(dd,
10933 "Failed to transition to link up state, return 0x%x\n",
10934 ret1);
10935 ret = -EINVAL;
10936 break;
10937 }
10938 ppd->host_link_state = HLS_GOING_UP;
10939 break;
10940
10941 case HLS_GOING_OFFLINE:
10942 case HLS_LINK_COOLDOWN:
10943 default:
10944 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10945 __func__, state);
10946 ret = -EINVAL;
10947 break;
10948 }
10949
10950 goto done;
10951
10952 unexpected:
10953 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10954 __func__, link_state_name(ppd->host_link_state),
10955 link_state_name(state));
10956 ret = -EINVAL;
10957
10958 done:
10959 mutex_unlock(&ppd->hls_lock);
10960
10961 if (event.device)
10962 ib_dispatch_event(&event);
10963
10964 return ret;
10965 }
10966
10967 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10968 {
10969 u64 reg;
10970 int ret = 0;
10971
10972 switch (which) {
10973 case HFI1_IB_CFG_LIDLMC:
10974 set_lidlmc(ppd);
10975 break;
10976 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10977
10978
10979
10980
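/*
 * The VL Arbitrator high limit is sent in units of 4k
 * bytes, while HFI stores it in units of 64 bytes.
 */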
10981 val *= 4096 / 64;
10982 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10983 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10984 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10985 break;
10986 case HFI1_IB_CFG_LINKDEFAULT:
10987
10988 if (val != HLS_DN_POLL)
10989 ret = -EINVAL;
10990 break;
10991 case HFI1_IB_CFG_OP_VLS:
10992 if (ppd->vls_operational != val) {
10993 ppd->vls_operational = val;
10994 if (!ppd->port)
10995 ret = -EINVAL;
10996 }
10997 break;
10998
10999
11000
11001
11002
11003
11004
11005
11006 case HFI1_IB_CFG_LWID_ENB:
11007 ppd->link_width_enabled = val & ppd->link_width_supported;
11008 break;
11009 case HFI1_IB_CFG_LWID_DG_ENB:
11010 ppd->link_width_downgrade_enabled =
11011 val & ppd->link_width_downgrade_supported;
11012 break;
11013 case HFI1_IB_CFG_SPD_ENB:
11014 ppd->link_speed_enabled = val & ppd->link_speed_supported;
11015 break;
11016 case HFI1_IB_CFG_OVERRUN_THRESH:
11017
11018
11019
11020
11021 ppd->overrun_threshold = val;
11022 break;
11023 case HFI1_IB_CFG_PHYERR_THRESH:
11024
11025
11026
11027
11028 ppd->phy_error_threshold = val;
11029 break;
11030
11031 case HFI1_IB_CFG_MTU:
11032 set_send_length(ppd);
11033 break;
11034
11035 case HFI1_IB_CFG_PKEYS:
11036 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
11037 set_partition_keys(ppd);
11038 break;
11039
11040 default:
11041 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
11042 dd_dev_info(ppd->dd,
11043 "%s: which %s, val 0x%x: not implemented\n",
11044 __func__, ib_cfg_name(which), val);
11045 break;
11046 }
11047 return ret;
11048 }
11049
11050
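/* begin functions related to vl arbitration table caching */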
11051 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
11052 {
11053 int i;
11054
11055 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11056 VL_ARB_LOW_PRIO_TABLE_SIZE);
11057 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11058 VL_ARB_HIGH_PRIO_TABLE_SIZE);
11059
11060
11061
11062
11063
11064
11065
11066
11067
11068
11069
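/*
 * Note that we always return values directly from the
 * 'vl_arb_cache' (and do no CSR reads) in response to a
 * 'Get(VLArbTable)'.  This is obviously correct after a
 * 'Set(VLArbTable)', since the cache will then be up to
 * date.  But it's also correct prior to any 'Set(VLArbTable)'
 * since then both the cache and the relevant h/w registers
 * will be zeroed.
 */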
11070 for (i = 0; i < MAX_PRIO_TABLE; i++)
11071 spin_lock_init(&ppd->vl_arb_cache[i].lock);
11072 }
11073
11074
11075
11076
11077
11078
11079
11080 static inline struct vl_arb_cache *
11081 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11082 {
11083 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11084 return NULL;
11085 spin_lock(&ppd->vl_arb_cache[idx].lock);
11086 return &ppd->vl_arb_cache[idx];
11087 }
11088
11089 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11090 {
11091 spin_unlock(&ppd->vl_arb_cache[idx].lock);
11092 }
11093
11094 static void vl_arb_get_cache(struct vl_arb_cache *cache,
11095 struct ib_vl_weight_elem *vl)
11096 {
11097 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11098 }
11099
11100 static void vl_arb_set_cache(struct vl_arb_cache *cache,
11101 struct ib_vl_weight_elem *vl)
11102 {
11103 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11104 }
11105
11106 static int vl_arb_match_cache(struct vl_arb_cache *cache,
11107 struct ib_vl_weight_elem *vl)
11108 {
11109 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11110 }
11111
11112
11113
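/*
 * Write the given VL arbitration table to the hardware.  On non-A0
 * hardware with the link up, the data VLs are stopped and drained
 * first, then reopened afterward.
 */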
11114 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11115 u32 size, struct ib_vl_weight_elem *vl)
11116 {
11117 struct hfi1_devdata *dd = ppd->dd;
11118 u64 reg;
11119 unsigned int i, is_up = 0;
11120 int drain, ret = 0;
11121
11122 mutex_lock(&ppd->hls_lock);
11123
11124 if (ppd->host_link_state & HLS_UP)
11125 is_up = 1;
11126
11127 drain = !is_ax(dd) && is_up;
11128
11129 if (drain)
11130
11131
11132
11133
11134
11135
11136 ret = stop_drain_data_vls(dd);
11137
11138 if (ret) {
11139 dd_dev_err(
11140 dd,
11141 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11142 __func__);
11143 goto err;
11144 }
11145
11146 for (i = 0; i < size; i++, vl++) {
11147
11148
11149
11150
11151 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11152 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11153 | (((u64)vl->weight
11154 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11155 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11156 write_csr(dd, target + (i * 8), reg);
11157 }
11158 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11159
11160 if (drain)
11161 open_fill_data_vls(dd);
11162
11163 err:
11164 mutex_unlock(&ppd->hls_lock);
11165
11166 return ret;
11167 }
11168
11169
11170
11171
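/*
 * Read one credit merge VL register.
 */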
11172 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11173 struct vl_limit *vll)
11174 {
11175 u64 reg = read_csr(dd, csr);
11176
11177 vll->dedicated = cpu_to_be16(
11178 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11179 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11180 vll->shared = cpu_to_be16(
11181 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11182 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11183 }
11184
11185
11186
11187
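/*
 * Read the current credit merge limits.
 */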
11188 static int get_buffer_control(struct hfi1_devdata *dd,
11189 struct buffer_control *bc, u16 *overall_limit)
11190 {
11191 u64 reg;
11192 int i;
11193
11194
11195 memset(bc, 0, sizeof(*bc));
11196
11197
11198 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11199 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11200
11201
11202 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11203
11204 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11205 bc->overall_shared_limit = cpu_to_be16(
11206 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11207 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11208 if (overall_limit)
11209 *overall_limit = (reg
11210 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11211 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11212 return sizeof(struct buffer_control);
11213 }
11214
11215 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11216 {
11217 u64 reg;
11218 int i;
11219
11220
11221 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11222 for (i = 0; i < sizeof(u64); i++) {
11223 u8 byte = *(((u8 *)&reg) + i);
11224
11225 dp->vlnt[2 * i] = byte & 0xf;
11226 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11227 }
11228
11229 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11230 for (i = 0; i < sizeof(u64); i++) {
11231 u8 byte = *(((u8 *)&reg) + i);
11232
11233 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11234 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11235 }
11236 return sizeof(struct sc2vlnt);
11237 }
11238
11239 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11240 struct ib_vl_weight_elem *vl)
11241 {
11242 unsigned int i;
11243
11244 for (i = 0; i < nelems; i++, vl++) {
11245 vl->vl = 0xf;
11246 vl->weight = 0;
11247 }
11248 }
11249
11250 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11251 {
11252 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11253 DC_SC_VL_VAL(15_0,
11254 0, dp->vlnt[0] & 0xf,
11255 1, dp->vlnt[1] & 0xf,
11256 2, dp->vlnt[2] & 0xf,
11257 3, dp->vlnt[3] & 0xf,
11258 4, dp->vlnt[4] & 0xf,
11259 5, dp->vlnt[5] & 0xf,
11260 6, dp->vlnt[6] & 0xf,
11261 7, dp->vlnt[7] & 0xf,
11262 8, dp->vlnt[8] & 0xf,
11263 9, dp->vlnt[9] & 0xf,
11264 10, dp->vlnt[10] & 0xf,
11265 11, dp->vlnt[11] & 0xf,
11266 12, dp->vlnt[12] & 0xf,
11267 13, dp->vlnt[13] & 0xf,
11268 14, dp->vlnt[14] & 0xf,
11269 15, dp->vlnt[15] & 0xf));
11270 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11271 DC_SC_VL_VAL(31_16,
11272 16, dp->vlnt[16] & 0xf,
11273 17, dp->vlnt[17] & 0xf,
11274 18, dp->vlnt[18] & 0xf,
11275 19, dp->vlnt[19] & 0xf,
11276 20, dp->vlnt[20] & 0xf,
11277 21, dp->vlnt[21] & 0xf,
11278 22, dp->vlnt[22] & 0xf,
11279 23, dp->vlnt[23] & 0xf,
11280 24, dp->vlnt[24] & 0xf,
11281 25, dp->vlnt[25] & 0xf,
11282 26, dp->vlnt[26] & 0xf,
11283 27, dp->vlnt[27] & 0xf,
11284 28, dp->vlnt[28] & 0xf,
11285 29, dp->vlnt[29] & 0xf,
11286 30, dp->vlnt[30] & 0xf,
11287 31, dp->vlnt[31] & 0xf));
11288 }
11289
11290 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11291 u16 limit)
11292 {
11293 if (limit != 0)
11294 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11295 what, (int)limit, idx);
11296 }
11297
11298
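/* change only the shared limit portion of SendCmGlobalCredit */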
11299 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11300 {
11301 u64 reg;
11302
11303 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11304 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11305 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11306 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11307 }
11308
11309
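/* change only the total credit limit portion of SendCmGlobalCredit */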
11310 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11311 {
11312 u64 reg;
11313
11314 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11315 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11316 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11317 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11318 }
11319
11320
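/* set the given per-VL shared limit */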
11321 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11322 {
11323 u64 reg;
11324 u32 addr;
11325
11326 if (vl < TXE_NUM_DATA_VL)
11327 addr = SEND_CM_CREDIT_VL + (8 * vl);
11328 else
11329 addr = SEND_CM_CREDIT_VL15;
11330
11331 reg = read_csr(dd, addr);
11332 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11333 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11334 write_csr(dd, addr, reg);
11335 }
11336
11337
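/* set the given per-VL dedicated limit */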
11338 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11339 {
11340 u64 reg;
11341 u32 addr;
11342
11343 if (vl < TXE_NUM_DATA_VL)
11344 addr = SEND_CM_CREDIT_VL + (8 * vl);
11345 else
11346 addr = SEND_CM_CREDIT_VL15;
11347
11348 reg = read_csr(dd, addr);
11349 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11350 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11351 write_csr(dd, addr, reg);
11352 }
11353
11354
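/* spin until the given per-VL status mask bits clear */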
11355 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11356 const char *which)
11357 {
11358 unsigned long timeout;
11359 u64 reg;
11360
11361 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11362 while (1) {
11363 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11364
11365 if (reg == 0)
11366 return;
11367 if (time_after(jiffies, timeout))
11368 break;
11369 udelay(1);
11370 }
11371
11372 dd_dev_err(dd,
11373 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11374 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11375
11376
11377
11378
11379 dd_dev_err(dd,
11380 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
11381 }
11382
11383
11384
11385
11386
11387
11388
11389
11390
11391
11392
11393
11394
11395
11396
11397
11398
11399
11400
11401
11402
11403
11404
11405
11406
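/*
 * The number of credits on the VLs may be changed while everything
 * is "live", but the following algorithm must be followed due to
 * how the hardware is actually implemented.  In particular,
 * Return_Credit_Status[] is the only correct status check.
 *
 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
 *     set Global_Shared_Credit_Limit = 0
 *     use_all_mask = 1
 * mask0 = all VLs that are changing either dedicated or shared limits
 * set Shared_Limit[mask0] = 0
 * spin until Return_Credit_Status[use_all_mask ? all VL : mask0] == 0
 * if (changing any dedicated limit)
 *     mask1 = all VLs that are lowering dedicated limits
 *     lower Dedicated_Limit[mask1]
 *     spin until Return_Credit_Status[mask1] == 0
 *     raise Dedicated_Limits
 * raise Shared_Limits
 * raise Global_Shared_Credit_Limit
 */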
11407 int set_buffer_control(struct hfi1_pportdata *ppd,
11408 struct buffer_control *new_bc)
11409 {
11410 struct hfi1_devdata *dd = ppd->dd;
11411 u64 changing_mask, ld_mask, stat_mask;
11412 int change_count;
11413 int i, use_all_mask;
11414 int this_shared_changing;
11415 int vl_count = 0, ret;
11416
11417
11418
11419
11420 int any_shared_limit_changing;
11421 struct buffer_control cur_bc;
11422 u8 changing[OPA_MAX_VLS];
11423 u8 lowering_dedicated[OPA_MAX_VLS];
11424 u16 cur_total;
11425 u32 new_total = 0;
11426 const u64 all_mask =
11427 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11428 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11429 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11430 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11431 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11432 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11433 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11434 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11435 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11436
11437 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11438 #define NUM_USABLE_VLS 16
11439
11440
11441 for (i = 0; i < OPA_MAX_VLS; i++) {
11442 if (valid_vl(i)) {
11443 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11444 continue;
11445 }
11446 nonzero_msg(dd, i, "dedicated",
11447 be16_to_cpu(new_bc->vl[i].dedicated));
11448 nonzero_msg(dd, i, "shared",
11449 be16_to_cpu(new_bc->vl[i].shared));
11450 new_bc->vl[i].dedicated = 0;
11451 new_bc->vl[i].shared = 0;
11452 }
11453 new_total += be16_to_cpu(new_bc->overall_shared_limit);
11454
11455
11456 get_buffer_control(dd, &cur_bc, &cur_total);
11457
11458
11459
11460
11461 memset(changing, 0, sizeof(changing));
11462 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11463
11464
11465
11466
11467 stat_mask =
11468 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11469 changing_mask = 0;
11470 ld_mask = 0;
11471 change_count = 0;
11472 any_shared_limit_changing = 0;
11473 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11474 if (!valid_vl(i))
11475 continue;
11476 this_shared_changing = new_bc->vl[i].shared
11477 != cur_bc.vl[i].shared;
11478 if (this_shared_changing)
11479 any_shared_limit_changing = 1;
11480 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11481 this_shared_changing) {
11482 changing[i] = 1;
11483 changing_mask |= stat_mask;
11484 change_count++;
11485 }
11486 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11487 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11488 lowering_dedicated[i] = 1;
11489 ld_mask |= stat_mask;
11490 }
11491 }
11492
11493
11494 if (new_total > cur_total)
11495 set_global_limit(dd, new_total);
11496
11497
11498
11499
11500 use_all_mask = 0;
11501 if ((be16_to_cpu(new_bc->overall_shared_limit) <
11502 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11503 (is_ax(dd) && any_shared_limit_changing)) {
11504 set_global_shared(dd, 0);
11505 cur_bc.overall_shared_limit = 0;
11506 use_all_mask = 1;
11507 }
11508
11509 for (i = 0; i < NUM_USABLE_VLS; i++) {
11510 if (!valid_vl(i))
11511 continue;
11512
11513 if (changing[i]) {
11514 set_vl_shared(dd, i, 0);
11515 cur_bc.vl[i].shared = 0;
11516 }
11517 }
11518
11519 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11520 "shared");
11521
11522 if (change_count > 0) {
11523 for (i = 0; i < NUM_USABLE_VLS; i++) {
11524 if (!valid_vl(i))
11525 continue;
11526
11527 if (lowering_dedicated[i]) {
11528 set_vl_dedicated(dd, i,
11529 be16_to_cpu(new_bc->
11530 vl[i].dedicated));
11531 cur_bc.vl[i].dedicated =
11532 new_bc->vl[i].dedicated;
11533 }
11534 }
11535
11536 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11537
11538
11539 for (i = 0; i < NUM_USABLE_VLS; i++) {
11540 if (!valid_vl(i))
11541 continue;
11542
11543 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11544 be16_to_cpu(cur_bc.vl[i].dedicated))
11545 set_vl_dedicated(dd, i,
11546 be16_to_cpu(new_bc->
11547 vl[i].dedicated));
11548 }
11549 }
11550
11551
11552 for (i = 0; i < NUM_USABLE_VLS; i++) {
11553 if (!valid_vl(i))
11554 continue;
11555
11556 if (be16_to_cpu(new_bc->vl[i].shared) >
11557 be16_to_cpu(cur_bc.vl[i].shared))
11558 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11559 }
11560
11561
11562 if (be16_to_cpu(new_bc->overall_shared_limit) >
11563 be16_to_cpu(cur_bc.overall_shared_limit))
11564 set_global_shared(dd,
11565 be16_to_cpu(new_bc->overall_shared_limit));
11566
11567
11568 if (new_total < cur_total)
11569 set_global_limit(dd, new_total);
11570
11571
11572
11573
11574
11575 if (change_count > 0) {
11576 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11577 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11578 be16_to_cpu(new_bc->vl[i].shared) > 0)
11579 vl_count++;
11580 ppd->actual_vls_operational = vl_count;
11581 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11582 ppd->actual_vls_operational :
11583 ppd->vls_operational,
11584 NULL);
11585 if (ret == 0)
11586 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11587 ppd->actual_vls_operational :
11588 ppd->vls_operational, NULL);
11589 if (ret)
11590 return ret;
11591 }
11592 return 0;
11593 }
11594
11595
11596
11597
11598
11599
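/*
 * Read the given fabric manager table.  Return the size of the
 * table (in bytes) on success, and a negative error code on
 * failure.
 */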
11600 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11601
11602 {
11603 int size;
11604 struct vl_arb_cache *vlc;
11605
11606 switch (which) {
11607 case FM_TBL_VL_HIGH_ARB:
11608 size = 256;
11609
11610
11611
11612
11613 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11614 vl_arb_get_cache(vlc, t);
11615 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11616 break;
11617 case FM_TBL_VL_LOW_ARB:
11618 size = 256;
11619
11620
11621
11622
11623 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11624 vl_arb_get_cache(vlc, t);
11625 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11626 break;
11627 case FM_TBL_BUFFER_CONTROL:
11628 size = get_buffer_control(ppd->dd, t, NULL);
11629 break;
11630 case FM_TBL_SC2VLNT:
11631 size = get_sc2vlnt(ppd->dd, t);
11632 break;
11633 case FM_TBL_VL_PREEMPT_ELEMS:
11634 size = 256;
11635
11636 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11637 break;
11638 case FM_TBL_VL_PREEMPT_MATRIX:
11639 size = 256;
11640
11641
11642
11643
11644 break;
11645 default:
11646 return -EINVAL;
11647 }
11648 return size;
11649 }
11650
11651
11652
11653
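/*
 * Write the given fabric manager table.
 */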
11654 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11655 {
11656 int ret = 0;
11657 struct vl_arb_cache *vlc;
11658
11659 switch (which) {
11660 case FM_TBL_VL_HIGH_ARB:
11661 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11662 if (vl_arb_match_cache(vlc, t)) {
11663 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11664 break;
11665 }
11666 vl_arb_set_cache(vlc, t);
11667 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11668 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11669 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11670 break;
11671 case FM_TBL_VL_LOW_ARB:
11672 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11673 if (vl_arb_match_cache(vlc, t)) {
11674 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11675 break;
11676 }
11677 vl_arb_set_cache(vlc, t);
11678 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11679 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11680 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11681 break;
11682 case FM_TBL_BUFFER_CONTROL:
11683 ret = set_buffer_control(ppd, t);
11684 break;
11685 case FM_TBL_SC2VLNT:
11686 set_sc2vlnt(ppd->dd, t);
11687 break;
11688 default:
11689 ret = -EINVAL;
11690 }
11691 return ret;
11692 }
11693
11694
11695
11696
11697
11698
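/*
 * Disable all data VLs (they will be re-enabled by
 * open_fill_data_vls()).
 *
 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
 */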
11699 static int disable_data_vls(struct hfi1_devdata *dd)
11700 {
11701 if (is_ax(dd))
11702 return 1;
11703
11704 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11705
11706 return 0;
11707 }
11708
11709
11710
11711
11712
11713
11714
11715
11716
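/*
 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
 * Just re-enables all data VLs (the "fill" part happens
 * automatically - the name was chosen for symmetry with
 * stop_drain_data_vls()).
 *
 * Return 0 if the VLs were enabled, non-zero if not.
 */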
11717 int open_fill_data_vls(struct hfi1_devdata *dd)
11718 {
11719 if (is_ax(dd))
11720 return 1;
11721
11722 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11723
11724 return 0;
11725 }
11726
11727
11728
11729
11730
11731
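/*
 * drain_data_vls() - assumes that disable_data_vls() has been called,
 * waits for occupancy (of per-VL FIFOs) for all contexts, and for the
 * SDMA engines to drop to 0.
 */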
11732 static void drain_data_vls(struct hfi1_devdata *dd)
11733 {
11734 sc_wait(dd);
11735 sdma_wait(dd);
11736 pause_for_credit_return(dd);
11737 }
11738
11739
11740
11741
11742
11743
11744
11745
11746
11747
11748
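/*
 * stop_drain_data_vls() - disable, then drain all per-VL FIFOs.
 *
 * Use open_fill_data_vls() to resume using data VLs.  This pair is
 * meant to be used like this:
 *
 *	stop_drain_data_vls(dd);
 *	// do things with per-VL resources
 *	open_fill_data_vls(dd);
 */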
11749 int stop_drain_data_vls(struct hfi1_devdata *dd)
11750 {
11751 int ret;
11752
11753 ret = disable_data_vls(dd);
11754 if (ret == 0)
11755 drain_data_vls(dd);
11756
11757 return ret;
11758 }
11759
11760
11761
11762
11763
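/*
 * Convert a nanosecond time to a cclock count.  No matter how slow
 * the cclock, a non-zero ns will always have a non-zero result.
 */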
11764 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11765 {
11766 u32 cclocks;
11767
11768 if (dd->icode == ICODE_FPGA_EMULATION)
11769 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11770 else
11771 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11772 if (ns && !cclocks)
11773 cclocks = 1;
11774 return cclocks;
11775 }
11776
11777
11778
11779
11780
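/*
 * Convert a cclock count to nanoseconds.  No matter how slow the
 * cclock, a non-zero cclocks input always yields a non-zero result.
 */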
11781 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11782 {
11783 u32 ns;
11784
11785 if (dd->icode == ICODE_FPGA_EMULATION)
11786 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11787 else
11788 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11789 if (cclocks && !ns)
11790 ns = 1;
11791 return ns;
11792 }
11793
11794
11795
11796
11797
11798
11799
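/*
 * Dynamically adjust the receive interrupt timeout for a context based
 * on the incoming packet rate: halve the timeout when fewer than
 * rcv_intr_count packets arrived in this interrupt, otherwise double
 * it, clamped to [1, dd->rcv_intr_timeout_csr].
 */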
11800 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11801 {
11802 struct hfi1_devdata *dd = rcd->dd;
11803 u32 timeout = rcd->rcvavail_timeout;
11804
11805
11806
11807
11808
11809
11810
11811
11812
11813
11814 if (npkts < rcv_intr_count) {
11815
11816
11817
11818
11819 if (timeout < 2)
11820 return;
11821 timeout >>= 1;
11822 } else {
11823
11824
11825
11826
11827 if (timeout >= dd->rcv_intr_timeout_csr)
11828 return;
11829 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11830 }
11831
11832 rcd->rcvavail_timeout = timeout;
11833
11834
11835
11836
11837 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11838 (u64)timeout <<
11839 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11840 }
11841
11842 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11843 u32 intr_adjust, u32 npkts)
11844 {
11845 struct hfi1_devdata *dd = rcd->dd;
11846 u64 reg;
11847 u32 ctxt = rcd->ctxt;
11848
11849
11850
11851
11852
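	/*
	 * Write the timeout register before updating RcvHdrHead so the
	 * new value is in place when the hardware restarts counting.
	 */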
11853 if (intr_adjust)
11854 adjust_rcv_timeout(rcd, npkts);
11855 if (updegr) {
11856 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11857 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11858 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11859 }
11860 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11861 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11862 << RCV_HDR_HEAD_HEAD_SHIFT);
11863 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11864 }
11865
11866 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11867 {
11868 u32 head, tail;
11869
11870 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11871 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11872
11873 if (hfi1_rcvhdrtail_kvaddr(rcd))
11874 tail = get_rcvhdrtail(rcd);
11875 else
11876 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11877
11878 return head == tail;
11879 }
11880
11881
11882
11883
11884
11885
11886
11887
11888
11889
11890
11891
11892
11893
11894
11895
11896
11897
11898
11899
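/*
 * Context Control and Receive Array encoding for eager buffer size:
 *	0x1   4 KB
 *	0x2   8 KB
 *	0x3  16 KB
 *	0x4  32 KB
 *	0x5  64 KB
 *	0x6 128 KB
 *	0x7 256 KB
 *	0x8 512 KB
 *	0x9   1 MB
 *	0xa   2 MB
 * Sizes not listed fall back to 0x1, the minimum.
 */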
11900 static u32 encoded_size(u32 size)
11901 {
11902 switch (size) {
11903 case 4 * 1024: return 0x1;
11904 case 8 * 1024: return 0x2;
11905 case 16 * 1024: return 0x3;
11906 case 32 * 1024: return 0x4;
11907 case 64 * 1024: return 0x5;
11908 case 128 * 1024: return 0x6;
11909 case 256 * 1024: return 0x7;
11910 case 512 * 1024: return 0x8;
11911 case 1 * 1024 * 1024: return 0x9;
11912 case 2 * 1024 * 1024: return 0xa;
11913 }
11914 return 0x1;
11915 }
11916
11917
11918
11919
11920
11921
11922
11923
11924
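/*
 * encode_rcv_header_entry_size - convert a receive header entry size
 * (in DWORDs: 2, 16, or 32) to the encoding used in the RcvHdrEntSize
 * CSR.  Returns 0 for an invalid size.
 */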
11925 u8 encode_rcv_header_entry_size(u8 size)
11926 {
11927
11928 if (size == 2)
11929 return 1;
11930 if (size == 16)
11931 return 2;
11932 if (size == 32)
11933 return 4;
11934 return 0;
11935 }
11936
11937
11938
11939
11940
11941
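/*
 * hfi1_validate_rcvhdrcnt - validate a receive header queue count.
 * The count must be greater than HFI1_MIN_HDRQ_EGRBUF_CNT, no more
 * than HFI1_MAX_HDRQ_EGRBUF_CNT, and divisible by HDRQ_INCREMENT.
 */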
11942 int hfi1_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
11943 {
11944 if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
11945 dd_dev_err(dd, "Receive header queue count too small\n");
11946 return -EINVAL;
11947 }
11948
11949 if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
11950 dd_dev_err(dd,
11951 "Receive header queue count cannot be greater than %u\n",
11952 HFI1_MAX_HDRQ_EGRBUF_CNT);
11953 return -EINVAL;
11954 }
11955
11956 if (thecnt % HDRQ_INCREMENT) {
11957 dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
11958 thecnt, HDRQ_INCREMENT);
11959 return -EINVAL;
11960 }
11961
11962 return 0;
11963 }
11964
11965
11966
11967
11968
11969
11970
11971
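/*
 * set_hdrq_regs - program the receive header queue count, entry size,
 * and header size CSRs for the given context.
 */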
11972 void set_hdrq_regs(struct hfi1_devdata *dd, u8 ctxt, u8 entsize, u16 hdrcnt)
11973 {
11974 u64 reg;
11975
11976 reg = (((u64)hdrcnt >> HDRQ_SIZE_SHIFT) & RCV_HDR_CNT_CNT_MASK) <<
11977 RCV_HDR_CNT_CNT_SHIFT;
11978 write_kctxt_csr(dd, ctxt, RCV_HDR_CNT, reg);
11979 reg = ((u64)encode_rcv_header_entry_size(entsize) &
11980 RCV_HDR_ENT_SIZE_ENT_SIZE_MASK) <<
11981 RCV_HDR_ENT_SIZE_ENT_SIZE_SHIFT;
11982 write_kctxt_csr(dd, ctxt, RCV_HDR_ENT_SIZE, reg);
11983 reg = ((u64)DEFAULT_RCVHDRSIZE & RCV_HDR_SIZE_HDR_SIZE_MASK) <<
11984 RCV_HDR_SIZE_HDR_SIZE_SHIFT;
11985 write_kctxt_csr(dd, ctxt, RCV_HDR_SIZE, reg);
11986
11987
11988
11989
11990
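	/*
	 * Program a dummy tail address for every receive context before
	 * enabling any receive context, so the hardware always has a
	 * valid tail address to write.
	 */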
11991 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11992 dd->rcvhdrtail_dummy_dma);
11993 }
11994
11995 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11996 struct hfi1_ctxtdata *rcd)
11997 {
11998 u64 rcvctrl, reg;
11999 int did_enable = 0;
12000 u16 ctxt;
12001
12002 if (!rcd)
12003 return;
12004
12005 ctxt = rcd->ctxt;
12006
12007 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
12008
12009 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
12010
12011 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
12012 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
12013
12014 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
12015 rcd->rcvhdrq_dma);
12016 if (hfi1_rcvhdrtail_kvaddr(rcd))
12017 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12018 rcd->rcvhdrqtailaddr_dma);
12019 hfi1_set_seq_cnt(rcd, 1);
12020
12021
12022 hfi1_set_rcd_head(rcd, 0);
12023
12024
12025
12026
12027
12028
12029
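		/*
		 * Zero the header queue so we don't get false positives
		 * when checking the sequence number - the old sequence
		 * numbers could land exactly on the same spot, e.g. on
		 * a context restart before the header queue wrapped.
		 */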
12030 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
12031
12032
12033 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
12034
12035
12036 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
12037
12038
12039 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
12040 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
12041 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
12042 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
12043
12044
12045 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
12046 did_enable = 1;
12047
12048
12049 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
12050
12051
12052 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
12053 & RCV_EGR_CTRL_EGR_CNT_MASK)
12054 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
12055 (((rcd->eager_base >> RCV_SHIFT)
12056 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
12057 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
12058 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
12059
12060
12061
12062
12063
12064
12065
12066 reg = (((rcd->expected_count >> RCV_SHIFT)
12067 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
12068 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
12069 (((rcd->expected_base >> RCV_SHIFT)
12070 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
12071 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
12072 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
12073 if (ctxt == HFI1_CTRL_CTXT)
12074 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
12075 }
12076 if (op & HFI1_RCVCTRL_CTXT_DIS) {
12077 write_csr(dd, RCV_VL15, 0);
12078
12079
12080
12081
12082
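		/*
		 * When a receive context is being disabled, point the
		 * tail update at the dummy address and turn tail update
		 * on before clearing the enable bit.
		 */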
12083 if (dd->rcvhdrtail_dummy_dma) {
12084 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12085 dd->rcvhdrtail_dummy_dma);
12086
12087 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12088 }
12089
12090 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
12091 }
12092 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
12093 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
12094 IS_RCVAVAIL_START + rcd->ctxt, true);
12095 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
12096 }
12097 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
12098 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
12099 IS_RCVAVAIL_START + rcd->ctxt, false);
12100 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
12101 }
12102 if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && hfi1_rcvhdrtail_kvaddr(rcd))
12103 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12104 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
12105
12106 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
12107 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
12108 }
12109 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
12110 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
12111 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
12112 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
12113 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
12114
12115
12116
12117
12118 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
12119 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12120 }
12121 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
12122 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
12123 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
12124 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12125 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
12126 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
12127 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
12128 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12129 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
12130 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12131 if (op & HFI1_RCVCTRL_URGENT_ENB)
12132 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12133 IS_RCVURGENT_START + rcd->ctxt, true);
12134 if (op & HFI1_RCVCTRL_URGENT_DIS)
12135 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12136 IS_RCVURGENT_START + rcd->ctxt, false);
12137
12138 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12139 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12140
12141
12142 if (did_enable &&
12143 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12144 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12145 if (reg != 0) {
12146 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12147 ctxt, reg);
12148 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12149 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12150 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12151 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12152 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12153 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12154 ctxt, reg, reg == 0 ? "not" : "still");
12155 }
12156 }
12157
12158 if (did_enable) {
12159
12160
12161
12162
12163
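		/*
		 * The interrupt timeout and count must be written after
		 * the context is enabled to take effect.
		 */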
12164 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12165 (u64)rcd->rcvavail_timeout <<
12166 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12167
12168
12169 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12170 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12171 }
12172
12173 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12174
12175
12176
12177
12178
12179 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12180 dd->rcvhdrtail_dummy_dma);
12181 }
12182
12183 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12184 {
12185 int ret;
12186 u64 val = 0;
12187
12188 if (namep) {
12189 ret = dd->cntrnameslen;
12190 *namep = dd->cntrnames;
12191 } else {
12192 const struct cntr_entry *entry;
12193 int i, j;
12194
12195 ret = (dd->ndevcntrs) * sizeof(u64);
12196
12197
12198 *cntrp = dd->cntrs;
12199
12200
12201
12202
12203 for (i = 0; i < DEV_CNTR_LAST; i++) {
12204 entry = &dev_cntrs[i];
12205 hfi1_cdbg(CNTR, "reading %s", entry->name);
12206 if (entry->flags & CNTR_DISABLED) {
12207
12208 hfi1_cdbg(CNTR, "\tDisabled\n");
12209 } else {
12210 if (entry->flags & CNTR_VL) {
12211 hfi1_cdbg(CNTR, "\tPer VL\n");
12212 for (j = 0; j < C_VL_COUNT; j++) {
12213 val = entry->rw_cntr(entry,
12214 dd, j,
12215 CNTR_MODE_R,
12216 0);
12217 hfi1_cdbg(
12218 CNTR,
12219 "\t\tRead 0x%llx for %d\n",
12220 val, j);
12221 dd->cntrs[entry->offset + j] =
12222 val;
12223 }
12224 } else if (entry->flags & CNTR_SDMA) {
12225 hfi1_cdbg(CNTR,
12226 "\t Per SDMA Engine\n");
12227 for (j = 0; j < chip_sdma_engines(dd);
12228 j++) {
12229 val =
12230 entry->rw_cntr(entry, dd, j,
12231 CNTR_MODE_R, 0);
12232 hfi1_cdbg(CNTR,
12233 "\t\tRead 0x%llx for %d\n",
12234 val, j);
12235 dd->cntrs[entry->offset + j] =
12236 val;
12237 }
12238 } else {
12239 val = entry->rw_cntr(entry, dd,
12240 CNTR_INVALID_VL,
12241 CNTR_MODE_R, 0);
12242 dd->cntrs[entry->offset] = val;
12243 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12244 }
12245 }
12246 }
12247 }
12248 return ret;
12249 }
12250
12251
12252
12253
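/*
 * Used by sysfs to create files for hfi stats to read
 */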
12254 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12255 {
12256 int ret;
12257 u64 val = 0;
12258
12259 if (namep) {
12260 ret = ppd->dd->portcntrnameslen;
12261 *namep = ppd->dd->portcntrnames;
12262 } else {
12263 const struct cntr_entry *entry;
12264 int i, j;
12265
12266 ret = ppd->dd->nportcntrs * sizeof(u64);
12267 *cntrp = ppd->cntrs;
12268
12269 for (i = 0; i < PORT_CNTR_LAST; i++) {
12270 entry = &port_cntrs[i];
12271 hfi1_cdbg(CNTR, "reading %s", entry->name);
12272 if (entry->flags & CNTR_DISABLED) {
12273
12274 hfi1_cdbg(CNTR, "\tDisabled\n");
12275 continue;
12276 }
12277
12278 if (entry->flags & CNTR_VL) {
12279 hfi1_cdbg(CNTR, "\tPer VL");
12280 for (j = 0; j < C_VL_COUNT; j++) {
12281 val = entry->rw_cntr(entry, ppd, j,
12282 CNTR_MODE_R,
12283 0);
12284 hfi1_cdbg(
12285 CNTR,
12286 "\t\tRead 0x%llx for %d",
12287 val, j);
12288 ppd->cntrs[entry->offset + j] = val;
12289 }
12290 } else {
12291 val = entry->rw_cntr(entry, ppd,
12292 CNTR_INVALID_VL,
12293 CNTR_MODE_R,
12294 0);
12295 ppd->cntrs[entry->offset] = val;
12296 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12297 }
12298 }
12299 }
12300 return ret;
12301 }
12302
12303 static void free_cntrs(struct hfi1_devdata *dd)
12304 {
12305 struct hfi1_pportdata *ppd;
12306 int i;
12307
12308 if (dd->synth_stats_timer.function)
12309 del_timer_sync(&dd->synth_stats_timer);
12310 ppd = (struct hfi1_pportdata *)(dd + 1);
12311 for (i = 0; i < dd->num_pports; i++, ppd++) {
12312 kfree(ppd->cntrs);
12313 kfree(ppd->scntrs);
12314 free_percpu(ppd->ibport_data.rvp.rc_acks);
12315 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12316 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12317 ppd->cntrs = NULL;
12318 ppd->scntrs = NULL;
12319 ppd->ibport_data.rvp.rc_acks = NULL;
12320 ppd->ibport_data.rvp.rc_qacks = NULL;
12321 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12322 }
12323 kfree(dd->portcntrnames);
12324 dd->portcntrnames = NULL;
12325 kfree(dd->cntrs);
12326 dd->cntrs = NULL;
12327 kfree(dd->scntrs);
12328 dd->scntrs = NULL;
12329 kfree(dd->cntrnames);
12330 dd->cntrnames = NULL;
12331 if (dd->update_cntr_wq) {
12332 destroy_workqueue(dd->update_cntr_wq);
12333 dd->update_cntr_wq = NULL;
12334 }
12335 }
12336
12337 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12338 u64 *psval, void *context, int vl)
12339 {
12340 u64 val;
12341 u64 sval = *psval;
12342
12343 if (entry->flags & CNTR_DISABLED) {
12344 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12345 return 0;
12346 }
12347
12348 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12349
12350 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12351
12352
12353 if (entry->flags & CNTR_SYNTH) {
12354 if (sval == CNTR_MAX) {
12355
12356 return CNTR_MAX;
12357 }
12358
12359 if (entry->flags & CNTR_32BIT) {
12360
12361 u64 upper = sval >> 32;
12362 u64 lower = (sval << 32) >> 32;
12363
12364 if (lower > val) {
12365 if (upper == CNTR_32BIT_MAX)
12366 val = CNTR_MAX;
12367 else
12368 upper++;
12369 }
12370
12371 if (val != CNTR_MAX)
12372 val = (upper << 32) | val;
12373
12374 } else {
12375
12376 if ((val < sval) || (val > CNTR_MAX))
12377 val = CNTR_MAX;
12378 }
12379 }
12380
12381 *psval = val;
12382
12383 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12384
12385 return val;
12386 }
12387
12388 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12389 struct cntr_entry *entry,
12390 u64 *psval, void *context, int vl, u64 data)
12391 {
12392 u64 val;
12393
12394 if (entry->flags & CNTR_DISABLED) {
12395 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12396 return 0;
12397 }
12398
12399 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12400
12401 if (entry->flags & CNTR_SYNTH) {
12402 *psval = data;
12403 if (entry->flags & CNTR_32BIT) {
12404 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12405 (data << 32) >> 32);
12406 val = data;
12407 } else {
12408 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12409 data);
12410 }
12411 } else {
12412 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12413 }
12414
12415 *psval = val;
12416
12417 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12418
12419 return val;
12420 }
12421
12422 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12423 {
12424 struct cntr_entry *entry;
12425 u64 *sval;
12426
12427 entry = &dev_cntrs[index];
12428 sval = dd->scntrs + entry->offset;
12429
12430 if (vl != CNTR_INVALID_VL)
12431 sval += vl;
12432
12433 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12434 }
12435
12436 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12437 {
12438 struct cntr_entry *entry;
12439 u64 *sval;
12440
12441 entry = &dev_cntrs[index];
12442 sval = dd->scntrs + entry->offset;
12443
12444 if (vl != CNTR_INVALID_VL)
12445 sval += vl;
12446
12447 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12448 }
12449
12450 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12451 {
12452 struct cntr_entry *entry;
12453 u64 *sval;
12454
12455 entry = &port_cntrs[index];
12456 sval = ppd->scntrs + entry->offset;
12457
12458 if (vl != CNTR_INVALID_VL)
12459 sval += vl;
12460
12461 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12462 (index <= C_RCV_HDR_OVF_LAST)) {
12463
12464 return 0;
12465 }
12466
12467 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12468 }
12469
12470 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12471 {
12472 struct cntr_entry *entry;
12473 u64 *sval;
12474
12475 entry = &port_cntrs[index];
12476 sval = ppd->scntrs + entry->offset;
12477
12478 if (vl != CNTR_INVALID_VL)
12479 sval += vl;
12480
12481 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12482 (index <= C_RCV_HDR_OVF_LAST)) {
12483
12484 return 0;
12485 }
12486
12487 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12488 }
12489
12490 static void do_update_synth_timer(struct work_struct *work)
12491 {
12492 u64 cur_tx;
12493 u64 cur_rx;
12494 u64 total_flits;
12495 u8 update = 0;
12496 int i, j, vl;
12497 struct hfi1_pportdata *ppd;
12498 struct cntr_entry *entry;
12499 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12500 update_cntr_work);
12501
12502
12503
12504
12505
12506
12507
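	/*
	 * Rather than keep beating on all the CSRs, watch a minimal set
	 * for potential rollover: the transmit and receive flit counts.
	 * If the new flit total would exceed the 32-bit counter range,
	 * read (and thereby refresh) every counter.
	 */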
12508 entry = &dev_cntrs[C_DC_RCV_FLITS];
12509 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12510
12511 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12512 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12513
12514 hfi1_cdbg(
12515 CNTR,
12516 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12517 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12518
12519 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12520
12521
12522
12523
12524 update = 1;
12525 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12526 dd->unit);
12527 } else {
12528 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12529 hfi1_cdbg(CNTR,
12530 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12531 total_flits, (u64)CNTR_32BIT_MAX);
12532 if (total_flits >= CNTR_32BIT_MAX) {
12533 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12534 dd->unit);
12535 update = 1;
12536 }
12537 }
12538
12539 if (update) {
12540 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12541 for (i = 0; i < DEV_CNTR_LAST; i++) {
12542 entry = &dev_cntrs[i];
12543 if (entry->flags & CNTR_VL) {
12544 for (vl = 0; vl < C_VL_COUNT; vl++)
12545 read_dev_cntr(dd, i, vl);
12546 } else {
12547 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12548 }
12549 }
12550 ppd = (struct hfi1_pportdata *)(dd + 1);
12551 for (i = 0; i < dd->num_pports; i++, ppd++) {
12552 for (j = 0; j < PORT_CNTR_LAST; j++) {
12553 entry = &port_cntrs[j];
12554 if (entry->flags & CNTR_VL) {
12555 for (vl = 0; vl < C_VL_COUNT; vl++)
12556 read_port_cntr(ppd, j, vl);
12557 } else {
12558 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12559 }
12560 }
12561 }
12562
12563
12564
12565
12566
12567
12568
12569 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12570 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12571 CNTR_MODE_R, 0);
12572
12573 entry = &dev_cntrs[C_DC_RCV_FLITS];
12574 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12575 CNTR_MODE_R, 0);
12576
12577 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12578 dd->unit, dd->last_tx, dd->last_rx);
12579
12580 } else {
12581 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12582 }
12583 }
12584
12585 static void update_synth_timer(struct timer_list *t)
12586 {
12587 struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12588
12589 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12590 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12591 }
12592
12593 #define C_MAX_NAME 16
12594 static int init_cntrs(struct hfi1_devdata *dd)
12595 {
12596 int i, rcv_ctxts, j;
12597 size_t sz;
12598 char *p;
12599 char name[C_MAX_NAME];
12600 struct hfi1_pportdata *ppd;
12601 const char *bit_type_32 = ",32";
12602 const int bit_type_32_sz = strlen(bit_type_32);
12603 u32 sdma_engines = chip_sdma_engines(dd);
12604
12605
12606 timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12607
12608
12609
12610
12611
12612
12613 dd->ndevcntrs = 0;
12614 sz = 0;
12615
12616 for (i = 0; i < DEV_CNTR_LAST; i++) {
12617 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12618 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12619 continue;
12620 }
12621
12622 if (dev_cntrs[i].flags & CNTR_VL) {
12623 dev_cntrs[i].offset = dd->ndevcntrs;
12624 for (j = 0; j < C_VL_COUNT; j++) {
12625 snprintf(name, C_MAX_NAME, "%s%d",
12626 dev_cntrs[i].name, vl_from_idx(j));
12627 sz += strlen(name);
12628
12629 if (dev_cntrs[i].flags & CNTR_32BIT)
12630 sz += bit_type_32_sz;
12631 sz++;
12632 dd->ndevcntrs++;
12633 }
12634 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12635 dev_cntrs[i].offset = dd->ndevcntrs;
12636 for (j = 0; j < sdma_engines; j++) {
12637 snprintf(name, C_MAX_NAME, "%s%d",
12638 dev_cntrs[i].name, j);
12639 sz += strlen(name);
12640
12641 if (dev_cntrs[i].flags & CNTR_32BIT)
12642 sz += bit_type_32_sz;
12643 sz++;
12644 dd->ndevcntrs++;
12645 }
12646 } else {
12647
12648 sz += strlen(dev_cntrs[i].name) + 1;
12649
12650 if (dev_cntrs[i].flags & CNTR_32BIT)
12651 sz += bit_type_32_sz;
12652 dev_cntrs[i].offset = dd->ndevcntrs;
12653 dd->ndevcntrs++;
12654 }
12655 }
12656
12657
12658 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12659 GFP_KERNEL);
12660 if (!dd->cntrs)
12661 goto bail;
12662
12663 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12664 if (!dd->scntrs)
12665 goto bail;
12666
12667
12668 dd->cntrnameslen = sz;
12669 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12670 if (!dd->cntrnames)
12671 goto bail;
12672
12673
12674 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12675 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12676
12677 } else if (dev_cntrs[i].flags & CNTR_VL) {
12678 for (j = 0; j < C_VL_COUNT; j++) {
12679 snprintf(name, C_MAX_NAME, "%s%d",
12680 dev_cntrs[i].name,
12681 vl_from_idx(j));
12682 memcpy(p, name, strlen(name));
12683 p += strlen(name);
12684
12685
12686 if (dev_cntrs[i].flags & CNTR_32BIT) {
12687 memcpy(p, bit_type_32, bit_type_32_sz);
12688 p += bit_type_32_sz;
12689 }
12690
12691 *p++ = '\n';
12692 }
12693 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12694 for (j = 0; j < sdma_engines; j++) {
12695 snprintf(name, C_MAX_NAME, "%s%d",
12696 dev_cntrs[i].name, j);
12697 memcpy(p, name, strlen(name));
12698 p += strlen(name);
12699
12700
12701 if (dev_cntrs[i].flags & CNTR_32BIT) {
12702 memcpy(p, bit_type_32, bit_type_32_sz);
12703 p += bit_type_32_sz;
12704 }
12705
12706 *p++ = '\n';
12707 }
12708 } else {
12709 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12710 p += strlen(dev_cntrs[i].name);
12711
12712
12713 if (dev_cntrs[i].flags & CNTR_32BIT) {
12714 memcpy(p, bit_type_32, bit_type_32_sz);
12715 p += bit_type_32_sz;
12716 }
12717
12718 *p++ = '\n';
12719 }
12720 }
12721
12722
12723
12724
12725
12726
12727
12728
12729
12730
12731 rcv_ctxts = dd->num_rcv_contexts;
12732 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12733 i <= C_RCV_HDR_OVF_LAST; i++) {
12734 port_cntrs[i].flags |= CNTR_DISABLED;
12735 }
12736
12737
12738 sz = 0;
12739 dd->nportcntrs = 0;
12740 for (i = 0; i < PORT_CNTR_LAST; i++) {
12741 if (port_cntrs[i].flags & CNTR_DISABLED) {
12742 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12743 continue;
12744 }
12745
12746 if (port_cntrs[i].flags & CNTR_VL) {
12747 port_cntrs[i].offset = dd->nportcntrs;
12748 for (j = 0; j < C_VL_COUNT; j++) {
12749 snprintf(name, C_MAX_NAME, "%s%d",
12750 port_cntrs[i].name, vl_from_idx(j));
12751 sz += strlen(name);
12752
12753 if (port_cntrs[i].flags & CNTR_32BIT)
12754 sz += bit_type_32_sz;
12755 sz++;
12756 dd->nportcntrs++;
12757 }
12758 } else {
12759
12760 sz += strlen(port_cntrs[i].name) + 1;
12761
12762 if (port_cntrs[i].flags & CNTR_32BIT)
12763 sz += bit_type_32_sz;
12764 port_cntrs[i].offset = dd->nportcntrs;
12765 dd->nportcntrs++;
12766 }
12767 }
12768
12769
12770 dd->portcntrnameslen = sz;
12771 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12772 if (!dd->portcntrnames)
12773 goto bail;
12774
12775
12776 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12777 if (port_cntrs[i].flags & CNTR_DISABLED)
12778 continue;
12779
12780 if (port_cntrs[i].flags & CNTR_VL) {
12781 for (j = 0; j < C_VL_COUNT; j++) {
12782 snprintf(name, C_MAX_NAME, "%s%d",
12783 port_cntrs[i].name, vl_from_idx(j));
12784 memcpy(p, name, strlen(name));
12785 p += strlen(name);
12786
12787
12788 if (port_cntrs[i].flags & CNTR_32BIT) {
12789 memcpy(p, bit_type_32, bit_type_32_sz);
12790 p += bit_type_32_sz;
12791 }
12792
12793 *p++ = '\n';
12794 }
12795 } else {
12796 memcpy(p, port_cntrs[i].name,
12797 strlen(port_cntrs[i].name));
12798 p += strlen(port_cntrs[i].name);
12799
12800
12801 if (port_cntrs[i].flags & CNTR_32BIT) {
12802 memcpy(p, bit_type_32, bit_type_32_sz);
12803 p += bit_type_32_sz;
12804 }
12805
12806 *p++ = '\n';
12807 }
12808 }
12809
12810
12811 ppd = (struct hfi1_pportdata *)(dd + 1);
12812 for (i = 0; i < dd->num_pports; i++, ppd++) {
12813 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12814 if (!ppd->cntrs)
12815 goto bail;
12816
12817 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12818 if (!ppd->scntrs)
12819 goto bail;
12820 }
12821
12822
12823 if (init_cpu_counters(dd))
12824 goto bail;
12825
12826 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12827 WQ_MEM_RECLAIM, dd->unit);
12828 if (!dd->update_cntr_wq)
12829 goto bail;
12830
12831 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12832
12833 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12834 return 0;
12835 bail:
12836 free_cntrs(dd);
12837 return -ENOMEM;
12838 }
12839
12840 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12841 {
12842 switch (chip_lstate) {
12843 case LSTATE_DOWN:
12844 return IB_PORT_DOWN;
12845 case LSTATE_INIT:
12846 return IB_PORT_INIT;
12847 case LSTATE_ARMED:
12848 return IB_PORT_ARMED;
12849 case LSTATE_ACTIVE:
12850 return IB_PORT_ACTIVE;
12851 default:
12852 dd_dev_err(dd,
12853 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12854 chip_lstate);
12855 return IB_PORT_DOWN;
12856 }
12857 }
12858
12859 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12860 {
12861
12862 switch (chip_pstate & 0xf0) {
12863 case PLS_DISABLED:
12864 return IB_PORTPHYSSTATE_DISABLED;
12865 case PLS_OFFLINE:
12866 return OPA_PORTPHYSSTATE_OFFLINE;
12867 case PLS_POLLING:
12868 return IB_PORTPHYSSTATE_POLLING;
12869 case PLS_CONFIGPHY:
12870 return IB_PORTPHYSSTATE_TRAINING;
12871 case PLS_LINKUP:
12872 return IB_PORTPHYSSTATE_LINKUP;
12873 case PLS_PHYTEST:
12874 return IB_PORTPHYSSTATE_PHY_TEST;
12875 default:
12876 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12877 chip_pstate);
12878 return IB_PORTPHYSSTATE_DISABLED;
12879 }
12880 }
12881
12882
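/* return the OPA port logical state name */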
12883 const char *opa_lstate_name(u32 lstate)
12884 {
12885 static const char * const port_logical_names[] = {
12886 "PORT_NOP",
12887 "PORT_DOWN",
12888 "PORT_INIT",
12889 "PORT_ARMED",
12890 "PORT_ACTIVE",
12891 "PORT_ACTIVE_DEFER",
12892 };
12893 if (lstate < ARRAY_SIZE(port_logical_names))
12894 return port_logical_names[lstate];
12895 return "unknown";
12896 }
12897
12898
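/* return the OPA port physical state name */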
12899 const char *opa_pstate_name(u32 pstate)
12900 {
12901 static const char * const port_physical_names[] = {
12902 "PHYS_NOP",
12903 "reserved1",
12904 "PHYS_POLL",
12905 "PHYS_DISABLED",
12906 "PHYS_TRAINING",
12907 "PHYS_LINKUP",
12908 "PHYS_LINK_ERR_RECOVER",
12909 "PHYS_PHY_TEST",
12910 "reserved8",
12911 "PHYS_OFFLINE",
12912 "PHYS_GANGED",
12913 "PHYS_TEST",
12914 };
12915 if (pstate < ARRAY_SIZE(port_physical_names))
12916 return port_physical_names[pstate];
12917 return "unknown";
12918 }
12919
12920
12921
12922
12923
12924
12925
12926
12927
12928
12929
12930
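/*
 * update_statusp - update the user-visible status flags for a port.
 *
 * Sets or clears HFI1_STATUS_IB_CONF and HFI1_STATUS_IB_READY in the
 * status page (if one is mapped) to reflect the given logical link
 * state, then logs the transition.
 */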
12931 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12932 {
12933
12934
12935
12936
12937
12938
12939
12940
12941 if (ppd->statusp) {
12942 switch (state) {
12943 case IB_PORT_DOWN:
12944 case IB_PORT_INIT:
12945 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12946 HFI1_STATUS_IB_READY);
12947 break;
12948 case IB_PORT_ARMED:
12949 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12950 break;
12951 case IB_PORT_ACTIVE:
12952 *ppd->statusp |= HFI1_STATUS_IB_READY;
12953 break;
12954 }
12955 }
12956 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12957 opa_lstate_name(state), state);
12958 }
12959
12960
12961
12962
12963
12964
12965
12966
12967
12968
12969
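/*
 * wait_logical_linkstate - wait for an IB logical link state change
 * @ppd: port device
 * @state: the OPA logical state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Polls the logical link state every 20 ms.
 * Returns 0 if the state is reached, otherwise -ETIMEDOUT.
 */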
12970 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12971 int msecs)
12972 {
12973 unsigned long timeout;
12974 u32 new_state;
12975
12976 timeout = jiffies + msecs_to_jiffies(msecs);
12977 while (1) {
12978 new_state = chip_to_opa_lstate(ppd->dd,
12979 read_logical_state(ppd->dd));
12980 if (new_state == state)
12981 break;
12982 if (time_after(jiffies, timeout)) {
12983 dd_dev_err(ppd->dd,
12984 "timeout waiting for link state 0x%x\n",
12985 state);
12986 return -ETIMEDOUT;
12987 }
12988 msleep(20);
12989 }
12990
12991 return 0;
12992 }
12993
12994 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12995 {
12996 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12997
12998 dd_dev_info(ppd->dd,
12999 "physical state changed to %s (0x%x), phy 0x%x\n",
13000 opa_pstate_name(ib_pstate), ib_pstate, state);
13001 }
13002
13003
13004
13005
13006
13007 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
13008 {
13009 u32 read_state = read_physical_state(ppd->dd);
13010
13011 if (read_state == state) {
13012 log_state_transition(ppd, state);
13013 } else {
13014 dd_dev_err(ppd->dd,
13015 "anticipated phy link state 0x%x, read 0x%x\n",
13016 state, read_state);
13017 }
13018 }
13019
13020
13021
13022
13023
13024
13025
13026
13027
13028
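/*
 * wait_physical_linkstate - wait for a physical link state change
 * @ppd: port device
 * @state: the physical state to wait for
 * @msecs: the number of milliseconds to wait
 *
 * Polls the physical link state roughly every 2 ms.
 * Returns 0 if the state is reached, otherwise -ETIMEDOUT.
 */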
13029 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
13030 int msecs)
13031 {
13032 u32 read_state;
13033 unsigned long timeout;
13034
13035 timeout = jiffies + msecs_to_jiffies(msecs);
13036 while (1) {
13037 read_state = read_physical_state(ppd->dd);
13038 if (read_state == state)
13039 break;
13040 if (time_after(jiffies, timeout)) {
13041 dd_dev_err(ppd->dd,
13042 "timeout waiting for phy link state 0x%x\n",
13043 state);
13044 return -ETIMEDOUT;
13045 }
13046 usleep_range(1950, 2050);
13047 }
13048
13049 log_state_transition(ppd, state);
13050 return 0;
13051 }
13052
13053
13054
13055
13056
13057
13058
13059
13060
13061
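/*
 * wait_phys_link_offline_substates - wait for the physical link to
 * reach any offline substate
 * @ppd: port device
 * @msecs: the number of milliseconds to wait
 *
 * Returns the last read physical state on success, otherwise
 * -ETIMEDOUT.
 */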
13062 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
13063 int msecs)
13064 {
13065 u32 read_state;
13066 unsigned long timeout;
13067
13068 timeout = jiffies + msecs_to_jiffies(msecs);
13069 while (1) {
13070 read_state = read_physical_state(ppd->dd);
13071 if ((read_state & 0xF0) == PLS_OFFLINE)
13072 break;
13073 if (time_after(jiffies, timeout)) {
13074 dd_dev_err(ppd->dd,
13075 "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
13076 read_state, msecs);
13077 return -ETIMEDOUT;
13078 }
13079 usleep_range(1950, 2050);
13080 }
13081
13082 log_state_transition(ppd, read_state);
13083 return read_state;
13084 }
13085
13086
13087
13088
13089
13090
13091
13092
13093
13094
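/*
 * wait_phys_link_out_of_offline - wait for the physical link to leave
 * the offline substates
 * @ppd: port device
 * @msecs: the number of milliseconds to wait
 *
 * Returns the last read physical state on success, otherwise
 * -ETIMEDOUT.
 */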
13095 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
13096 int msecs)
13097 {
13098 u32 read_state;
13099 unsigned long timeout;
13100
13101 timeout = jiffies + msecs_to_jiffies(msecs);
13102 while (1) {
13103 read_state = read_physical_state(ppd->dd);
13104 if ((read_state & 0xF0) != PLS_OFFLINE)
13105 break;
13106 if (time_after(jiffies, timeout)) {
13107 dd_dev_err(ppd->dd,
13108 "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
13109 read_state, msecs);
13110 return -ETIMEDOUT;
13111 }
13112 usleep_range(1950, 2050);
13113 }
13114
13115 log_state_transition(ppd, read_state);
13116 return read_state;
13117 }
13118
13119 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
13120 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
13121
13122 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
13123 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
13124
13125 void hfi1_init_ctxt(struct send_context *sc)
13126 {
13127 if (sc) {
13128 struct hfi1_devdata *dd = sc->dd;
13129 u64 reg;
13130 u8 set = (sc->type == SC_USER ?
13131 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13132 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
13133 reg = read_kctxt_csr(dd, sc->hw_context,
13134 SEND_CTXT_CHECK_ENABLE);
13135 if (set)
13136 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13137 else
13138 SET_STATIC_RATE_CONTROL_SMASK(reg);
13139 write_kctxt_csr(dd, sc->hw_context,
13140 SEND_CTXT_CHECK_ENABLE, reg);
13141 }
13142 }
13143
13144 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13145 {
13146 int ret = 0;
13147 u64 reg;
13148
13149 if (dd->icode != ICODE_RTL_SILICON) {
13150 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13151 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13152 __func__);
13153 return -EINVAL;
13154 }
13155 reg = read_csr(dd, ASIC_STS_THERM);
13156 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13157 ASIC_STS_THERM_CURR_TEMP_MASK);
13158 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13159 ASIC_STS_THERM_LO_TEMP_MASK);
13160 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13161 ASIC_STS_THERM_HI_TEMP_MASK);
13162 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13163 ASIC_STS_THERM_CRIT_TEMP_MASK);
13164
13165 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13166
13167 return ret;
13168 }
13169
13170
13171
13172
13173
13174
13175
13176
13177
13178
13179
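/*
 * read_mod_write() - set or clear the given bits in the CceIntMask
 * register that covers IRQ source @src, under dd->irq_src_lock.
 */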
13180 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
13181 bool set)
13182 {
13183 u64 reg;
13184 u16 idx = src / BITS_PER_REGISTER;
13185
13186 spin_lock(&dd->irq_src_lock);
13187 reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
13188 if (set)
13189 reg |= bits;
13190 else
13191 reg &= ~bits;
13192 write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
13193 spin_unlock(&dd->irq_src_lock);
13194 }
13195
13196
13197
13198
13199
13200
13201
13202
13203
13204
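/*
 * set_intr_bits() - enable (set) or disable (clear) a range (one or
 * more) of IRQ sources.  If first == last, only that source changes.
 * Returns 0 on success or a negative errno for a bad range.
 *
 * A usage sketch, as hfi1_rcvctrl() does for a context's RcvAvail
 * source:
 *
 *	set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
 *		      IS_RCVAVAIL_START + rcd->ctxt, true);
 */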
13205 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
13206 {
13207 u64 bits = 0;
13208 u64 bit;
13209 u16 src;
13210
13211 if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
13212 return -EINVAL;
13213
13214 if (last < first)
13215 return -ERANGE;
13216
13217 for (src = first; src <= last; src++) {
13218 bit = src % BITS_PER_REGISTER;
13219
13220 if (!bit && bits) {
13221 read_mod_write(dd, src - 1, bits, set);
13222 bits = 0;
13223 }
13224 bits |= BIT_ULL(bit);
13225 }
13226 read_mod_write(dd, last, bits, set);
13227
13228 return 0;
13229 }
13230
13231
13232
13233
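/*
 * Clear all chip interrupt and error status CSRs.
 */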
13234 void clear_all_interrupts(struct hfi1_devdata *dd)
13235 {
13236 int i;
13237
13238 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13239 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13240
13241 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13242 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13243 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13244 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13245 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13246 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13247 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13248 for (i = 0; i < chip_send_contexts(dd); i++)
13249 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13250 for (i = 0; i < chip_sdma_engines(dd); i++)
13251 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13252
13253 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13254 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13255 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13256 }
13257
13258
13259
13260
13261
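/*
 * Remap the given interrupt source from the general handler to the
 * given MSI-X interrupt: clear it from the general handler's mask,
 * then direct the chip source to the MSI-X vector via CceIntMap.
 */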
13262 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13263 {
13264 u64 reg;
13265 int m, n;
13266
13267
13268 m = isrc / 64;
13269 n = isrc % 64;
13270 if (likely(m < CCE_NUM_INT_CSRS)) {
13271 dd->gi_mask[m] &= ~((u64)1 << n);
13272 } else {
13273 dd_dev_err(dd, "remap interrupt err\n");
13274 return;
13275 }
13276
13277
13278 m = isrc / 8;
13279 n = isrc % 8;
13280 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13281 reg &= ~((u64)0xff << (8 * n));
13282 reg |= ((u64)msix_intr & 0xff) << (8 * n);
13283 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13284 }
13285
13286 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
13287 {
13288
13289
13290
13291
13292
13293
13294
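	/*
	 * Each SDMA engine has three grouped interrupt sources (SDma,
	 * SDmaProgress, SDmaIdle); direct all three to the same MSI-X
	 * interrupt.
	 */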
13295 remap_intr(dd, IS_SDMA_START + engine, msix_intr);
13296 remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
13297 remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
13298 }
13299
13300
13301
13302
13303
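/*
 * Set the general handler to accept all interrupts and remap all chip
 * interrupt sources back to MSI-X 0.
 */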
13304 void reset_interrupts(struct hfi1_devdata *dd)
13305 {
13306 int i;
13307
13308
13309 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13310 dd->gi_mask[i] = ~(u64)0;
13311
13312
13313 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13314 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13315 }
13316
13317
13318
13319
13320
13321
13322 static int set_up_interrupts(struct hfi1_devdata *dd)
13323 {
13324 int ret;
13325
13326
13327 set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
13328
13329
13330 clear_all_interrupts(dd);
13331
13332
13333 reset_interrupts(dd);
13334
13335
13336 ret = msix_initialize(dd);
13337 if (ret)
13338 return ret;
13339
13340 ret = msix_request_irqs(dd);
13341 if (ret)
13342 msix_clean_up_interrupts(dd);
13343
13344 return ret;
13345 }
13346
13347
13348
13349
13350
13351
13352
13353
13354
13355
13356
13357
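/*
 * Set up context counts in dd: num_rcv_contexts, n_krcv_queues,
 * first_dyn_alloc_ctxt, num_netdev_contexts, num_user_contexts,
 * freectxts, and num_send_contexts; then size the RcvArray groups and
 * the PIO send context pools.
 */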
13358 static int set_up_context_variables(struct hfi1_devdata *dd)
13359 {
13360 unsigned long num_kernel_contexts;
13361 u16 num_netdev_contexts;
13362 int ret;
13363 unsigned ngroups;
13364 int rmt_count;
13365 int user_rmt_reduced;
13366 u32 n_usr_ctxts;
13367 u32 send_contexts = chip_send_contexts(dd);
13368 u32 rcv_contexts = chip_rcv_contexts(dd);
13369
13370
13371
13372
13373
13374
13375
13376
13377 if (n_krcvqs)
13378
13379
13380
13381
13382
13383 num_kernel_contexts = n_krcvqs + 1;
13384 else
13385 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13386
13387
13388
13389
13390 if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13391 dd_dev_err(dd,
13392 "Reducing # kernel rcv contexts to: %d, from %lu\n",
13393 send_contexts - num_vls - 1,
13394 num_kernel_contexts);
13395 num_kernel_contexts = send_contexts - num_vls - 1;
13396 }
13397
13398
13399
13400
13401
13402
13403 if (num_user_contexts < 0)
13404 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13405 else
13406 n_usr_ctxts = num_user_contexts;
13407
13408
13409
13410 if (num_kernel_contexts + n_usr_ctxts > rcv_contexts) {
13411 dd_dev_err(dd,
13412 "Reducing # user receive contexts to: %u, from %u\n",
13413 (u32)(rcv_contexts - num_kernel_contexts),
13414 n_usr_ctxts);
13415
13416 n_usr_ctxts = rcv_contexts - num_kernel_contexts;
13417 }
13418
13419 num_netdev_contexts =
13420 hfi1_num_netdev_contexts(dd, rcv_contexts -
13421 (num_kernel_contexts + n_usr_ctxts),
13422 &node_affinity.real_cpu_mask);
13423
13424
13425
13426
13427
13428
13429
13430
13431
13432
13433
13434
13435 rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2);
13436 if (HFI1_CAP_IS_KSET(TID_RDMA))
13437 rmt_count += num_kernel_contexts - 1;
13438 if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13439 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
13440 dd_dev_err(dd,
13441 "RMT size is reducing the number of user receive contexts from %u to %d\n",
13442 n_usr_ctxts,
13443 user_rmt_reduced);
13444
13445 n_usr_ctxts = user_rmt_reduced;
13446 }
13447
13448
13449 dd->num_rcv_contexts =
13450 num_kernel_contexts + n_usr_ctxts + num_netdev_contexts;
13451 dd->n_krcv_queues = num_kernel_contexts;
13452 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13453 dd->num_netdev_contexts = num_netdev_contexts;
13454 dd->num_user_contexts = n_usr_ctxts;
13455 dd->freectxts = n_usr_ctxts;
13456 dd_dev_info(dd,
13457 "rcv contexts: chip %d, used %d (kernel %d, netdev %u, user %u)\n",
13458 rcv_contexts,
13459 (int)dd->num_rcv_contexts,
13460 (int)dd->n_krcv_queues,
13461 dd->num_netdev_contexts,
13462 dd->num_user_contexts);
13463
13464
13465
13466
13467
13468
13469
13470
13471
13472
13473
13474
13475 dd->rcv_entries.group_size = RCV_INCREMENT;
13476 ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13477 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13478 dd->rcv_entries.nctxt_extra = ngroups -
13479 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13480 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13481 dd->rcv_entries.ngroups,
13482 dd->rcv_entries.nctxt_extra);
13483 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13484 MAX_EAGER_ENTRIES * 2) {
13485 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13486 dd->rcv_entries.group_size;
13487 		dd_dev_info(dd,
13488 			    "RcvArray group count too high, changing to %u\n",
13489 			    dd->rcv_entries.ngroups);
13490 dd->rcv_entries.nctxt_extra = 0;
13491 }
13492
13493
13494
13495 ret = init_sc_pools_and_sizes(dd);
13496 if (ret >= 0) {
13497 dd->num_send_contexts = ret;
13498 dd_dev_info(
13499 dd,
13500 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13501 send_contexts,
13502 dd->num_send_contexts,
13503 dd->sc_sizes[SC_KERNEL].count,
13504 dd->sc_sizes[SC_ACK].count,
13505 dd->sc_sizes[SC_USER].count,
13506 dd->sc_sizes[SC_VL15].count);
13507 ret = 0;
13508 }
13509
13510 return ret;
13511 }
13512
13513
13514
13515
13516
13517
13518 static void set_partition_keys(struct hfi1_pportdata *ppd)
13519 {
13520 struct hfi1_devdata *dd = ppd->dd;
13521 u64 reg = 0;
13522 int i;
13523
13524 dd_dev_info(dd, "Setting partition keys\n");
13525 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13526 reg |= (ppd->pkeys[i] &
13527 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13528 ((i % 4) *
13529 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13530
13531 if ((i % 4) == 3) {
13532 write_csr(dd, RCV_PARTITION_KEY +
13533 ((i - 3) * 2), reg);
13534 reg = 0;
13535 }
13536 }
13537
13538
13539 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13540 }
13541
13542
13543
13544
13545
13546
13547
13548
13549
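/*
 * These CSRs and memories are uninitialized on reset; write them
 * before any read, presumably so their ECC/parity bits start in a
 * consistent state.
 */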
13550 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13551 {
13552 int i, j;
13553
13554
13555 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13556 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13557
13558
13559 for (i = 0; i < chip_send_contexts(dd); i++)
13560 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13561
13562
13563
13564
13565
13566
13567
13568
13569
13570
13571
13572 for (i = 0; i < chip_rcv_contexts(dd); i++) {
13573 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13574 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13575 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13576 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13577 }
13578
13579
13580 for (i = 0; i < chip_rcv_array_count(dd); i++)
13581 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13582
13583
13584 for (i = 0; i < 32; i++)
13585 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13586 }
13587
13588
13589
13590
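/*
 * Use the given ctrl_bits in CceCtrl to clear the given status_bits in
 * CceStatus, then wait (up to CCE_STATUS_TIMEOUT) for them to clear.
 */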
13591 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13592 u64 ctrl_bits)
13593 {
13594 unsigned long timeout;
13595 u64 reg;
13596
13597
13598 reg = read_csr(dd, CCE_STATUS);
13599 if ((reg & status_bits) == 0)
13600 return;
13601
13602
13603 write_csr(dd, CCE_CTRL, ctrl_bits);
13604
13605
13606 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13607 while (1) {
13608 reg = read_csr(dd, CCE_STATUS);
13609 if ((reg & status_bits) == 0)
13610 return;
13611 if (time_after(jiffies, timeout)) {
13612 dd_dev_err(dd,
13613 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13614 status_bits, reg & status_bits);
13615 return;
13616 }
13617 udelay(1);
13618 }
13619 }
13620
13621
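/* set CCE CSRs to chip reset defaults */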
13622 static void reset_cce_csrs(struct hfi1_devdata *dd)
13623 {
13624 int i;
13625
13626
13627
13628
13629
13630 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13631 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13632 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13633 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13634 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13635
13636 write_csr(dd, CCE_ERR_MASK, 0);
13637 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13638
13639 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13640 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13641 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13642
13643 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13644 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13645 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13646 CCE_MSIX_TABLE_UPPER_RESETCSR);
13647 }
13648 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13649
13650 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13651 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13652 }
13653 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13654 write_csr(dd, CCE_INT_MAP, 0);
13655 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13656
13657 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13658 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13659
13660
13661 }
13662 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13663 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13664 }
13665
13666
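/* set MISC CSRs to chip reset defaults */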
13667 static void reset_misc_csrs(struct hfi1_devdata *dd)
13668 {
13669 int i;
13670
13671 for (i = 0; i < 32; i++) {
13672 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13673 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13674 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13675 }
13676
13677
13678
13679
13680
13681 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13682 write_csr(dd, MISC_CFG_RSA_MU, 0);
13683 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13684
13685
13686
13687
13688
13689 write_csr(dd, MISC_ERR_MASK, 0);
13690 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13691
13692 }
13693
13694
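/* set TXE CSRs to chip reset defaults */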
13695 static void reset_txe_csrs(struct hfi1_devdata *dd)
13696 {
13697 int i;
13698
13699
13700
13701
13702 write_csr(dd, SEND_CTRL, 0);
13703 __cm_reset(dd, 0);
13704
13705
13706
13707
13708 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13709 pio_reset_all(dd);
13710
13711 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13712 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13713
13714
13715 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13716 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13717
13718
13719 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13720 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13721
13722 write_csr(dd, SEND_BTH_QP, 0);
13723 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13724 write_csr(dd, SEND_SC2VLT0, 0);
13725 write_csr(dd, SEND_SC2VLT1, 0);
13726 write_csr(dd, SEND_SC2VLT2, 0);
13727 write_csr(dd, SEND_SC2VLT3, 0);
13728 write_csr(dd, SEND_LEN_CHECK0, 0);
13729 write_csr(dd, SEND_LEN_CHECK1, 0);
13730
13731 write_csr(dd, SEND_ERR_MASK, 0);
13732 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13733
13734 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13735 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13736 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13737 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13738 for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13739 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13740 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13741 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13742 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13743 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13744 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13745 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13746
13747 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13748 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13749 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13750 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13751 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13752 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13753 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13754 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13755
13756
13757
13758
13759 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13760
13761
13762
13763
13764
13765
13766 for (i = 0; i < chip_send_contexts(dd); i++) {
13767 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13768 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13769 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13770 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13771 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13772 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13773 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13774 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13775 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13776 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13777 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13778 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13779 }
13780
13781
13782
13783
13784 for (i = 0; i < chip_sdma_engines(dd); i++) {
13785 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13786
13787 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13788 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13789 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13790
13791 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13792 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13793
13794 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13795 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13796
13797
13798 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13799 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13800
13801 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13802 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13803 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13804 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13805 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13806 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13807 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13808 }
13809 }
13810
13811
13812
13813
13814
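/*
 * Initialize the receive buffers.  Expects that packet ingress has
 * already been disabled so that any in-progress DMA can drain before
 * RcvCtrl.RxRbufInit is set.
 */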
13815 static void init_rbufs(struct hfi1_devdata *dd)
13816 {
13817 u64 reg;
13818 int count;
13819
13820
13821
13822
13823
13824 count = 0;
13825 while (1) {
13826 reg = read_csr(dd, RCV_STATUS);
13827 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13828 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13829 break;
13830
13831
13832
13833
13834
13835
13836
13837 if (count++ > 500) {
13838 dd_dev_err(dd,
13839 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13840 __func__, reg);
13841 break;
13842 }
13843 udelay(2);
13844 }
13845
13846
13847 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13848
13849
13850
13851
13852
13853
13854
13855 read_csr(dd, RCV_CTRL);
13856
13857
13858 count = 0;
13859 while (1) {
13860
13861 udelay(2);
13862 reg = read_csr(dd, RCV_STATUS);
13863 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13864 break;
13865
13866
13867 if (count++ > 50) {
13868 dd_dev_err(dd,
13869 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13870 __func__);
13871 break;
13872 }
13873 }
13874 }
13875
13876
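/* set RXE CSRs to chip reset defaults */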
13877 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13878 {
13879 int i, j;
13880
13881
13882
13883
13884 write_csr(dd, RCV_CTRL, 0);
13885 init_rbufs(dd);
13886
13887
13888
13889
13890 write_csr(dd, RCV_BTH_QP, 0);
13891 write_csr(dd, RCV_MULTICAST, 0);
13892 write_csr(dd, RCV_BYPASS, 0);
13893 write_csr(dd, RCV_VL15, 0);
13894
13895 write_csr(dd, RCV_ERR_INFO,
13896 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13897
13898 write_csr(dd, RCV_ERR_MASK, 0);
13899 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13900
13901 for (i = 0; i < 32; i++)
13902 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13903 for (i = 0; i < 4; i++)
13904 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13905 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13906 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13907 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13908 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13909 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13910 clear_rsm_rule(dd, i);
13911 for (i = 0; i < 32; i++)
13912 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13913
13914
13915
13916
13917 for (i = 0; i < chip_rcv_contexts(dd); i++) {
13918
13919 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13920
13921 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13922 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13923 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13924 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13925 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13926 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13927 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13928 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13929 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13930 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13931
13932
13933
13934 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13935
13936 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13937
13938 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13939 write_uctxt_csr(dd, i,
13940 RCV_TID_FLOW_TABLE + (8 * j), 0);
13941 }
13942 }
13943 }
13944
13945
13946
13947
13948
13949
13950
13951
13952
13953
13954
13955
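/*
 * Set the SC to VL tables.  They power on to zeros, which would cause
 * send context errors, so map SC 0-7 -> VL 0-7, SC 15 -> VL 15, and
 * everything else -> VL 0, in both the send-side and DC tables and in
 * the driver's sc2vl shadow.
 */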
13956 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13957 {
13958 int i;
13959
13960
13961
13962 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13963 0,
13964 0, 0, 1, 1,
13965 2, 2, 3, 3,
13966 4, 4, 5, 5,
13967 6, 6, 7, 7));
13968 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13969 1,
13970 8, 0, 9, 0,
13971 10, 0, 11, 0,
13972 12, 0, 13, 0,
13973 14, 0, 15, 15));
13974 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13975 2,
13976 16, 0, 17, 0,
13977 18, 0, 19, 0,
13978 20, 0, 21, 0,
13979 22, 0, 23, 0));
13980 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13981 3,
13982 24, 0, 25, 0,
13983 26, 0, 27, 0,
13984 28, 0, 29, 0,
13985 30, 0, 31, 0));
13986
13987
13988 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13989 15_0,
13990 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13991 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13992 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13993 31_16,
13994 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13995 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13996
13997
13998 for (i = 0; i < 32; i++) {
13999 if (i < 8 || i == 15)
14000 *((u8 *)(dd->sc2vl) + i) = (u8)i;
14001 else
14002 *((u8 *)(dd->sc2vl) + i) = 0;
14003 }
14004 }
14005
14006
14007
14008
14009
14010
14011
14012
14013
14014
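/*
 * Put the HFI CSRs into a known state: stop all send and receive
 * traffic, mask all interrupts, reset the DC, then reset the remaining
 * CSRs either with an FLR or with explicit writes.
 */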
14015 static int init_chip(struct hfi1_devdata *dd)
14016 {
14017 int i;
14018 int ret = 0;
14019
14020
14021
14022
14023
14024
14025
14026
14027
14028
14029
14030
14031
14032 write_csr(dd, SEND_CTRL, 0);
14033 for (i = 0; i < chip_send_contexts(dd); i++)
14034 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
14035 for (i = 0; i < chip_sdma_engines(dd); i++)
14036 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
14037
14038 write_csr(dd, RCV_CTRL, 0);
14039 for (i = 0; i < chip_rcv_contexts(dd); i++)
14040 write_csr(dd, RCV_CTXT_CTRL, 0);
14041
14042 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
14043 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
14044
14045
14046
14047
14048
14049
14050
14051 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
14052 (void)read_csr(dd, CCE_DC_CTRL);
14053
14054 if (use_flr) {
14055
14056
14057
14058
14059
14060 dd_dev_info(dd, "Resetting CSRs with FLR\n");
14061
14062
14063 pcie_flr(dd->pcidev);
14064
14065
14066 ret = restore_pci_variables(dd);
14067 if (ret) {
14068 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14069 __func__);
14070 return ret;
14071 }
14072
14073 if (is_ax(dd)) {
14074 dd_dev_info(dd, "Resetting CSRs with FLR\n");
14075 pcie_flr(dd->pcidev);
14076 ret = restore_pci_variables(dd);
14077 if (ret) {
14078 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14079 __func__);
14080 return ret;
14081 }
14082 }
14083 } else {
14084 dd_dev_info(dd, "Resetting CSRs with writes\n");
14085 reset_cce_csrs(dd);
14086 reset_txe_csrs(dd);
14087 reset_rxe_csrs(dd);
14088 reset_misc_csrs(dd);
14089 }
14090
14091 write_csr(dd, CCE_DC_CTRL, 0);
14092
14093
14094 setextled(dd, 0);
14095
14096
14097
14098
14099
14100
14101
14102
14103
14104
14105
14106 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14107 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
14108 init_chip_resources(dd);
14109 return ret;
14110 }
14111
14112 static void init_early_variables(struct hfi1_devdata *dd)
14113 {
14114 int i;
14115
	/* assign link credit variables */
14117 dd->vau = CM_VAU;
14118 dd->link_credits = CM_GLOBAL_CREDITS;
14119 if (is_ax(dd))
14120 dd->link_credits--;
14121 dd->vcu = cu_to_vcu(hfi1_cu);
	/* enough room for 8 MAD packets plus header - 17K */
14123 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14124 if (dd->vl15_init > dd->link_credits)
14125 dd->vl15_init = dd->link_credits;
14126
14127 write_uninitialized_csrs_and_memories(dd);
14128
14129 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14130 for (i = 0; i < dd->num_pports; i++) {
14131 struct hfi1_pportdata *ppd = &dd->pport[i];
14132
14133 set_partition_keys(ppd);
14134 }
14135 init_sc2vl_tables(dd);
14136 }
14137
14138 static void init_kdeth_qp(struct hfi1_devdata *dd)
14139 {
14140 write_csr(dd, SEND_BTH_QP,
14141 (RVT_KDETH_QP_PREFIX & SEND_BTH_QP_KDETH_QP_MASK) <<
14142 SEND_BTH_QP_KDETH_QP_SHIFT);
14143
14144 write_csr(dd, RCV_BTH_QP,
14145 (RVT_KDETH_QP_PREFIX & RCV_BTH_QP_KDETH_QP_MASK) <<
14146 RCV_BTH_QP_KDETH_QP_SHIFT);
14147 }
14148
/**
 * hfi1_get_qp_map - read a mapping byte out of the RSM QP map table
 * @dd: device data
 * @idx: index into the 256-entry table
 */
14154 u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
14155 {
14156 u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
14157
14158 reg >>= (idx % 8) * 8;
14159 return reg;
14160 }
14161
/**
 * init_qpmap_table - init qp map
 * @dd: device data
 * @first_ctxt: first context
 * @last_ctxt: last context
 *
 * This sets the qpn mapping table that
 * is indexed by qpn[8:1].
 *
 * The routine will round robin the 256 settings
 * between first_ctxt and last_ctxt.
 */
14179 static void init_qpmap_table(struct hfi1_devdata *dd,
14180 u32 first_ctxt,
14181 u32 last_ctxt)
14182 {
14183 u64 reg = 0;
14184 u64 regno = RCV_QP_MAP_TABLE;
14185 int i;
14186 u64 ctxt = first_ctxt;
14187
14188 for (i = 0; i < 256; i++) {
14189 reg |= ctxt << (8 * (i % 8));
14190 ctxt++;
14191 if (ctxt > last_ctxt)
14192 ctxt = first_ctxt;
14193 if (i % 8 == 7) {
14194 write_csr(dd, regno, reg);
14195 reg = 0;
14196 regno += 8;
14197 }
14198 }
14199
14200 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14201 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14202 }
14203
14204 struct rsm_map_table {
14205 u64 map[NUM_MAP_REGS];
14206 unsigned int used;
14207 };
14208
14209 struct rsm_rule_data {
14210 u8 offset;
14211 u8 pkt_type;
14212 u32 field1_off;
14213 u32 field2_off;
14214 u32 index1_off;
14215 u32 index1_width;
14216 u32 index2_off;
14217 u32 index2_width;
14218 u32 mask1;
14219 u32 value1;
14220 u32 mask2;
14221 u32 value2;
14222 };
14223
/*
 * Allocate an RSM map table.  Entries default to an invalid receive
 * context (0xff); A0 hardware does not honor the invalid marker, so
 * context 0 is used there instead.  Returns NULL on allocation failure.
 */
14228 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14229 {
14230 struct rsm_map_table *rmt;
14231 u8 rxcontext = is_ax(dd) ? 0 : 0xff;
14232
14233 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14234 if (rmt) {
14235 memset(rmt->map, rxcontext, sizeof(rmt->map));
14236 rmt->used = 0;
14237 }
14238
14239 return rmt;
14240 }
14241
/*
 * Write the final RMT map table to the chip and enable RSM.  OK if the
 * table is NULL.
 */
14246 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14247 struct rsm_map_table *rmt)
14248 {
14249 int i;
14250
14251 if (rmt) {
		/* write table to chip */
14253 for (i = 0; i < NUM_MAP_REGS; i++)
14254 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14255
		/* enable RSM */
14257 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14258 }
14259 }
14260
/* Is the given receive side mapping rule in use? */
14262 static bool has_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14263 {
14264 return read_csr(dd, RCV_RSM_CFG + (8 * rule_index)) != 0;
14265 }
14266
/*
 * Add a receive side mapping rule.
 */
14270 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14271 struct rsm_rule_data *rrd)
14272 {
14273 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14274 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14275 1ull << rule_index |
14276 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14277 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14278 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14279 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14280 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14281 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14282 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14283 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14284 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14285 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14286 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14287 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14288 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14289 }
14290
/*
 * Clear a receive side mapping rule.
 */
14294 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14295 {
14296 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14297 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14298 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14299 }
14300
/* return the number of RSM map table entries that will be used for QOS */
14302 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14303 unsigned int *np)
14304 {
14305 int i;
14306 unsigned int m, n;
14307 u8 max_by_vl = 0;
14308
	/* is QOS active at all? */
14310 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14311 num_vls == 1 ||
14312 krcvqsset <= 1)
14313 goto no_qos;
14314
	/* determine bits for qpn */
14316 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14317 if (krcvqs[i] > max_by_vl)
14318 max_by_vl = krcvqs[i];
14319 if (max_by_vl > 32)
14320 goto no_qos;
14321 m = ilog2(__roundup_pow_of_two(max_by_vl));
14322
	/* determine bits for vl */
14324 n = ilog2(__roundup_pow_of_two(num_vls));
14325
	/* reject if too much is used */
14327 if ((m + n) > 7)
14328 goto no_qos;
14329
14330 if (mp)
14331 *mp = m;
14332 if (np)
14333 *np = n;
14334
14335 return 1 << (m + n);
14336
14337 no_qos:
14338 if (mp)
14339 *mp = 0;
14340 if (np)
14341 *np = 0;
14342 return 0;
14343 }
14344
/**
 * init_qos - init RX qos
 * @dd: device data
 * @rmt: RSM map table
 *
 * This routine initializes Rule 0 and the RSM map table to implement
 * quality of service (qos).
 *
 * If all of the limit tests succeed, qos is applied based on the array
 * interpretation of krcvqs where entry 0 is VL0.
 *
 * The number of vl bits (n) and the number of qpn bits (m) are computed to
 * feed both the RSM map table and the single rule.
 */
14359 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14360 {
14361 struct rsm_rule_data rrd;
14362 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14363 unsigned int rmt_entries;
14364 u64 reg;
14365
14366 if (!rmt)
14367 goto bail;
14368 rmt_entries = qos_rmt_entries(dd, &m, &n);
14369 if (rmt_entries == 0)
14370 goto bail;
14371 qpns_per_vl = 1 << m;
14372
	/* enough room in the map table? */
14374 rmt_entries = 1 << (m + n);
14375 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14376 goto bail;
14377
	/* add qos entries to the RSM map table */
14379 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14380 unsigned tctxt;
14381
14382 for (qpn = 0, tctxt = ctxt;
14383 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14384 unsigned idx, regoff, regidx;
14385
			/* generate the index the hashing will produce */
14387 idx = rmt->used + ((qpn << n) ^ i);
14388 regoff = (idx % 8) * 8;
14389 regidx = idx / 8;
			/* replace default with context number */
14391 reg = rmt->map[regidx];
14392 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14393 << regoff);
14394 reg |= (u64)(tctxt++) << regoff;
14395 rmt->map[regidx] = reg;
14396 if (tctxt == ctxt + krcvqs[i])
14397 tctxt = ctxt;
14398 }
14399 ctxt += krcvqs[i];
14400 }
14401
14402 rrd.offset = rmt->used;
14403 rrd.pkt_type = 2;
14404 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14405 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14406 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14407 rrd.index1_width = n;
14408 rrd.index2_off = QPN_SELECT_OFFSET;
14409 rrd.index2_width = m + n;
14410 rrd.mask1 = LRH_BTH_MASK;
14411 rrd.value1 = LRH_BTH_VALUE;
14412 rrd.mask2 = LRH_SC_MASK;
14413 rrd.value2 = LRH_SC_VALUE;
14414
	/* add rule 0 */
14416 add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14417
	/* mark RSM map entries as used */
14419 rmt->used += rmt_entries;
	/* map everything else to the mcast/err/vl15 context */
14421 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14422 dd->qos_shift = n + 1;
14423 return;
14424 bail:
14425 dd->qos_shift = 1;
14426 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14427 }
14428
14429 static void init_fecn_handling(struct hfi1_devdata *dd,
14430 struct rsm_map_table *rmt)
14431 {
14432 struct rsm_rule_data rrd;
14433 u64 reg;
14434 int i, idx, regoff, regidx, start;
14435 u8 offset;
14436 u32 total_cnt;
14437
14438 if (HFI1_CAP_IS_KSET(TID_RDMA))
		/* Exclude context 0 */
14440 start = 1;
14441 else
14442 start = dd->first_dyn_alloc_ctxt;
14443
14444 total_cnt = dd->num_rcv_contexts - start;
14445
	/* there needs to be enough room in the map table */
14447 if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
14448 dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
14449 return;
14450 }
	/*
	 * RSM will extract the destination context as an index into the
	 * map table.  The destination contexts are a sequential block
	 * in the range start...num_rcv_contexts-1 (inclusive).
	 * Map entries are accessed as offset + extracted value.  Adjust
	 * the added offset so this sequence can be placed anywhere in
	 * the table - as long as the entries themselves do not wrap.
	 * There are only enough bits in offset for the table size, so
	 * start with that to allow for a "negative" offset.
	 */
14462 offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
14463
14464 for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
14465 i++, idx++) {
		/* replace with identity mapping */
14467 regoff = (idx % 8) * 8;
14468 regidx = idx / 8;
14469 reg = rmt->map[regidx];
14470 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14471 reg |= (u64)i << regoff;
14472 rmt->map[regidx] = reg;
14473 }
14474
	/*
	 * For RSM intercept of Expected FECN packets:
	 * o packet type 0 - expected
	 * o match on F (bit 95), using select/match 1, and
	 * o match on SH (bit 133), using select/match 2.
	 *
	 * Use index 1 to extract the 8-bit receive context from DestQP
	 * (start at bit 64).  Use that as the RSM map table index.
	 */
14484 rrd.offset = offset;
14485 rrd.pkt_type = 0;
14486 rrd.field1_off = 95;
14487 rrd.field2_off = 133;
14488 rrd.index1_off = 64;
14489 rrd.index1_width = 8;
14490 rrd.index2_off = 0;
14491 rrd.index2_width = 0;
14492 rrd.mask1 = 1;
14493 rrd.value1 = 1;
14494 rrd.mask2 = 1;
14495 rrd.value2 = 1;
14496
	/* add rule 1 */
14498 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14499
14500 rmt->used += total_cnt;
14501 }
14502
14503 static inline bool hfi1_is_rmt_full(int start, int spare)
14504 {
14505 return (start + spare) > NUM_MAP_ENTRIES;
14506 }
14507
14508 static bool hfi1_netdev_update_rmt(struct hfi1_devdata *dd)
14509 {
14510 u8 i, j;
14511 u8 ctx_id = 0;
14512 u64 reg;
14513 u32 regoff;
14514 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
14515 int ctxt_count = hfi1_netdev_ctxt_count(dd);
14516
	/* We already have contexts mapped in RMT */
14518 if (has_rsm_rule(dd, RSM_INS_VNIC) || has_rsm_rule(dd, RSM_INS_AIP)) {
14519 dd_dev_info(dd, "Contexts are already mapped in RMT\n");
14520 return true;
14521 }
14522
14523 if (hfi1_is_rmt_full(rmt_start, NUM_NETDEV_MAP_ENTRIES)) {
14524 dd_dev_err(dd, "Not enough RMT entries used = %d\n",
14525 rmt_start);
14526 return false;
14527 }
14528
14529 dev_dbg(&(dd)->pcidev->dev, "RMT start = %d, end %d\n",
14530 rmt_start,
14531 rmt_start + NUM_NETDEV_MAP_ENTRIES);
14532
	/* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14534 regoff = RCV_RSM_MAP_TABLE + (rmt_start / 8) * 8;
14535 reg = read_csr(dd, regoff);
14536 for (i = 0; i < NUM_NETDEV_MAP_ENTRIES; i++) {
		/* Update map register with netdev context */
14538 j = (rmt_start + i) % 8;
14539 reg &= ~(0xffllu << (j * 8));
14540 reg |= (u64)hfi1_netdev_get_ctxt(dd, ctx_id++)->ctxt << (j * 8);
		/* Wrap up netdev ctx index */
14542 ctx_id %= ctxt_count;
		/* Write back map register */
14544 if (j == 7 || ((i + 1) == NUM_NETDEV_MAP_ENTRIES)) {
14545 dev_dbg(&(dd)->pcidev->dev,
14546 "RMT[%d] =0x%llx\n",
14547 regoff - RCV_RSM_MAP_TABLE, reg);
14548
14549 write_csr(dd, regoff, reg);
14550 regoff += 8;
14551 if (i < (NUM_NETDEV_MAP_ENTRIES - 1))
14552 reg = read_csr(dd, regoff);
14553 }
14554 }
14555
14556 return true;
14557 }
14558
14559 static void hfi1_enable_rsm_rule(struct hfi1_devdata *dd,
14560 int rule, struct rsm_rule_data *rrd)
14561 {
14562 if (!hfi1_netdev_update_rmt(dd)) {
14563 dd_dev_err(dd, "Failed to update RMT for RSM%d rule\n", rule);
14564 return;
14565 }
14566
14567 add_rsm_rule(dd, rule, rrd);
14568 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14569 }
14570
14571 void hfi1_init_aip_rsm(struct hfi1_devdata *dd)
14572 {
	/*
	 * go through with the initialization only if this rule actually
	 * doesn't exist yet
	 */
14577 if (atomic_fetch_inc(&dd->ipoib_rsm_usr_num) == 0) {
14578 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
14579 struct rsm_rule_data rrd = {
14580 .offset = rmt_start,
14581 .pkt_type = IB_PACKET_TYPE,
14582 .field1_off = LRH_BTH_MATCH_OFFSET,
14583 .mask1 = LRH_BTH_MASK,
14584 .value1 = LRH_BTH_VALUE,
14585 .field2_off = BTH_DESTQP_MATCH_OFFSET,
14586 .mask2 = BTH_DESTQP_MASK,
14587 .value2 = BTH_DESTQP_VALUE,
14588 .index1_off = DETH_AIP_SQPN_SELECT_OFFSET +
14589 ilog2(NUM_NETDEV_MAP_ENTRIES),
14590 .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
14591 .index2_off = DETH_AIP_SQPN_SELECT_OFFSET,
14592 .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
14593 };
14594
14595 hfi1_enable_rsm_rule(dd, RSM_INS_AIP, &rrd);
14596 }
14597 }
14598
/* Initialize RSM for VNIC */
14600 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14601 {
14602 int rmt_start = hfi1_netdev_get_free_rmt_idx(dd);
14603 struct rsm_rule_data rrd = {
		/* Add rule for vnic */
14605 .offset = rmt_start,
14606 .pkt_type = 4,
		/* Match 16B packets */
14608 .field1_off = L2_TYPE_MATCH_OFFSET,
14609 .mask1 = L2_TYPE_MASK,
14610 .value1 = L2_16B_VALUE,
		/* Match ETH L4 packets */
14612 .field2_off = L4_TYPE_MATCH_OFFSET,
14613 .mask2 = L4_16B_TYPE_MASK,
14614 .value2 = L4_16B_ETH_VALUE,
		/* Calc context from veswid and entropy */
14616 .index1_off = L4_16B_HDR_VESWID_OFFSET,
14617 .index1_width = ilog2(NUM_NETDEV_MAP_ENTRIES),
14618 .index2_off = L2_16B_ENTROPY_OFFSET,
14619 .index2_width = ilog2(NUM_NETDEV_MAP_ENTRIES)
14620 };
14621
14622 hfi1_enable_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14623 }
14624
14625 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14626 {
14627 clear_rsm_rule(dd, RSM_INS_VNIC);
14628 }
14629
14630 void hfi1_deinit_aip_rsm(struct hfi1_devdata *dd)
14631 {
	/* only actually clear the rule if it's the last user asking to do so */
14633 if (atomic_fetch_add_unless(&dd->ipoib_rsm_usr_num, -1, 0) == 1)
14634 clear_rsm_rule(dd, RSM_INS_AIP);
14635 }
14636
14637 static int init_rxe(struct hfi1_devdata *dd)
14638 {
14639 struct rsm_map_table *rmt;
14640 u64 val;
14641
	/* enable all receive errors */
14643 write_csr(dd, RCV_ERR_MASK, ~0ull);
14644
14645 rmt = alloc_rsm_map_table(dd);
14646 if (!rmt)
14647 return -ENOMEM;
14648
	/* set up QOS, including the QPN map table */
14650 init_qos(dd, rmt);
14651 init_fecn_handling(dd, rmt);
14652 complete_rsm_map_table(dd, rmt);
	/* record number of used rsm map entries for netdev */
14654 hfi1_netdev_set_free_rmt_idx(dd, rmt->used);
14655 kfree(rmt);
14656
	/*
	 * Make sure RcvCtrl.RcvWcb <= PCIe Device Control
	 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
	 * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
	 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
	 * Max_Payload_Size set to its minimum of 128.
	 *
	 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
	 * (64 bytes).  Max_Payload_Size is possibly modified upward in
	 * tune_pcie_caps() which is called after this routine.
	 */

	/* Have 16 bytes (4DW) of bypass header available in header queue */
14670 val = read_csr(dd, RCV_BYPASS);
14671 val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14672 val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14673 RCV_BYPASS_HDR_SIZE_SHIFT);
14674 write_csr(dd, RCV_BYPASS, val);
14675 return 0;
14676 }
14677
14678 static void init_other(struct hfi1_devdata *dd)
14679 {
	/* enable all CCE errors */
14681 write_csr(dd, CCE_ERR_MASK, ~0ull);
	/* enable *some* Misc errors */
14683 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
	/* enable all DC errors, except LCB */
14685 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14686 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14687 }
14688
14689
/*
 * Fill out the given AU table using the given CU.  A CU is defined in terms
 * of AUs.  The table is an encoding: given the index, how many AUs does that
 * represent?
 *
 * NOTE: Assumes that the register layout is the same for the
 * local and remote tables.
 */
14697 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14698 u32 csr0to3, u32 csr4to7)
14699 {
14700 write_csr(dd, csr0to3,
14701 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14702 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14703 2ull * cu <<
14704 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14705 4ull * cu <<
14706 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14707 write_csr(dd, csr4to7,
14708 8ull * cu <<
14709 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14710 16ull * cu <<
14711 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14712 32ull * cu <<
14713 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14714 64ull * cu <<
14715 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14716 }
14717
14718 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14719 {
14720 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14721 SEND_CM_LOCAL_AU_TABLE4_TO7);
14722 }
14723
14724 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14725 {
14726 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14727 SEND_CM_REMOTE_AU_TABLE4_TO7);
14728 }
14729
14730 static void init_txe(struct hfi1_devdata *dd)
14731 {
14732 int i;
14733
	/* enable all PIO, SDMA, general, and Egress errors */
14735 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14736 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14737 write_csr(dd, SEND_ERR_MASK, ~0ull);
14738 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14739
	/* enable all per-context and per-SDMA engine errors */
14741 for (i = 0; i < chip_send_contexts(dd); i++)
14742 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14743 for (i = 0; i < chip_sdma_engines(dd); i++)
14744 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14745
	/* set the local CU to AU mapping */
14747 assign_local_cm_au_table(dd, dd->vcu);

	/*
	 * Set a reasonable default for the Credit Return Timer.  Don't
	 * set it on the simulator - it causes the simulator to choke.
	 */
14753 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14754 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14755 }
14756
14757 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14758 u16 jkey)
14759 {
14760 u8 hw_ctxt;
14761 u64 reg;
14762
14763 if (!rcd || !rcd->sc)
14764 return -EINVAL;
14765
14766 hw_ctxt = rcd->sc->hw_context;
14767 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK |
14768 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14769 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14770
14771 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14772 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14773 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
	/*
	 * Enable send-side J_KEY integrity check, unless this is A0 h/w
	 */
14777 if (!is_ax(dd)) {
14778 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14779 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14780 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14781 }
14782
	/* Enable J_KEY check on receive context. */
14784 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14785 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14786 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14787 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14788
14789 return 0;
14790 }
14791
14792 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14793 {
14794 u8 hw_ctxt;
14795 u64 reg;
14796
14797 if (!rcd || !rcd->sc)
14798 return -EINVAL;
14799
14800 hw_ctxt = rcd->sc->hw_context;
14801 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
	/*
	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
	 * This check would not have been enabled for A0 h/w, see
	 * hfi1_set_ctxt_jkey().
	 */
14807 if (!is_ax(dd)) {
14808 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14809 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14810 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14811 }

	/* Turn off the J_KEY on the receive side */
14813 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14814
14815 return 0;
14816 }
14817
14818 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14819 u16 pkey)
14820 {
14821 u8 hw_ctxt;
14822 u64 reg;
14823
14824 if (!rcd || !rcd->sc)
14825 return -EINVAL;
14826
14827 hw_ctxt = rcd->sc->hw_context;
14828 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14829 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14830 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14831 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14832 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14833 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14834 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14835
14836 return 0;
14837 }
14838
14839 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14840 {
14841 u8 hw_ctxt;
14842 u64 reg;
14843
14844 if (!ctxt || !ctxt->sc)
14845 return -EINVAL;
14846
14847 hw_ctxt = ctxt->sc->hw_context;
14848 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14849 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14850 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14851 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14852
14853 return 0;
14854 }
14855
/*
 * Start cleaning up the chip.  The clean up happens in multiple stages;
 * this is just the first.
 */
14860 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14861 {
14862 aspm_exit(dd);
14863 free_cntrs(dd);
14864 free_rcverr(dd);
14865 finish_chip_resources(dd);
14866 }
14867
14868 #define HFI_BASE_GUID(dev) \
14869 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14870
/*
 * Information can be shared between the two HFIs on the same ASIC
 * in the same OS.  This function finds the peer device and sets
 * up a shared structure.
 */
14876 static int init_asic_data(struct hfi1_devdata *dd)
14877 {
14878 unsigned long index;
14879 struct hfi1_devdata *peer;
14880 struct hfi1_asic_data *asic_data;
14881 int ret = 0;
14882
	/* pre-allocate the asic structure in case we are the first device */
14884 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14885 if (!asic_data)
14886 return -ENOMEM;
14887
14888 xa_lock_irq(&hfi1_dev_table);
	/* Find our peer device */
14890 xa_for_each(&hfi1_dev_table, index, peer) {
14891 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
14892 dd->unit != peer->unit)
14893 break;
14894 }
14895
14896 if (peer) {
		/* use already allocated structure */
14898 dd->asic_data = peer->asic_data;
14899 kfree(asic_data);
14900 } else {
14901 dd->asic_data = asic_data;
14902 mutex_init(&dd->asic_data->asic_resource_mutex);
14903 }
	dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14905 xa_unlock_irq(&hfi1_dev_table);
14906
	/* first one through - set up i2c devices */
14908 if (!peer)
14909 ret = set_up_i2c(dd, dd->asic_data);
14910
14911 return ret;
14912 }
14913
/*
 * Set dd->boardname.  Use a generic name if a name is not returned from
 * EFI variable space.
 *
 * Return 0 on success, -ENOMEM if space could not be allocated.
 */
14920 static int obtain_boardname(struct hfi1_devdata *dd)
14921 {
	/* generic board description */
14923 const char generic[] =
14924 "Cornelis Omni-Path Host Fabric Interface Adapter 100 Series";
14925 unsigned long size;
14926 int ret;
14927
14928 ret = read_hfi1_efi_var(dd, "description", &size,
14929 (void **)&dd->boardname);
14930 if (ret) {
14931 dd_dev_info(dd, "Board description not found\n");
		/* use generic description */
14933 dd->boardname = kstrdup(generic, GFP_KERNEL);
14934 if (!dd->boardname)
14935 return -ENOMEM;
14936 }
14937 return 0;
14938 }
14939
/*
 * Check the interrupt registers to make sure that they are mapped correctly.
 * It is intended to help a user identify any mismapping by the VMM when the
 * driver is running in a VM.  This function should only be called before
 * interrupts are set up.
 *
 * Return 0 on success, -EINVAL on failure.
 */
14948 static int check_int_registers(struct hfi1_devdata *dd)
14949 {
14950 u64 reg;
14951 u64 all_bits = ~(u64)0;
14952 u64 mask;
14953
	/* Clear CceIntMask[0] to avoid raising any interrupts */
14955 mask = read_csr(dd, CCE_INT_MASK);
14956 write_csr(dd, CCE_INT_MASK, 0ull);
14957 reg = read_csr(dd, CCE_INT_MASK);
14958 if (reg)
14959 goto err_exit;
14960
	/* Clear all interrupt status bits */
14962 write_csr(dd, CCE_INT_CLEAR, all_bits);
14963 reg = read_csr(dd, CCE_INT_STATUS);
14964 if (reg)
14965 goto err_exit;
14966
	/* Set all interrupt status bits */
14968 write_csr(dd, CCE_INT_FORCE, all_bits);
14969 reg = read_csr(dd, CCE_INT_STATUS);
14970 if (reg != all_bits)
14971 goto err_exit;
14972
	/* Restore the interrupt mask */
14974 write_csr(dd, CCE_INT_CLEAR, all_bits);
14975 write_csr(dd, CCE_INT_MASK, mask);
14976
14977 return 0;
14978 err_exit:
14979 write_csr(dd, CCE_INT_MASK, mask);
14980 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14981 return -EINVAL;
14982 }
14983
/**
 * hfi1_init_dd() - Initialize most of the dd structure.
 * @dd: the dd device
 *
 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
14991 int hfi1_init_dd(struct hfi1_devdata *dd)
14992 {
14993 struct pci_dev *pdev = dd->pcidev;
14994 struct hfi1_pportdata *ppd;
14995 u64 reg;
14996 int i, ret;
14997 static const char * const inames[] = {
14998 "RTL silicon",
14999 "RTL VCS simulation",
15000 "RTL FPGA emulation",
15001 "Functional simulator"
15002 };
15003 struct pci_dev *parent = pdev->bus->self;
15004 u32 sdma_engines = chip_sdma_engines(dd);
15005
15006 ppd = dd->pport;
15007 for (i = 0; i < dd->num_pports; i++, ppd++) {
15008 int vl;
		/* init common fields */
15010 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
		/* DC supports 4 link widths */
15012 ppd->link_width_supported =
15013 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
15014 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
15015 ppd->link_width_downgrade_supported =
15016 ppd->link_width_supported;
		/* start out enabling only 4X */
15018 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
15019 ppd->link_width_downgrade_enabled =
15020 ppd->link_width_downgrade_supported;
		/* link width active is 0 when link is down */
		/* link width downgrade active is 0 when link is down */

15024 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
15025 num_vls > HFI1_MAX_VLS_SUPPORTED) {
15026 dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
15027 num_vls, HFI1_MAX_VLS_SUPPORTED);
15028 num_vls = HFI1_MAX_VLS_SUPPORTED;
15029 }
15030 ppd->vls_supported = num_vls;
15031 ppd->vls_operational = ppd->vls_supported;
		/* Set the default MTU. */
15033 for (vl = 0; vl < num_vls; vl++)
15034 dd->vld[vl].mtu = hfi1_max_mtu;
15035 dd->vld[15].mtu = MAX_MAD_PACKET;

		/*
		 * Set the initial values to reasonable defaults, will be set
		 * for real when the link is up.
		 */
15040 ppd->overrun_threshold = 0x4;
15041 ppd->phy_error_threshold = 0xf;
15042 ppd->port_crc_mode_enabled = link_crc_mask;
		/* initialize supported LTP CRC mode */
15044 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
		/* initialize enabled LTP CRC mode */
15046 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
		/* start in offline */
15048 ppd->host_link_state = HLS_DN_OFFLINE;
15049 init_vl_arb_caches(ppd);
15050 }
15051
	/*
	 * Do remaining PCIe setup and save PCIe values in dd.
	 * Any error printing is already done by the init code.
	 * On return, we have the chip mapped.
	 */
15057 ret = hfi1_pcie_ddinit(dd, pdev);
15058 if (ret < 0)
15059 goto bail_free;
15060
	/* Save PCI space registers to rewrite after device reset */
15062 ret = save_pci_variables(dd);
15063 if (ret < 0)
15064 goto bail_cleanup;
15065
15066 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
15067 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
15068 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
15069 & CCE_REVISION_CHIP_REV_MINOR_MASK;

	/*
	 * Check interrupt registers mapping if the driver has no access to
	 * the upstream component.  In this case, it is likely that the
	 * driver is running in a VM.
	 */
15076 if (!parent) {
15077 ret = check_int_registers(dd);
15078 if (ret)
15079 goto bail_cleanup;
15080 }

	/*
	 * Obtain the hardware ID - NOT related to unit, which is a
	 * software enumeration.
	 */
15086 reg = read_csr(dd, CCE_REVISION2);
15087 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
15088 & CCE_REVISION2_HFI_ID_MASK;
	/* the variable size will remove unwanted bits */
15090 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
15091 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
15092 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
15093 dd->icode < ARRAY_SIZE(inames) ?
15094 inames[dd->icode] : "unknown", (int)dd->irev);
15095
	/* speeds the hardware can support */
15097 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
	/* speeds allowed to run at */
15099 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
	/* give a reasonable active value, will be set on link up */
15101 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
15102
	/* fix up link widths for emulation _p */
15104 ppd = dd->pport;
15105 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
15106 ppd->link_width_supported =
15107 ppd->link_width_enabled =
15108 ppd->link_width_downgrade_supported =
15109 ppd->link_width_downgrade_enabled =
15110 OPA_LINK_WIDTH_1X;
15111 }
	/* ensure num_vls isn't larger than the number of SDMA engines */
15113 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
15114 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
15115 num_vls, sdma_engines);
15116 num_vls = sdma_engines;
15117 ppd->vls_supported = sdma_engines;
15118 ppd->vls_operational = ppd->vls_supported;
15119 }

	/*
	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
	 * Limit the max if larger than the field holds.  If timeout is
	 * non-zero, then the calculated field will be at least 1.
	 *
	 * Must be after icode is set up - the cclock rate depends
	 * on knowing the hardware being used.
	 */
15129 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
15130 if (dd->rcv_intr_timeout_csr >
15131 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
15132 dd->rcv_intr_timeout_csr =
15133 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
15134 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
15135 dd->rcv_intr_timeout_csr = 1;
15136
	/* needs to be done before we look for the peer device */
15138 read_guid(dd);
15139
	/* set up shared ASIC data with peer device */
15141 ret = init_asic_data(dd);
15142 if (ret)
15143 goto bail_cleanup;
15144
	/* obtain chip sizes, reset chip CSRs */
15146 ret = init_chip(dd);
15147 if (ret)
15148 goto bail_cleanup;
15149
	/* read in the PCIe link speed information */
15151 ret = pcie_speeds(dd);
15152 if (ret)
15153 goto bail_cleanup;
15154
	/* call before get_platform_config(), after init_chip_resources() */
15156 ret = eprom_init(dd);
15157 if (ret)
15158 goto bail_free_rcverr;
15159
	/* Needs to be called before hfi1_firmware_init */
15161 get_platform_config(dd);
15162
	/* read in firmware */
15164 ret = hfi1_firmware_init(dd);
15165 if (ret)
15166 goto bail_cleanup;

	/*
	 * In general, the PCIe Gen3 transition must occur after the
	 * chip has been idled (so it won't initiate any PCIe transactions
	 * e.g. an interrupt) and before the driver changes any registers
	 * (the transition will reset the registers).
	 *
	 * In particular, place this call after:
	 * - init_chip()          - the chip will not initiate any PCIe
	 *                          transactions
	 * - pcie_speeds()        - reads the current link speed
	 * - hfi1_firmware_init() - the needed firmware is ready to be
	 *                          downloaded
	 */
15180 ret = do_pcie_gen3_transition(dd);
15181 if (ret)
15182 goto bail_cleanup;

	/*
	 * This should probably occur in hfi1_pcie_init(), but historically
	 * occurs after the do_pcie_gen3_transition() code.
	 */
15188 tune_pcie_caps(dd);
15189
	/* start setting dd values and adjusting CSRs */
15191 init_early_variables(dd);
15192
15193 parse_platform_config(dd);
15194
15195 ret = obtain_boardname(dd);
15196 if (ret)
15197 goto bail_cleanup;
15198
15199 snprintf(dd->boardversion, BOARD_VERS_MAX,
15200 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15201 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15202 (u32)dd->majrev,
15203 (u32)dd->minrev,
15204 (dd->revision >> CCE_REVISION_SW_SHIFT)
15205 & CCE_REVISION_SW_MASK);
15206
	/* alloc VNIC/AIP rx data */
15208 ret = hfi1_alloc_rx(dd);
15209 if (ret)
15210 goto bail_cleanup;
15211
15212 ret = set_up_context_variables(dd);
15213 if (ret)
15214 goto bail_cleanup;
15215
	/* set initial RXE CSRs */
15217 ret = init_rxe(dd);
15218 if (ret)
15219 goto bail_cleanup;
15220
	/* set initial TXE CSRs */
15222 init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
15224 init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
15226 init_kdeth_qp(dd);
15227
15228 ret = hfi1_dev_affinity_init(dd);
15229 if (ret)
15230 goto bail_cleanup;
15231
	/* send contexts must be set up before receive contexts */
15233 ret = init_send_contexts(dd);
15234 if (ret)
15235 goto bail_cleanup;
15236
15237 ret = hfi1_create_kctxts(dd);
15238 if (ret)
15239 goto bail_cleanup;

	/*
	 * Initialize aspm, to be done after gen3 transition and setting up
	 * contexts and before enabling interrupts
	 */
15245 aspm_init(dd);
15246
15247 ret = init_pervl_scs(dd);
15248 if (ret)
15249 goto bail_cleanup;
15250
	/* sdma init */
15252 for (i = 0; i < dd->num_pports; ++i) {
15253 ret = sdma_init(dd, i);
15254 if (ret)
15255 goto bail_cleanup;
15256 }
15257
	/* use contexts created by hfi1_create_kctxts */
15259 ret = set_up_interrupts(dd);
15260 if (ret)
15261 goto bail_cleanup;
15262
15263 ret = hfi1_comp_vectors_set_up(dd);
15264 if (ret)
15265 goto bail_clear_intr;
15266
	/* set up LCB access - must be after set_up_interrupts() */
15268 init_lcb_access(dd);

	/*
	 * The serial number is created from the base guid:
	 * [27:24] = base guid [38:35]
	 * [23: 0] = base guid [23: 0]
	 */
15275 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15276 (dd->base_guid & 0xFFFFFF) |
15277 ((dd->base_guid >> 11) & 0xF000000));
15278
15279 dd->oui1 = dd->base_guid >> 56 & 0xFF;
15280 dd->oui2 = dd->base_guid >> 48 & 0xFF;
15281 dd->oui3 = dd->base_guid >> 40 & 0xFF;
15282
	ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15284 if (ret)
15285 goto bail_clear_intr;
15286
15287 thermal_init(dd);
15288
15289 ret = init_cntrs(dd);
15290 if (ret)
15291 goto bail_clear_intr;
15292
15293 ret = init_rcverr(dd);
15294 if (ret)
15295 goto bail_free_cntrs;
15296
15297 init_completion(&dd->user_comp);
15298
	/* The user refcount starts with one to indicate an active device */
15300 refcount_set(&dd->user_refcount, 1);
15301
15302 goto bail;
15303
15304 bail_free_rcverr:
15305 free_rcverr(dd);
15306 bail_free_cntrs:
15307 free_cntrs(dd);
15308 bail_clear_intr:
15309 hfi1_comp_vectors_clean_up(dd);
15310 msix_clean_up_interrupts(dd);
15311 bail_cleanup:
15312 hfi1_free_rx(dd);
15313 hfi1_pcie_ddcleanup(dd);
15314 bail_free:
15315 hfi1_free_devdata(dd);
15316 bail:
15317 return ret;
15318 }
15319
15320 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15321 u32 dw_len)
15322 {
15323 u32 delta_cycles;
15324 u32 current_egress_rate = ppd->current_egress_rate;
	/* rates here are in units of 10^6 bits/sec */

15327 if (desired_egress_rate == -1)
		return 0; /* shouldn't happen */
15329
15330 if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't help go faster, only slower */
15332
15333 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15334 egress_cycles(dw_len * 4, current_egress_rate);
15335
15336 return (u16)delta_cycles;
15337 }
15338
/**
 * create_pbc - build a pbc for transmission
 * @ppd: info of physical Hfi port
 * @flags: special case flags or-ed into the built pbc
 * @srate_mbs: static rate
 * @vl: vl
 * @dw_len: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature.  The lone other caller
 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
15354 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15355 u32 dw_len)
15356 {
15357 u64 pbc, delay = 0;
15358
15359 if (unlikely(srate_mbs))
15360 delay = delay_cycles(ppd, srate_mbs, dw_len);
15361
15362 pbc = flags
15363 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15364 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15365 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15366 | (dw_len & PBC_LENGTH_DWS_MASK)
15367 << PBC_LENGTH_DWS_SHIFT;
15368
15369 return pbc;
15370 }
15371
15372 #define SBUS_THERMAL 0x4f
15373 #define SBUS_THERM_MONITOR_MODE 0x1
15374
15375 #define THERM_FAILURE(dev, ret, reason) \
15376 dd_dev_err((dd), \
15377 "Thermal sensor initialization failed: %s (%d)\n", \
15378 (reason), (ret))
15379
/*
 * Initialize the thermal sensor.
 *
 * After initialization, enable polling of thermal sensor through
 * SBus interface.  In order for this to work, the SBus Master
 * firmware has to be loaded due to the fact that the HW polling
 * logic uses SBus interrupts, which are not supported with
 * default firmware.  Otherwise, no data will be returned through
 * the ASIC_STS_THERM CSR.
 */
15390 static int thermal_init(struct hfi1_devdata *dd)
15391 {
15392 int ret = 0;
15393
15394 if (dd->icode != ICODE_RTL_SILICON ||
15395 check_chip_resource(dd, CR_THERM_INIT, NULL))
15396 return ret;
15397
15398 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15399 if (ret) {
15400 THERM_FAILURE(dd, ret, "Acquire SBus");
15401 return ret;
15402 }
15403
15404 dd_dev_info(dd, "Initializing thermal sensor\n");
	/* Disable polling of thermal readings */
15406 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15407 msleep(100);
	/* Thermal Sensor Initialization */
	/*    Step 1: Reset the Thermal SBus Receiver */
15410 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15411 RESET_SBUS_RECEIVER, 0);
15412 if (ret) {
15413 THERM_FAILURE(dd, ret, "Bus Reset");
15414 goto done;
15415 }
	/*    Step 2: Set Reset bit in Therm csr */
15417 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15418 WRITE_SBUS_RECEIVER, 0x1);
15419 if (ret) {
15420 THERM_FAILURE(dd, ret, "Therm Block Reset");
15421 goto done;
15422 }
	/*    Step 3: Write clock divider value (100MHz -> 2MHz) */
15424 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15425 WRITE_SBUS_RECEIVER, 0x32);
15426 if (ret) {
15427 THERM_FAILURE(dd, ret, "Write Clock Div");
15428 goto done;
15429 }
	/*    Step 4: Select temperature mode */
15431 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15432 WRITE_SBUS_RECEIVER,
15433 SBUS_THERM_MONITOR_MODE);
15434 if (ret) {
15435 THERM_FAILURE(dd, ret, "Write Mode Sel");
15436 goto done;
15437 }
	/*    Step 5: De-assert block reset and start conversion */
15439 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15440 WRITE_SBUS_RECEIVER, 0x2);
15441 if (ret) {
15442 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15443 goto done;
15444 }
	/*    Step 5.1: Wait for first conversion (22.2ms per spec) */
15446 msleep(22);
15447
	/* Enable polling of thermal readings */
15449 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15450
	/* Set initialized flag */
15452 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15453 if (ret)
15454 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15455
15456 done:
15457 release_chip_resource(dd, CR_SBUS);
15458 return ret;
15459 }
15460
15461 static void handle_temp_err(struct hfi1_devdata *dd)
15462 {
15463 struct hfi1_pportdata *ppd = &dd->pport[0];

	/*
	 * Thermal Critical Interrupt
	 * Put the device into forced freeze mode, take link down to
	 * offline, and put DC into reset.
	 */
15469 dd_dev_emerg(dd,
15470 "Critical temperature reached! Forcing device into freeze mode!\n");
15471 dd->flags |= HFI1_FORCED_FREEZE;
15472 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);

	/*
	 * Shut DC down as much and as quickly as possible.
	 *
	 * Step 1: Take the link down to OFFLINE.  This will cause the
	 *         8051 to put the Serdes in reset.  However, we don't want
	 *         to go through the entire link state machine since we want
	 *         to shutdown ASAP.  Furthermore, this is not a graceful
	 *         shutdown but rather an attempt to save the chip.
	 *         The code below is almost the same as quiet_serdes() but
	 *         avoids all the extra work and the sleeps.
	 */
15484 ppd->driver_link_ready = 0;
15485 ppd->link_enabled = 0;
15486 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15487 PLS_OFFLINE);

	/*
	 * Step 2: Shutdown the DC.  Do not write to the DC after shutdown.
	 */
15492 dc_shutdown(dd);
15493 }