0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/bitops.h>
0009 #include <linux/delay.h>
0010 #include <linux/interrupt.h>
0011 #include <linux/pci.h>
0012 #include <linux/module.h>
0013 #include <linux/seq_file.h>
0014 #include <linux/cpu_rmap.h>
0015 #include "net_driver.h"
0016 #include "bitfield.h"
0017 #include "efx.h"
0018 #include "nic.h"
0019 #include "farch_regs.h"
0020 #include "io.h"
0021 #include "workarounds.h"
0022
0023
0024
0025
0026
0027
0028
0029
0030 int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
0031 unsigned int len, gfp_t gfp_flags)
0032 {
0033 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
0034 &buffer->dma_addr, gfp_flags);
0035 if (!buffer->addr)
0036 return -ENOMEM;
0037 buffer->len = len;
0038 return 0;
0039 }
0040
0041 void ef4_nic_free_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer)
0042 {
0043 if (buffer->addr) {
0044 dma_free_coherent(&efx->pci_dev->dev, buffer->len,
0045 buffer->addr, buffer->dma_addr);
0046 buffer->addr = NULL;
0047 }
0048 }
0049
0050
0051
0052
0053 bool ef4_nic_event_present(struct ef4_channel *channel)
0054 {
0055 return ef4_event_present(ef4_event(channel, channel->eventq_read_ptr));
0056 }
0057
/* Start a self-test event on @channel.
 *
 * event_test_cpu is reset to -1 ("no test event seen yet") before the
 * hardware is asked to generate the event; the smp_wmb() orders that
 * reset ahead of the generate call so a handler running on another CPU
 * cannot have its update overwritten by the stale -1.
 */
void ef4_nic_event_test_start(struct ef4_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	channel->efx->type->ev_test_generate(channel);
}
0064
/* Start a self-test interrupt on @efx.
 *
 * last_irq_cpu is reset to -1 ("no test IRQ seen yet") before the
 * interrupt is raised; the smp_wmb() orders the reset ahead of the
 * generate call so the IRQ handler's CPU record cannot be clobbered.
 *
 * Returns the result of the type-specific irq_test_generate method.
 */
int ef4_nic_irq_test_start(struct ef4_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	return efx->type->irq_test_generate(efx);
}
0071
0072
0073
0074
/* Hook interrupt handlers for the NIC.
 *
 * In legacy interrupt mode a single shared IRQ is requested for the
 * whole device and we return immediately.  In MSI/MSI-X mode one IRQ is
 * requested per channel; for MSI-X with CONFIG_RFS_ACCEL an RX CPU rmap
 * is also allocated and each RX channel's IRQ added to it.
 *
 * Returns 0 on success or a negative errno.  On failure, everything
 * acquired so far (the rmap and the first n_irqs channel IRQs) is
 * released before returning.
 */
int ef4_nic_init_interrupt(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	unsigned int n_irqs;
	int rc;

	if (!EF4_INT_MODE_USE_MSI(efx)) {
		/* Legacy mode: the line may be shared with other devices */
		rc = request_irq(efx->legacy_irq,
				 efx->type->irq_handle_legacy, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

#ifdef CONFIG_RFS_ACCEL
	if (efx->interrupt_mode == EF4_INT_MODE_MSIX) {
		efx->net_dev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(efx->n_rx_channels);
		if (!efx->net_dev->rx_cpu_rmap) {
			rc = -ENOMEM;
			goto fail1;
		}
	}
#endif

	/* Hook MSI or MSI-X interrupt, one per channel.  n_irqs counts
	 * how many have been hooked so the failure path knows how many
	 * to free.
	 */
	n_irqs = 0;
	ef4_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx->type->irq_handle_msi,
				 IRQF_PROBE_SHARED,
				 efx->msi_context[channel->channel].name,
				 &efx->msi_context[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
		++n_irqs;

#ifdef CONFIG_RFS_ACCEL
		/* Only RX channels are steered via the CPU rmap */
		if (efx->interrupt_mode == EF4_INT_MODE_MSIX &&
		    channel->channel < efx->n_rx_channels) {
			rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
					      channel->irq);
			if (rc)
				goto fail2;
		}
#endif
	}

	return 0;

 fail2:
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	/* Free only the IRQs that were successfully requested above */
	ef4_for_each_channel(channel, efx) {
		if (n_irqs-- == 0)
			break;
		free_irq(channel->irq, &efx->msi_context[channel->channel]);
	}
 fail1:
	return rc;
}
0145
0146 void ef4_nic_fini_interrupt(struct ef4_nic *efx)
0147 {
0148 struct ef4_channel *channel;
0149
0150 #ifdef CONFIG_RFS_ACCEL
0151 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
0152 efx->net_dev->rx_cpu_rmap = NULL;
0153 #endif
0154
0155 if (EF4_INT_MODE_USE_MSI(efx)) {
0156
0157 ef4_for_each_channel(channel, efx)
0158 free_irq(channel->irq,
0159 &efx->msi_context[channel->channel]);
0160 } else {
0161
0162 free_irq(efx->legacy_irq, efx);
0163 }
0164 }
0165
0166
0167
/* Register-dump support.
 *
 * REGISTER_REVISION_* map the (arch letter, revision letter) pairs used
 * in register definition names to the numeric hardware revision stored
 * in struct ef4_nic_reg.  'Z' aliases the highest defined revision of
 * each arch, so a *Z range means "this revision and everything later".
 */
#define REGISTER_REVISION_FA	1
#define REGISTER_REVISION_FB	2
#define REGISTER_REVISION_FC	3
#define REGISTER_REVISION_FZ	3	/* latest revision of F arch */
#define REGISTER_REVISION_ED	4
#define REGISTER_REVISION_EZ	4	/* latest revision of E arch */

/* One dumpable register: its offset (24 bits) and the inclusive range
 * of hardware revisions (3 bits each) on which it exists.
 */
struct ef4_nic_reg {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
};

/* Build an ef4_nic_reg entry from the register's generated name,
 * e.g. REGISTER(FOO, F, A, Z) -> { FR_AZ_FOO, rev(FA), rev(FZ) }.
 */
#define REGISTER(name, arch, min_rev, max_rev) {			\
	arch ## R_ ## min_rev ## max_rev ## _ ## name,			\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev				\
}
#define REGISTER_AA(name) REGISTER(name, F, A, A)
#define REGISTER_AB(name) REGISTER(name, F, A, B)
#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
#define REGISTER_BB(name) REGISTER(name, F, B, B)
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
0191
/* All single registers captured by ef4_nic_get_regs().  Each entry
 * records the register offset and the revision range on which it is
 * readable; entries outside the running hardware's revision are skipped
 * at dump time.  Grouping comments below are inferred from the register
 * names.
 */
static const struct ef4_nic_reg ef4_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	/* Interrupt control/address */
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),


	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	/* EEPROM/flash SPI host interface */
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),


	/* Board/global control */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),

	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	/* PCIe SerDes/PCS */
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),


	/* Event queue and SRAM/buffer-table configuration */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),

	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	/* RX datapath */
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),

	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),

	REGISTER_AA(RX_SELF_RST),

	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),

	/* TX datapath */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),

	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),

	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	/* MDIO management interface */
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),

	/* MAC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	/* 1G (GM) MAC */
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),

	REGISTER_AB(GM_MAX_FLEN),

	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	/* 10G (XM/XGXS) MAC */
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),

	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),


};
0298
/* One dumpable register table: base offset, revision range, the byte
 * step between rows (6 bits) and the number of rows (21 bits).
 */
struct ef4_nic_reg_table {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
	u32 step:6, rows:21;
};

/* Build a table entry from explicit dimensions.  The first (name)
 * argument is unused here; it keeps the invocations self-describing.
 */
#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev,				\
	step, rows							\
}
/* Build a table entry using the generated _STEP/_ROWS constants for the
 * named table.
 */
#define REGISTER_TABLE(name, arch, min_rev, max_rev)			\
	REGISTER_TABLE_DIMENSIONS(					\
		name, arch ## R_ ## min_rev ## max_rev ## _ ## name,	\
		arch, min_rev, max_rev,					\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
/* Tables present from rev B onwards but whose row count differs between
 * rev B and rev C+: expands to TWO entries sharing the BZ offset/step,
 * one with the BB row count and one with the CZ row count.
 */
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
0329
/* All register tables captured by ef4_nic_get_regs().  As with
 * ef4_nic_regs[], entries outside the running hardware's revision range
 * are skipped at dump time.
 */
static const struct ef4_nic_reg_table ef4_nic_reg_tables[] = {


	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	/* Descriptor and event-queue pointer tables (kernel-access
	 * variant on rev A; the common variant from rev B on).
	 */
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),




	/* Buffer table: dimensions given explicitly (8-byte rows, only
	 * the first 1024 rows are dumped).
	 */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  F, A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  F, B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),

	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),


	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
0360
0361 size_t ef4_nic_get_regs_len(struct ef4_nic *efx)
0362 {
0363 const struct ef4_nic_reg *reg;
0364 const struct ef4_nic_reg_table *table;
0365 size_t len = 0;
0366
0367 for (reg = ef4_nic_regs;
0368 reg < ef4_nic_regs + ARRAY_SIZE(ef4_nic_regs);
0369 reg++)
0370 if (efx->type->revision >= reg->min_revision &&
0371 efx->type->revision <= reg->max_revision)
0372 len += sizeof(ef4_oword_t);
0373
0374 for (table = ef4_nic_reg_tables;
0375 table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables);
0376 table++)
0377 if (efx->type->revision >= table->min_revision &&
0378 efx->type->revision <= table->max_revision)
0379 len += table->rows * min_t(size_t, table->step, 16);
0380
0381 return len;
0382 }
0383
/* Dump all applicable registers and register tables into @buf, in the
 * same order and with the same per-entry sizing as
 * ef4_nic_get_regs_len(), so @buf must be at least that many bytes.
 * Entries outside this hardware revision's range are skipped.
 */
void ef4_nic_get_regs(struct ef4_nic *efx, void *buf)
{
	const struct ef4_nic_reg *reg;
	const struct ef4_nic_reg_table *table;

	for (reg = ef4_nic_regs;
	     reg < ef4_nic_regs + ARRAY_SIZE(ef4_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			ef4_reado(efx, (ef4_oword_t *)buf, reg->offset);
			buf += sizeof(ef4_oword_t);
		}
	}

	for (table = ef4_nic_reg_tables;
	     table < ef4_nic_reg_tables + ARRAY_SIZE(ef4_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		/* At most 16 bytes are stored per row, even when the
		 * hardware row stride (step) is larger.
		 */
		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			/* Read one row using an access width chosen by
			 * the table's row stride.
			 */
			switch (table->step) {
			case 4: /* 32-bit dword read */
				ef4_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM qword read */
				ef4_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit oword table read */
				ef4_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 32-byte stride: read the first 16
				  * bytes of every other 16-byte slot
				  */
				ef4_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				/* Unexpected stride: stop dumping rather
				 * than write past the sized buffer.
				 */
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
0434
0435
0436
0437
0438
0439
0440
0441
0442
0443
0444
0445
0446 size_t ef4_nic_describe_stats(const struct ef4_hw_stat_desc *desc, size_t count,
0447 const unsigned long *mask, u8 *names)
0448 {
0449 size_t visible = 0;
0450 size_t index;
0451
0452 for_each_set_bit(index, mask, count) {
0453 if (desc[index].name) {
0454 if (names) {
0455 strlcpy(names, desc[index].name,
0456 ETH_GSTRING_LEN);
0457 names += ETH_GSTRING_LEN;
0458 }
0459 ++visible;
0460 }
0461 }
0462
0463 return visible;
0464 }
0465
0466
0467
0468
0469
0470
0471
0472
0473
0474
0475
0476
0477
0478
0479
0480 void ef4_nic_update_stats(const struct ef4_hw_stat_desc *desc, size_t count,
0481 const unsigned long *mask,
0482 u64 *stats, const void *dma_buf, bool accumulate)
0483 {
0484 size_t index;
0485
0486 for_each_set_bit(index, mask, count) {
0487 if (desc[index].dma_width) {
0488 const void *addr = dma_buf + desc[index].offset;
0489 u64 val;
0490
0491 switch (desc[index].dma_width) {
0492 case 16:
0493 val = le16_to_cpup((__le16 *)addr);
0494 break;
0495 case 32:
0496 val = le32_to_cpup((__le32 *)addr);
0497 break;
0498 case 64:
0499 val = le64_to_cpup((__le64 *)addr);
0500 break;
0501 default:
0502 WARN_ON(1);
0503 val = 0;
0504 break;
0505 }
0506
0507 if (accumulate)
0508 stats[index] += val;
0509 else
0510 stats[index] = val;
0511 }
0512 }
0513 }
0514
/* Adjust the RX no-descriptor drop count so the value reported through
 * *rx_nodesc_drops covers only drops that occurred while the interface
 * was up.
 *
 * Drops that accumulated while the interface was down (or spanning the
 * most recent down state, per rx_nodesc_drops_prev_state) are added to
 * rx_nodesc_drops_while_down and subtracted from the reported total.
 * rx_nodesc_drops_total tracks the raw hardware count between calls.
 */
void ef4_nic_fix_nodesc_drop_stat(struct ef4_nic *efx, u64 *rx_nodesc_drops)
{
	/* if down, or this is the first update after coming up */
	if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
		efx->rx_nodesc_drops_while_down +=
			*rx_nodesc_drops - efx->rx_nodesc_drops_total;
	efx->rx_nodesc_drops_total = *rx_nodesc_drops;
	efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
	*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}