Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /****************************************************************************
0003  * Driver for Solarflare network controllers and boards
0004  * Copyright 2005-2006 Fen Systems Ltd.
0005  * Copyright 2006-2013 Solarflare Communications Inc.
0006  */
0007 
0008 #include <linux/bitops.h>
0009 #include <linux/delay.h>
0010 #include <linux/interrupt.h>
0011 #include <linux/pci.h>
0012 #include <linux/module.h>
0013 #include <linux/seq_file.h>
0014 #include <linux/cpu_rmap.h>
0015 #include "net_driver.h"
0016 #include "bitfield.h"
0017 #include "efx.h"
0018 #include "nic.h"
0019 #include "ef10_regs.h"
0020 #include "farch_regs.h"
0021 #include "io.h"
0022 #include "workarounds.h"
0023 #include "mcdi_pcol.h"
0024 
0025 /**************************************************************************
0026  *
0027  * Generic buffer handling
0028  * These buffers are used for interrupt status, MAC stats, etc.
0029  *
0030  **************************************************************************/
0031 
0032 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
0033              unsigned int len, gfp_t gfp_flags)
0034 {
0035     buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
0036                       &buffer->dma_addr, gfp_flags);
0037     if (!buffer->addr)
0038         return -ENOMEM;
0039     buffer->len = len;
0040     return 0;
0041 }
0042 
0043 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
0044 {
0045     if (buffer->addr) {
0046         dma_free_coherent(&efx->pci_dev->dev, buffer->len,
0047                   buffer->addr, buffer->dma_addr);
0048         buffer->addr = NULL;
0049     }
0050 }
0051 
/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}
0059 
/* Kick off an event self-test on @channel.  event_test_cpu is reset to
 * -1 before the test event is generated; presumably the event handler
 * overwrites it with the servicing CPU (NOTE(review): confirm against
 * the event-handling path, which is not visible here).
 */
void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	/* Ensure the -1 store is visible to other CPUs before the
	 * generated event can be delivered and serviced.
	 */
	smp_wmb();
	channel->efx->type->ev_test_generate(channel);
}
0066 
/* Kick off an interrupt self-test.  last_irq_cpu is reset to -1 before
 * the test IRQ is raised; presumably the IRQ handler records the CPU it
 * ran on there (NOTE(review): confirm against the IRQ handlers, which
 * are not visible here).  Returns the NIC-type hook's result.
 */
int efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	/* Make the -1 store visible before the test IRQ can fire */
	smp_wmb();
	return efx->type->irq_test_generate(efx);
}
0073 
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 *
 * Returns 0 on success (with efx->irqs_hooked set), or a negative errno
 * with all partially-acquired resources released.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int n_irqs;
	int rc;

	/* Legacy (line-based) interrupts: one shared IRQ for the NIC */
	if (!EFX_INT_MODE_USE_MSI(efx)) {
		rc = request_irq(efx->legacy_irq,
				 efx->type->irq_handle_legacy, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		efx->irqs_hooked = true;
		return 0;
	}

#ifdef CONFIG_RFS_ACCEL
	/* Reverse IRQ->CPU map for accelerated RFS; only meaningful with
	 * MSI-X, where each RX channel has its own vector.
	 */
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		efx->net_dev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(efx->n_rx_channels);
		if (!efx->net_dev->rx_cpu_rmap) {
			rc = -ENOMEM;
			goto fail1;
		}
	}
#endif

	/* Hook MSI or MSI-X interrupt */
	n_irqs = 0;
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx->type->irq_handle_msi,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->msi_context[channel->channel].name,
				 &efx->msi_context[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
		/* count successfully-hooked IRQs for the unwind path */
		++n_irqs;

#ifdef CONFIG_RFS_ACCEL
		if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
		    channel->channel < efx->n_rx_channels) {
			rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
					      channel->irq);
			if (rc)
				goto fail2;
		}
#endif
	}

	efx->irqs_hooked = true;
	return 0;

 fail2:
#ifdef CONFIG_RFS_ACCEL
	/* assumes free_irq_cpu_rmap() tolerates NULL (non-MSI-X case) —
	 * TODO confirm against lib/cpu_rmap.c
	 */
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	/* Free only the first n_irqs channels' IRQs — the ones that were
	 * actually requested before the failure.
	 */
	efx_for_each_channel(channel, efx) {
		if (n_irqs-- == 0)
			break;
		free_irq(channel->irq, &efx->msi_context[channel->channel]);
	}
 fail1:
	return rc;
}
0149 
/* Unhook whatever efx_nic_init_interrupt() hooked.  Safe to call when
 * no IRQs were ever hooked (irqs_hooked is false).
 */
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;

#ifdef CONFIG_RFS_ACCEL
	/* assumes free_irq_cpu_rmap() tolerates a NULL rmap, since no
	 * rmap is allocated in MSI/legacy modes — TODO confirm
	 */
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif

	if (!efx->irqs_hooked)
		return;
	if (EFX_INT_MODE_USE_MSI(efx)) {
		/* Disable MSI/MSI-X interrupts */
		efx_for_each_channel(channel, efx)
			free_irq(channel->irq,
				 &efx->msi_context[channel->channel]);
	} else {
		/* Disable legacy interrupt */
		free_irq(efx->legacy_irq, efx);
	}
	efx->irqs_hooked = false;
}
0172 
/* Register dump */

/* Numeric revision codes used to gate which registers appear in the
 * dump for a given controller revision.
 */
#define REGISTER_REVISION_FA	1
#define REGISTER_REVISION_FB	2
#define REGISTER_REVISION_FC	3
#define REGISTER_REVISION_FZ	3	/* last Falcon arch revision */
#define REGISTER_REVISION_ED	4
#define REGISTER_REVISION_EZ	4	/* latest EF10 revision */

/* One dumpable register: offset and the revision range it exists in.
 * Bitfields keep the table compact (offset < 2^24, revisions in 3 bits).
 */
struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
};

/* Build an efx_nic_reg initializer by pasting together the generated
 * register-definition names, e.g. REGISTER(FOO, F, A, Z) expands to
 * { FR_AZ_FOO, REGISTER_REVISION_FA, REGISTER_REVISION_FZ }.
 */
#define REGISTER(name, arch, min_rev, max_rev) {			\
	arch ## R_ ## min_rev ## max_rev ## _ ## name,			\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev				\
}
#define REGISTER_AA(name) REGISTER(name, F, A, A)
#define REGISTER_AB(name) REGISTER(name, F, A, B)
#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
#define REGISTER_BB(name) REGISTER(name, F, B, B)
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
0199 
/* Registers included in the register dump (iterated by
 * efx_nic_get_regs()/efx_nic_get_regs_len()); each entry is read only
 * on revisions within its [min, max] range.  Registers omitted here are
 * explained by the inline comments (WO = write-only, RC = read-to-clear).
 */
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
	REGISTER_DZ(BIU_HW_REV_ID),
	REGISTER_DZ(MC_DB_LWRD),
	REGISTER_DZ(MC_DB_HWRD),
};
0309 
/* One dumpable register table: base offset, revision range, the step in
 * bytes between consecutive rows, and the number of rows to read.
 */
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
	u32 step:6, rows:21;
};

/* The first argument (the table name) is unused here; it is kept so
 * that call sites read naturally and match REGISTER_TABLE().
 */
#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## arch ## min_rev,				\
	REGISTER_REVISION_ ## arch ## max_rev,				\
	step, rows							\
}
/* Build a table entry whose offset/step/rows all come from the
 * generated register-definition names for the given revision range.
 */
#define REGISTER_TABLE(name, arch, min_rev, max_rev)			\
	REGISTER_TABLE_DIMENSIONS(					\
		name, arch ## R_ ## min_rev ## max_rev ## _ ## name,	\
		arch, min_rev, max_rev,					\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
/* B/C share the BZ offset and step but have different row counts, so
 * this expands to two entries with the appropriate _ROWS each.
 */
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z,	\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
0341 
/* Register tables included in the register dump (iterated by
 * efx_nic_get_regs()/efx_nic_get_regs_len()), gated by revision.
 */
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  F, A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  F, B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
	REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
};
0373 
0374 size_t efx_nic_get_regs_len(struct efx_nic *efx)
0375 {
0376     const struct efx_nic_reg *reg;
0377     const struct efx_nic_reg_table *table;
0378     size_t len = 0;
0379 
0380     for (reg = efx_nic_regs;
0381          reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
0382          reg++)
0383         if (efx->type->revision >= reg->min_revision &&
0384             efx->type->revision <= reg->max_revision)
0385             len += sizeof(efx_oword_t);
0386 
0387     for (table = efx_nic_reg_tables;
0388          table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
0389          table++)
0390         if (efx->type->revision >= table->min_revision &&
0391             efx->type->revision <= table->max_revision)
0392             len += table->rows * min_t(size_t, table->step, 16);
0393 
0394     return len;
0395 }
0396 
0397 void efx_nic_get_regs(struct efx_nic *efx, void *buf)
0398 {
0399     const struct efx_nic_reg *reg;
0400     const struct efx_nic_reg_table *table;
0401 
0402     for (reg = efx_nic_regs;
0403          reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
0404          reg++) {
0405         if (efx->type->revision >= reg->min_revision &&
0406             efx->type->revision <= reg->max_revision) {
0407             efx_reado(efx, (efx_oword_t *)buf, reg->offset);
0408             buf += sizeof(efx_oword_t);
0409         }
0410     }
0411 
0412     for (table = efx_nic_reg_tables;
0413          table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
0414          table++) {
0415         size_t size, i;
0416 
0417         if (!(efx->type->revision >= table->min_revision &&
0418               efx->type->revision <= table->max_revision))
0419             continue;
0420 
0421         size = min_t(size_t, table->step, 16);
0422 
0423         for (i = 0; i < table->rows; i++) {
0424             switch (table->step) {
0425             case 4: /* 32-bit SRAM */
0426                 efx_readd(efx, buf, table->offset + 4 * i);
0427                 break;
0428             case 8: /* 64-bit SRAM */
0429                 efx_sram_readq(efx,
0430                            efx->membase + table->offset,
0431                            buf, i);
0432                 break;
0433             case 16: /* 128-bit-readable register */
0434                 efx_reado_table(efx, buf, table->offset, i);
0435                 break;
0436             case 32: /* 128-bit register, interleaved */
0437                 efx_reado_table(efx, buf, table->offset, 2 * i);
0438                 break;
0439             default:
0440                 WARN_ON(1);
0441                 return;
0442             }
0443             buf += size;
0444         }
0445     }
0446 }
0447 
0448 /**
0449  * efx_nic_describe_stats - Describe supported statistics for ethtool
0450  * @desc: Array of &struct efx_hw_stat_desc describing the statistics
0451  * @count: Length of the @desc array
0452  * @mask: Bitmask of which elements of @desc are enabled
0453  * @names: Buffer to copy names to, or %NULL.  The names are copied
0454  *  starting at intervals of %ETH_GSTRING_LEN bytes.
0455  *
0456  * Returns the number of visible statistics, i.e. the number of set
0457  * bits in the first @count bits of @mask for which a name is defined.
0458  */
0459 size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
0460                   const unsigned long *mask, u8 *names)
0461 {
0462     size_t visible = 0;
0463     size_t index;
0464 
0465     for_each_set_bit(index, mask, count) {
0466         if (desc[index].name) {
0467             if (names) {
0468                 strlcpy(names, desc[index].name,
0469                     ETH_GSTRING_LEN);
0470                 names += ETH_GSTRING_LEN;
0471             }
0472             ++visible;
0473         }
0474     }
0475 
0476     return visible;
0477 }
0478 
/**
 * efx_nic_copy_stats - Copy stats from the DMA buffer in to an
 *	intermediate buffer. This is used to get a consistent
 *	set of stats while the DMA buffer can be written at any time
 *	by the NIC.
 * @efx: The associated NIC.
 * @dest: Destination buffer. Must be the same size as the DMA buffer.
 *
 * Returns 0 on success (including when @dest is %NULL, or when the DMA
 * buffer is absent or marked invalid, in which case @dest is
 * zero-filled), or -EIO if no consistent snapshot could be taken.
 */
int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest)
{
	__le64 *dma_stats = efx->stats_buffer.addr;
	__le64 generation_start, generation_end;
	int rc = 0, retry;

	if (!dest)
		return 0;

	if (!dma_stats)
		goto return_zeroes;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
	for (retry = 0; retry < 100; ++retry) {
		/* Sample the generation count at the end of the buffer;
		 * if it still matches the count at the start after the
		 * copy, no DMA update raced with us.
		 */
		generation_end = dma_stats[efx->num_mac_stats - 1];
		if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
			goto return_zeroes;
		/* Order the generation_end read before copying the stats */
		rmb();
		memcpy(dest, dma_stats, efx->num_mac_stats * sizeof(__le64));
		/* Order the copy before the generation_start read */
		rmb();
		generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
		if (generation_end == generation_start)
			return 0; /* return good data */
		udelay(100);
	}

	rc = -EIO;

return_zeroes:
	memset(dest, 0, efx->num_mac_stats * sizeof(u64));
	return rc;
}
0521 
0522 /**
0523  * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
0524  * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
0525  *  layout.  DMA widths of 0, 16, 32 and 64 are supported; where
0526  *  the width is specified as 0 the corresponding element of
0527  *  @stats is not updated.
0528  * @count: Length of the @desc array
0529  * @mask: Bitmask of which elements of @desc are enabled
0530  * @stats: Buffer to update with the converted statistics.  The length
0531  *  of this array must be at least @count.
0532  * @dma_buf: DMA buffer containing hardware statistics
0533  * @accumulate: If set, the converted values will be added rather than
0534  *  directly stored to the corresponding elements of @stats
0535  */
0536 void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
0537               const unsigned long *mask,
0538               u64 *stats, const void *dma_buf, bool accumulate)
0539 {
0540     size_t index;
0541 
0542     for_each_set_bit(index, mask, count) {
0543         if (desc[index].dma_width) {
0544             const void *addr = dma_buf + desc[index].offset;
0545             u64 val;
0546 
0547             switch (desc[index].dma_width) {
0548             case 16:
0549                 val = le16_to_cpup((__le16 *)addr);
0550                 break;
0551             case 32:
0552                 val = le32_to_cpup((__le32 *)addr);
0553                 break;
0554             case 64:
0555                 val = le64_to_cpup((__le64 *)addr);
0556                 break;
0557             default:
0558                 WARN_ON(1);
0559                 val = 0;
0560                 break;
0561             }
0562 
0563             if (accumulate)
0564                 stats[index] += val;
0565             else
0566                 stats[index] = val;
0567         }
0568     }
0569 }
0570 
0571 void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
0572 {
0573     /* if down, or this is the first update after coming up */
0574     if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
0575         efx->rx_nodesc_drops_while_down +=
0576             *rx_nodesc_drops - efx->rx_nodesc_drops_total;
0577     efx->rx_nodesc_drops_total = *rx_nodesc_drops;
0578     efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
0579     *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
0580 }