0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /****************************************************************************
0003  * Driver for Solarflare network controllers and boards
0004  * Copyright 2018 Solarflare Communications Inc.
0005  *
0006  * This program is free software; you can redistribute it and/or modify it
0007  * under the terms of the GNU General Public License version 2 as published
0008  * by the Free Software Foundation, incorporated herein by reference.
0009  */
0010 
0011 #include "net_driver.h"
0012 #include <linux/module.h>
0013 #include <linux/filter.h>
0014 #include "efx_channels.h"
0015 #include "efx.h"
0016 #include "efx_common.h"
0017 #include "tx_common.h"
0018 #include "rx_common.h"
0019 #include "nic.h"
0020 #include "sriov.h"
0021 #include "workarounds.h"
0022 
0023 /* This is the first interrupt mode to try out of:
0024  * 0 => MSI-X
0025  * 1 => MSI
0026  * 2 => legacy
0027  */
0028 unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;
0029 
0030 /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
0031  * i.e. the number of CPUs among which we may distribute simultaneous
0032  * interrupt handling.
0033  *
0034  * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
0035  * The default (0) means to assign an interrupt to each core.
0036  */
0037 unsigned int rss_cpus;
0038 
0039 static unsigned int irq_adapt_low_thresh = 8000;
0040 module_param(irq_adapt_low_thresh, uint, 0644);
0041 MODULE_PARM_DESC(irq_adapt_low_thresh,
0042          "Threshold score for reducing IRQ moderation");
0043 
0044 static unsigned int irq_adapt_high_thresh = 16000;
0045 module_param(irq_adapt_high_thresh, uint, 0644);
0046 MODULE_PARM_DESC(irq_adapt_high_thresh,
0047          "Threshold score for increasing IRQ moderation");
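
/* Both thresholds are declared with permissions 0644, so they can be changed
 * at runtime as well as at module load time.  Illustrative usage (assuming
 * this file is built into the usual "sfc" module):
 *
 *   modprobe sfc irq_adapt_low_thresh=4000 irq_adapt_high_thresh=32000
 *   echo 4000 > /sys/module/sfc/parameters/irq_adapt_low_thresh
 *
 * How the scores are compared against these thresholds is shown in
 * efx_update_irq_mod() further down.
 */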
0048 
0049 static const struct efx_channel_type efx_default_channel_type;
0050 
0051 /*************
0052  * INTERRUPTS
0053  *************/
0054 
0055 static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
0056 {
0057     cpumask_var_t filter_mask;
0058     unsigned int count;
0059     int cpu;
0060 
0061     if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
0062         netif_warn(efx, probe, efx->net_dev,
0063                "RSS disabled due to allocation failure\n");
0064         return 1;
0065     }
0066 
0067     cpumask_copy(filter_mask, cpu_online_mask);
0068     if (local_node)
0069         cpumask_and(filter_mask, filter_mask,
0070                 cpumask_of_pcibus(efx->pci_dev->bus));
0071 
0072     count = 0;
0073     for_each_cpu(cpu, filter_mask) {
0074         ++count;
0075         cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
0076     }
0077 
0078     free_cpumask_var(filter_mask);
0079 
0080     return count;
0081 }
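
/* Illustrative result (hypothetical topology, not derived from this file):
 * on a two-socket system with 16 physical cores (32 SMT threads) per socket
 * and the NIC attached to node 0, the topology_sibling_cpumask() filtering
 * above counts each physical core once, so count_online_cores(efx, true)
 * returns 16 and count_online_cores(efx, false) returns 32.
 */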
0082 
0083 static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
0084 {
0085     unsigned int count;
0086 
0087     if (rss_cpus) {
0088         count = rss_cpus;
0089     } else {
0090         count = count_online_cores(efx, true);
0091 
0092         /* If there are no online CPUs in the local node, fall back to any online CPUs */
0093         if (count == 0)
0094             count = count_online_cores(efx, false);
0095     }
0096 
0097     if (count > EFX_MAX_RX_QUEUES) {
0098         netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
0099                    "Reducing number of rx queues from %u to %u.\n",
0100                    count, EFX_MAX_RX_QUEUES);
0101         count = EFX_MAX_RX_QUEUES;
0102     }
0103 
0104     /* If RSS is requested for the PF *and* VFs then we can't write RSS
0105      * table entries that are inaccessible to VFs
0106      */
0107 #ifdef CONFIG_SFC_SRIOV
0108     if (efx->type->sriov_wanted) {
0109         if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
0110             count > efx_vf_size(efx)) {
0111             netif_warn(efx, probe, efx->net_dev,
0112                    "Reducing number of RSS channels from %u to %u for "
0113                    "VF support. Increase vf-msix-limit to use more "
0114                    "channels on the PF.\n",
0115                    count, efx_vf_size(efx));
0116             count = efx_vf_size(efx);
0117         }
0118     }
0119 #endif
0120 
0121     return count;
0122 }
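
/* Example of the clamping above (numbers are assumptions): with rss_cpus
 * left at 0 and 16 local physical cores, count = 16, clamped to
 * EFX_MAX_RX_QUEUES if that is smaller; if SR-IOV is wanted and
 * efx_vf_size() returns 8, count is further cut from 16 to 8 so that no
 * RSS table entry points at a queue the VFs cannot address.
 */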
0123 
0124 static int efx_allocate_msix_channels(struct efx_nic *efx,
0125                       unsigned int max_channels,
0126                       unsigned int extra_channels,
0127                       unsigned int parallelism)
0128 {
0129     unsigned int n_channels = parallelism;
0130     int vec_count;
0131     int tx_per_ev;
0132     int n_xdp_tx;
0133     int n_xdp_ev;
0134 
0135     if (efx_separate_tx_channels)
0136         n_channels *= 2;
0137     n_channels += extra_channels;
0138 
0139     /* To allow XDP transmit to happen from arbitrary NAPI contexts
0140      * we allocate a TX queue per CPU. We share event queues across
0141      * multiple tx queues, assuming tx and ev queues are both
0142      * maximum size.
0143      */
0144     tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
0145     tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
0146     n_xdp_tx = num_possible_cpus();
0147     n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
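    /* Illustrative arithmetic (constant values assumed here, see the driver
     * headers for the real ones): suppose EFX_MAX_EVQ_SIZE were 16384,
     * EFX_TXQ_MAX_ENT(efx) 4096 and EFX_MAX_TXQ_PER_CHANNEL 4; then
     * tx_per_ev = min(16384 / 4096, 4) = 4, and with 64 possible CPUs
     * n_xdp_tx = 64 and n_xdp_ev = DIV_ROUND_UP(64, 4) = 16.
     */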
0148 
0149     vec_count = pci_msix_vec_count(efx->pci_dev);
0150     if (vec_count < 0)
0151         return vec_count;
0152 
0153     max_channels = min_t(unsigned int, vec_count, max_channels);
0154 
0155     /* Check resources.
0156      * We need a channel per event queue, plus a VI per tx queue.
0157      * This may be more pessimistic than it needs to be.
0158      */
0159     if (n_channels >= max_channels) {
0160         efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
0161         netif_warn(efx, drv, efx->net_dev,
0162                "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
0163                n_xdp_ev, n_channels, max_channels);
0164         netif_warn(efx, drv, efx->net_dev,
0165                "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
0166     } else if (n_channels + n_xdp_tx > efx->max_vis) {
0167         efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
0168         netif_warn(efx, drv, efx->net_dev,
0169                "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
0170                n_xdp_tx, n_channels, efx->max_vis);
0171         netif_warn(efx, drv, efx->net_dev,
0172                "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
0173     } else if (n_channels + n_xdp_ev > max_channels) {
0174         efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
0175         netif_warn(efx, drv, efx->net_dev,
0176                "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
0177                n_xdp_ev, n_channels, max_channels);
0178 
0179         n_xdp_ev = max_channels - n_channels;
0180         netif_warn(efx, drv, efx->net_dev,
0181                "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
0182                DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
0183     } else {
0184         efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
0185     }
0186 
0187     if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
0188         efx->n_xdp_channels = n_xdp_ev;
0189         efx->xdp_tx_per_channel = tx_per_ev;
0190         efx->xdp_tx_queue_count = n_xdp_tx;
0191         n_channels += n_xdp_ev;
0192         netif_dbg(efx, drv, efx->net_dev,
0193               "Allocating %d TX and %d event queues for XDP\n",
0194               n_xdp_ev * tx_per_ev, n_xdp_ev);
0195     } else {
0196         efx->n_xdp_channels = 0;
0197         efx->xdp_tx_per_channel = 0;
0198         efx->xdp_tx_queue_count = n_xdp_tx;
0199     }
0200 
0201     if (vec_count < n_channels) {
0202         netif_err(efx, drv, efx->net_dev,
0203               "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
0204               vec_count, n_channels);
0205         netif_err(efx, drv, efx->net_dev,
0206               "WARNING: Performance may be reduced.\n");
0207         n_channels = vec_count;
0208     }
0209 
0210     n_channels = min(n_channels, max_channels);
0211 
0212     efx->n_channels = n_channels;
0213 
0214     /* Ignore XDP tx channels when creating rx channels. */
0215     n_channels -= efx->n_xdp_channels;
0216 
0217     if (efx_separate_tx_channels) {
0218         efx->n_tx_channels =
0219             min(max(n_channels / 2, 1U),
0220                 efx->max_tx_channels);
0221         efx->tx_channel_offset =
0222             n_channels - efx->n_tx_channels;
0223         efx->n_rx_channels =
0224             max(n_channels -
0225                 efx->n_tx_channels, 1U);
0226     } else {
0227         efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
0228         efx->tx_channel_offset = 0;
0229         efx->n_rx_channels = n_channels;
0230     }
0231 
0232     efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
0233     efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
0234 
0235     efx->xdp_channel_offset = n_channels;
0236 
0237     netif_dbg(efx, drv, efx->net_dev,
0238           "Allocating %u RX channels\n",
0239           efx->n_rx_channels);
0240 
0241     return efx->n_channels;
0242 }
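
/* Worked example of the split above (dedicated-XDP case, sizes assumed):
 * parallelism = 8, no extra channels, efx_separate_tx_channels = false and
 * n_xdp_ev = 2 gives efx->n_channels = 10, n_rx_channels = n_tx_channels = 8,
 * tx_channel_offset = 0 and xdp_channel_offset = 8, provided enough MSI-X
 * vectors and VIs are available.  With efx_separate_tx_channels = true and
 * parallelism = 4 (ignoring XDP), the 8 traffic channels split into 4 RX
 * channels at offset 0 and 4 TX channels at offset 4.
 */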
0243 
0244 /* Probe the number and type of interrupts we are able to obtain, and
0245  * the resulting numbers of channels and RX queues.
0246  */
0247 int efx_probe_interrupts(struct efx_nic *efx)
0248 {
0249     unsigned int extra_channels = 0;
0250     unsigned int rss_spread;
0251     unsigned int i, j;
0252     int rc;
0253 
0254     for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
0255         if (efx->extra_channel_type[i])
0256             ++extra_channels;
0257 
0258     if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
0259         unsigned int parallelism = efx_wanted_parallelism(efx);
0260         struct msix_entry xentries[EFX_MAX_CHANNELS];
0261         unsigned int n_channels;
0262 
0263         rc = efx_allocate_msix_channels(efx, efx->max_channels,
0264                         extra_channels, parallelism);
0265         if (rc >= 0) {
0266             n_channels = rc;
0267             for (i = 0; i < n_channels; i++)
0268                 xentries[i].entry = i;
0269             rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
0270                            n_channels);
0271         }
0272         if (rc < 0) {
0273             /* Fall back to single channel MSI */
0274             netif_err(efx, drv, efx->net_dev,
0275                   "could not enable MSI-X\n");
0276             if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
0277                 efx->interrupt_mode = EFX_INT_MODE_MSI;
0278             else
0279                 return rc;
0280         } else if (rc < n_channels) {
0281             netif_err(efx, drv, efx->net_dev,
0282                   "WARNING: Insufficient MSI-X vectors"
0283                   " available (%d < %u).\n", rc, n_channels);
0284             netif_err(efx, drv, efx->net_dev,
0285                   "WARNING: Performance may be reduced.\n");
0286             n_channels = rc;
0287         }
0288 
0289         if (rc > 0) {
0290             for (i = 0; i < efx->n_channels; i++)
0291                 efx_get_channel(efx, i)->irq =
0292                     xentries[i].vector;
0293         }
0294     }
0295 
0296     /* Try single interrupt MSI */
0297     if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
0298         efx->n_channels = 1;
0299         efx->n_rx_channels = 1;
0300         efx->n_tx_channels = 1;
0301         efx->tx_channel_offset = 0;
0302         efx->n_xdp_channels = 0;
0303         efx->xdp_channel_offset = efx->n_channels;
0304         rc = pci_enable_msi(efx->pci_dev);
0305         if (rc == 0) {
0306             efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
0307         } else {
0308             netif_err(efx, drv, efx->net_dev,
0309                   "could not enable MSI\n");
0310             if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
0311                 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
0312             else
0313                 return rc;
0314         }
0315     }
0316 
0317     /* Assume legacy interrupts */
0318     if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
0319         efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
0320         efx->n_rx_channels = 1;
0321         efx->n_tx_channels = 1;
0322         efx->tx_channel_offset = efx_separate_tx_channels ? 1 : 0;
0323         efx->n_xdp_channels = 0;
0324         efx->xdp_channel_offset = efx->n_channels;
0325         efx->legacy_irq = efx->pci_dev->irq;
0326     }
0327 
0328     /* Assign extra channels if possible, before XDP channels */
0329     efx->n_extra_tx_channels = 0;
0330     j = efx->xdp_channel_offset;
0331     for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
0332         if (!efx->extra_channel_type[i])
0333             continue;
0334         if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
0335             efx->extra_channel_type[i]->handle_no_channel(efx);
0336         } else {
0337             --j;
0338             efx_get_channel(efx, j)->type =
0339                 efx->extra_channel_type[i];
0340             if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
0341                 efx->n_extra_tx_channels++;
0342         }
0343     }
0344 
0345     rss_spread = efx->n_rx_channels;
0346     /* RSS might be usable on VFs even if it is disabled on the PF */
0347 #ifdef CONFIG_SFC_SRIOV
0348     if (efx->type->sriov_wanted) {
0349         efx->rss_spread = ((rss_spread > 1 ||
0350                     !efx->type->sriov_wanted(efx)) ?
0351                    rss_spread : efx_vf_size(efx));
0352         return 0;
0353     }
0354 #endif
0355     efx->rss_spread = rss_spread;
0356 
0357     return 0;
0358 }
0359 
0360 #if defined(CONFIG_SMP)
0361 void efx_set_interrupt_affinity(struct efx_nic *efx)
0362 {
0363     const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
0364     struct efx_channel *channel;
0365     unsigned int cpu;
0366 
0367     /* If there are no online CPUs in the local node, fall back to any online CPU */
0368     if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
0369         numa_mask = cpu_online_mask;
0370 
0371     cpu = -1;
0372     efx_for_each_channel(channel, efx) {
0373         cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
0374         if (cpu >= nr_cpu_ids)
0375             cpu = cpumask_first_and(cpu_online_mask, numa_mask);
0376         irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
0377     }
0378 }
0379 
0380 void efx_clear_interrupt_affinity(struct efx_nic *efx)
0381 {
0382     struct efx_channel *channel;
0383 
0384     efx_for_each_channel(channel, efx)
0385         irq_set_affinity_hint(channel->irq, NULL);
0386 }
0387 #else
0388 void
0389 efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
0390 {
0391 }
0392 
0393 void
0394 efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
0395 {
0396 }
0397 #endif /* CONFIG_SMP */
0398 
0399 void efx_remove_interrupts(struct efx_nic *efx)
0400 {
0401     struct efx_channel *channel;
0402 
0403     /* Remove MSI/MSI-X interrupts */
0404     efx_for_each_channel(channel, efx)
0405         channel->irq = 0;
0406     pci_disable_msi(efx->pci_dev);
0407     pci_disable_msix(efx->pci_dev);
0408 
0409     /* Remove legacy interrupt */
0410     efx->legacy_irq = 0;
0411 }
0412 
0413 /***************
0414  * EVENT QUEUES
0415  ***************/
0416 
0417 /* Create event queue
0418  * Event queue memory allocations are done only once.  If the channel
0419  * is reset, the memory buffer will be reused; this guards against
0420  * errors during channel reset and also simplifies interrupt handling.
0421  */
0422 int efx_probe_eventq(struct efx_channel *channel)
0423 {
0424     struct efx_nic *efx = channel->efx;
0425     unsigned long entries;
0426 
0427     netif_dbg(efx, probe, efx->net_dev,
0428           "chan %d create event queue\n", channel->channel);
0429 
0430     /* Build an event queue with room for one event per tx and rx buffer,
0431      * plus some extra for link state events and MCDI completions.
0432      */
0433     entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
0434     EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
0435     channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
0436 
0437     return efx_nic_probe_eventq(channel);
0438 }
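
/* Sizing example (assuming the result lies between EFX_MIN_EVQ_SIZE and
 * EFX_MAX_EVQ_SIZE): with rxq_entries = txq_entries = 1024 the calculation
 * above gives roundup_pow_of_two(1024 + 1024 + 128) = 4096 events, so
 * channel->eventq_mask = 4095.
 */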
0439 
0440 /* Prepare channel's event queue */
0441 int efx_init_eventq(struct efx_channel *channel)
0442 {
0443     struct efx_nic *efx = channel->efx;
0444     int rc;
0445 
0446     EFX_WARN_ON_PARANOID(channel->eventq_init);
0447 
0448     netif_dbg(efx, drv, efx->net_dev,
0449           "chan %d init event queue\n", channel->channel);
0450 
0451     rc = efx_nic_init_eventq(channel);
0452     if (rc == 0) {
0453         efx->type->push_irq_moderation(channel);
0454         channel->eventq_read_ptr = 0;
0455         channel->eventq_init = true;
0456     }
0457     return rc;
0458 }
0459 
0460 /* Enable event queue processing and NAPI */
0461 void efx_start_eventq(struct efx_channel *channel)
0462 {
0463     netif_dbg(channel->efx, ifup, channel->efx->net_dev,
0464           "chan %d start event queue\n", channel->channel);
0465 
0466     /* Make sure the NAPI handler sees the enabled flag set */
0467     channel->enabled = true;
0468     smp_wmb();
0469 
0470     napi_enable(&channel->napi_str);
0471     efx_nic_eventq_read_ack(channel);
0472 }
0473 
0474 /* Disable event queue processing and NAPI */
0475 void efx_stop_eventq(struct efx_channel *channel)
0476 {
0477     if (!channel->enabled)
0478         return;
0479 
0480     napi_disable(&channel->napi_str);
0481     channel->enabled = false;
0482 }
0483 
0484 void efx_fini_eventq(struct efx_channel *channel)
0485 {
0486     if (!channel->eventq_init)
0487         return;
0488 
0489     netif_dbg(channel->efx, drv, channel->efx->net_dev,
0490           "chan %d fini event queue\n", channel->channel);
0491 
0492     efx_nic_fini_eventq(channel);
0493     channel->eventq_init = false;
0494 }
0495 
0496 void efx_remove_eventq(struct efx_channel *channel)
0497 {
0498     netif_dbg(channel->efx, drv, channel->efx->net_dev,
0499           "chan %d remove event queue\n", channel->channel);
0500 
0501     efx_nic_remove_eventq(channel);
0502 }
0503 
0504 /**************************************************************************
0505  *
0506  * Channel handling
0507  *
0508  *************************************************************************/
0509 
0510 #ifdef CONFIG_RFS_ACCEL
0511 static void efx_filter_rfs_expire(struct work_struct *data)
0512 {
0513     struct delayed_work *dwork = to_delayed_work(data);
0514     struct efx_channel *channel;
0515     unsigned int time, quota;
0516 
0517     channel = container_of(dwork, struct efx_channel, filter_work);
0518     time = jiffies - channel->rfs_last_expiry;
0519     quota = channel->rfs_filter_count * time / (30 * HZ);
0520     if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
0521         channel->rfs_last_expiry += time;
0522     /* Ensure we do more work eventually even if NAPI poll is not happening */
0523     schedule_delayed_work(dwork, 30 * HZ);
0524 }
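
/* Worked example of the quota above: with rfs_filter_count = 1000 and
 * time = 15 * HZ since the last expiry pass, quota = 1000 * 15 * HZ /
 * (30 * HZ) = 500, so up to min(1000, 500) = 500 filters are scanned and,
 * if the scan reports progress, rfs_last_expiry advances by the elapsed
 * time.  The same "quota >= 20" test appears in efx_poll() as
 * rfs_filter_count * time >= 600 * HZ, since 600 * HZ = 20 * (30 * HZ).
 */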
0525 #endif
0526 
0527 /* Allocate and initialise a channel structure. */
0528 static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
0529 {
0530     struct efx_rx_queue *rx_queue;
0531     struct efx_tx_queue *tx_queue;
0532     struct efx_channel *channel;
0533     int j;
0534 
0535     channel = kzalloc(sizeof(*channel), GFP_KERNEL);
0536     if (!channel)
0537         return NULL;
0538 
0539     channel->efx = efx;
0540     channel->channel = i;
0541     channel->type = &efx_default_channel_type;
0542 
0543     for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
0544         tx_queue = &channel->tx_queue[j];
0545         tx_queue->efx = efx;
0546         tx_queue->queue = -1;
0547         tx_queue->label = j;
0548         tx_queue->channel = channel;
0549     }
0550 
0551 #ifdef CONFIG_RFS_ACCEL
0552     INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
0553 #endif
0554 
0555     rx_queue = &channel->rx_queue;
0556     rx_queue->efx = efx;
0557     timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
0558 
0559     return channel;
0560 }
0561 
0562 int efx_init_channels(struct efx_nic *efx)
0563 {
0564     unsigned int i;
0565 
0566     for (i = 0; i < EFX_MAX_CHANNELS; i++) {
0567         efx->channel[i] = efx_alloc_channel(efx, i);
0568         if (!efx->channel[i])
0569             return -ENOMEM;
0570         efx->msi_context[i].efx = efx;
0571         efx->msi_context[i].index = i;
0572     }
0573 
0574     /* Higher numbered interrupt modes are less capable! */
0575     efx->interrupt_mode = min(efx->type->min_interrupt_mode,
0576                   efx_interrupt_mode);
0577 
0578     efx->max_channels = EFX_MAX_CHANNELS;
0579     efx->max_tx_channels = EFX_MAX_CHANNELS;
0580 
0581     return 0;
0582 }
0583 
0584 void efx_fini_channels(struct efx_nic *efx)
0585 {
0586     unsigned int i;
0587 
0588     for (i = 0; i < EFX_MAX_CHANNELS; i++)
0589         if (efx->channel[i]) {
0590             kfree(efx->channel[i]);
0591             efx->channel[i] = NULL;
0592         }
0593 }
0594 
0595 /* Allocate and initialise a channel structure, copying parameters
0596  * (but not resources) from an old channel structure.
0597  */
0598 struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
0599 {
0600     struct efx_rx_queue *rx_queue;
0601     struct efx_tx_queue *tx_queue;
0602     struct efx_channel *channel;
0603     int j;
0604 
0605     channel = kmalloc(sizeof(*channel), GFP_KERNEL);
0606     if (!channel)
0607         return NULL;
0608 
0609     *channel = *old_channel;
0610 
0611     channel->napi_dev = NULL;
0612     INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
0613     channel->napi_str.napi_id = 0;
0614     channel->napi_str.state = 0;
0615     memset(&channel->eventq, 0, sizeof(channel->eventq));
0616 
0617     for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
0618         tx_queue = &channel->tx_queue[j];
0619         if (tx_queue->channel)
0620             tx_queue->channel = channel;
0621         tx_queue->buffer = NULL;
0622         tx_queue->cb_page = NULL;
0623         memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
0624     }
0625 
0626     rx_queue = &channel->rx_queue;
0627     rx_queue->buffer = NULL;
0628     memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
0629     timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
0630 #ifdef CONFIG_RFS_ACCEL
0631     INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
0632 #endif
0633 
0634     return channel;
0635 }
0636 
0637 static int efx_probe_channel(struct efx_channel *channel)
0638 {
0639     struct efx_tx_queue *tx_queue;
0640     struct efx_rx_queue *rx_queue;
0641     int rc;
0642 
0643     netif_dbg(channel->efx, probe, channel->efx->net_dev,
0644           "creating channel %d\n", channel->channel);
0645 
0646     rc = channel->type->pre_probe(channel);
0647     if (rc)
0648         goto fail;
0649 
0650     rc = efx_probe_eventq(channel);
0651     if (rc)
0652         goto fail;
0653 
0654     efx_for_each_channel_tx_queue(tx_queue, channel) {
0655         rc = efx_probe_tx_queue(tx_queue);
0656         if (rc)
0657             goto fail;
0658     }
0659 
0660     efx_for_each_channel_rx_queue(rx_queue, channel) {
0661         rc = efx_probe_rx_queue(rx_queue);
0662         if (rc)
0663             goto fail;
0664     }
0665 
0666     channel->rx_list = NULL;
0667 
0668     return 0;
0669 
0670 fail:
0671     efx_remove_channel(channel);
0672     return rc;
0673 }
0674 
0675 static void efx_get_channel_name(struct efx_channel *channel, char *buf,
0676                  size_t len)
0677 {
0678     struct efx_nic *efx = channel->efx;
0679     const char *type;
0680     int number;
0681 
0682     number = channel->channel;
0683 
0684     if (number >= efx->xdp_channel_offset &&
0685         !WARN_ON_ONCE(!efx->n_xdp_channels)) {
0686         type = "-xdp";
0687         number -= efx->xdp_channel_offset;
0688     } else if (efx->tx_channel_offset == 0) {
0689         type = "";
0690     } else if (number < efx->tx_channel_offset) {
0691         type = "-rx";
0692     } else {
0693         type = "-tx";
0694         number -= efx->tx_channel_offset;
0695     }
0696     snprintf(buf, len, "%s%s-%d", efx->name, type, number);
0697 }
0698 
0699 void efx_set_channel_names(struct efx_nic *efx)
0700 {
0701     struct efx_channel *channel;
0702 
0703     efx_for_each_channel(channel, efx)
0704         channel->type->get_name(channel,
0705                     efx->msi_context[channel->channel].name,
0706                     sizeof(efx->msi_context[0].name));
0707 }
0708 
0709 int efx_probe_channels(struct efx_nic *efx)
0710 {
0711     struct efx_channel *channel;
0712     int rc;
0713 
0714     /* Restart special buffer allocation */
0715     efx->next_buffer_table = 0;
0716 
0717     /* Probe channels in reverse, so that any 'extra' channels
0718      * use the start of the buffer table. This allows the traffic
0719      * channels to be resized without moving them or wasting the
0720      * entries before them.
0721      */
0722     efx_for_each_channel_rev(channel, efx) {
0723         rc = efx_probe_channel(channel);
0724         if (rc) {
0725             netif_err(efx, probe, efx->net_dev,
0726                   "failed to create channel %d\n",
0727                   channel->channel);
0728             goto fail;
0729         }
0730     }
0731     efx_set_channel_names(efx);
0732 
0733     return 0;
0734 
0735 fail:
0736     efx_remove_channels(efx);
0737     return rc;
0738 }
0739 
0740 void efx_remove_channel(struct efx_channel *channel)
0741 {
0742     struct efx_tx_queue *tx_queue;
0743     struct efx_rx_queue *rx_queue;
0744 
0745     netif_dbg(channel->efx, drv, channel->efx->net_dev,
0746           "destroy chan %d\n", channel->channel);
0747 
0748     efx_for_each_channel_rx_queue(rx_queue, channel)
0749         efx_remove_rx_queue(rx_queue);
0750     efx_for_each_channel_tx_queue(tx_queue, channel)
0751         efx_remove_tx_queue(tx_queue);
0752     efx_remove_eventq(channel);
0753     channel->type->post_remove(channel);
0754 }
0755 
0756 void efx_remove_channels(struct efx_nic *efx)
0757 {
0758     struct efx_channel *channel;
0759 
0760     efx_for_each_channel(channel, efx)
0761         efx_remove_channel(channel);
0762 
0763     kfree(efx->xdp_tx_queues);
0764 }
0765 
0766 static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
0767                 struct efx_tx_queue *tx_queue)
0768 {
0769     if (xdp_queue_number >= efx->xdp_tx_queue_count)
0770         return -EINVAL;
0771 
0772     netif_dbg(efx, drv, efx->net_dev,
0773           "Channel %u TXQ %u is XDP %u, HW %u\n",
0774           tx_queue->channel->channel, tx_queue->label,
0775           xdp_queue_number, tx_queue->queue);
0776     efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
0777     return 0;
0778 }
0779 
0780 static void efx_set_xdp_channels(struct efx_nic *efx)
0781 {
0782     struct efx_tx_queue *tx_queue;
0783     struct efx_channel *channel;
0784     unsigned int next_queue = 0;
0785     int xdp_queue_number = 0;
0786     int rc;
0787 
0788     /* We need to mark which channels really have RX and TX
0789      * queues, and adjust the TX queue numbers if we have separate
0790      * RX-only and TX-only channels.
0791      */
0792     efx_for_each_channel(channel, efx) {
0793         if (channel->channel < efx->tx_channel_offset)
0794             continue;
0795 
0796         if (efx_channel_is_xdp_tx(channel)) {
0797             efx_for_each_channel_tx_queue(tx_queue, channel) {
0798                 tx_queue->queue = next_queue++;
0799                 rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
0800                               tx_queue);
0801                 if (rc == 0)
0802                     xdp_queue_number++;
0803             }
0804         } else {
0805             efx_for_each_channel_tx_queue(tx_queue, channel) {
0806                 tx_queue->queue = next_queue++;
0807                 netif_dbg(efx, drv, efx->net_dev,
0808                       "Channel %u TXQ %u is HW %u\n",
0809                       channel->channel, tx_queue->label,
0810                       tx_queue->queue);
0811             }
0812 
0813             /* If XDP is borrowing queues from net stack, it must
0814              * use the queue with no csum offload, which is the
0815              * first one of the channel
0816              * (note: tx_queue_by_type is not initialized yet)
0817              */
0818             if (efx->xdp_txq_queues_mode ==
0819                 EFX_XDP_TX_QUEUES_BORROWED) {
0820                 tx_queue = &channel->tx_queue[0];
0821                 rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
0822                               tx_queue);
0823                 if (rc == 0)
0824                     xdp_queue_number++;
0825             }
0826         }
0827     }
0828     WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
0829         xdp_queue_number != efx->xdp_tx_queue_count);
0830     WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
0831         xdp_queue_number > efx->xdp_tx_queue_count);
0832 
0833     /* If we have more CPUs than assigned XDP TX queues, map the already
0834      * existing queues onto the remaining CPUs
0835      */
0836     next_queue = 0;
0837     while (xdp_queue_number < efx->xdp_tx_queue_count) {
0838         tx_queue = efx->xdp_tx_queues[next_queue++];
0839         rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
0840         if (rc == 0)
0841             xdp_queue_number++;
0842     }
0843 }
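
/* Illustrative mapping (hypothetical sizes): with 8 possible CPUs but only
 * 2 XDP channels of 2 queues each (EFX_XDP_TX_QUEUES_SHARED), XDP queue
 * numbers 0-3 each get their own hardware queue, and the final loop above
 * maps numbers 4-7 back onto those same four queues, so every CPU still
 * finds an entry in efx->xdp_tx_queues[].
 */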
0844 
0845 int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
0846 {
0847     struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel,
0848                *ptp_channel = efx_ptp_channel(efx);
0849     struct efx_ptp_data *ptp_data = efx->ptp_data;
0850     unsigned int i, next_buffer_table = 0;
0851     u32 old_rxq_entries, old_txq_entries;
0852     int rc, rc2;
0853 
0854     rc = efx_check_disabled(efx);
0855     if (rc)
0856         return rc;
0857 
0858     /* Not all channels should be reallocated. We must avoid
0859      * reallocating their buffer table entries.
0860      */
0861     efx_for_each_channel(channel, efx) {
0862         struct efx_rx_queue *rx_queue;
0863         struct efx_tx_queue *tx_queue;
0864 
0865         if (channel->type->copy)
0866             continue;
0867         next_buffer_table = max(next_buffer_table,
0868                     channel->eventq.index +
0869                     channel->eventq.entries);
0870         efx_for_each_channel_rx_queue(rx_queue, channel)
0871             next_buffer_table = max(next_buffer_table,
0872                         rx_queue->rxd.index +
0873                         rx_queue->rxd.entries);
0874         efx_for_each_channel_tx_queue(tx_queue, channel)
0875             next_buffer_table = max(next_buffer_table,
0876                         tx_queue->txd.index +
0877                         tx_queue->txd.entries);
0878     }
0879 
0880     efx_device_detach_sync(efx);
0881     efx_stop_all(efx);
0882     efx_soft_disable_interrupts(efx);
0883 
0884     /* Clone channels (where possible) */
0885     memset(other_channel, 0, sizeof(other_channel));
0886     for (i = 0; i < efx->n_channels; i++) {
0887         channel = efx->channel[i];
0888         if (channel->type->copy)
0889             channel = channel->type->copy(channel);
0890         if (!channel) {
0891             rc = -ENOMEM;
0892             goto out;
0893         }
0894         other_channel[i] = channel;
0895     }
0896 
0897     /* Swap entry counts and channel pointers */
0898     old_rxq_entries = efx->rxq_entries;
0899     old_txq_entries = efx->txq_entries;
0900     efx->rxq_entries = rxq_entries;
0901     efx->txq_entries = txq_entries;
0902     for (i = 0; i < efx->n_channels; i++)
0903         swap(efx->channel[i], other_channel[i]);
0904 
0905     /* Restart buffer table allocation */
0906     efx->next_buffer_table = next_buffer_table;
0907 
0908     for (i = 0; i < efx->n_channels; i++) {
0909         channel = efx->channel[i];
0910         if (!channel->type->copy)
0911             continue;
0912         rc = efx_probe_channel(channel);
0913         if (rc)
0914             goto rollback;
0915         efx_init_napi_channel(efx->channel[i]);
0916     }
0917 
0918     efx_set_xdp_channels(efx);
0919 out:
0920     efx->ptp_data = NULL;
0921     /* Destroy unused channel structures */
0922     for (i = 0; i < efx->n_channels; i++) {
0923         channel = other_channel[i];
0924         if (channel && channel->type->copy) {
0925             efx_fini_napi_channel(channel);
0926             efx_remove_channel(channel);
0927             kfree(channel);
0928         }
0929     }
0930 
0931     efx->ptp_data = ptp_data;
0932     rc2 = efx_soft_enable_interrupts(efx);
0933     if (rc2) {
0934         rc = rc ? rc : rc2;
0935         netif_err(efx, drv, efx->net_dev,
0936               "unable to restart interrupts on channel reallocation\n");
0937         efx_schedule_reset(efx, RESET_TYPE_DISABLE);
0938     } else {
0939         efx_start_all(efx);
0940         efx_device_attach_if_not_resetting(efx);
0941     }
0942     return rc;
0943 
0944 rollback:
0945     /* Swap back */
0946     efx->rxq_entries = old_rxq_entries;
0947     efx->txq_entries = old_txq_entries;
0948     for (i = 0; i < efx->n_channels; i++)
0949         swap(efx->channel[i], other_channel[i]);
0950     efx_ptp_update_channel(efx, ptp_channel);
0951     goto out;
0952 }
0953 
0954 int efx_set_channels(struct efx_nic *efx)
0955 {
0956     struct efx_channel *channel;
0957     int rc;
0958 
0959     if (efx->xdp_tx_queue_count) {
0960         EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
0961 
0962         /* Allocate array for XDP TX queue lookup. */
0963         efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
0964                          sizeof(*efx->xdp_tx_queues),
0965                          GFP_KERNEL);
0966         if (!efx->xdp_tx_queues)
0967             return -ENOMEM;
0968     }
0969 
0970     efx_for_each_channel(channel, efx) {
0971         if (channel->channel < efx->n_rx_channels)
0972             channel->rx_queue.core_index = channel->channel;
0973         else
0974             channel->rx_queue.core_index = -1;
0975     }
0976 
0977     efx_set_xdp_channels(efx);
0978 
0979     rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
0980     if (rc)
0981         return rc;
0982     return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
0983 }
0984 
0985 static bool efx_default_channel_want_txqs(struct efx_channel *channel)
0986 {
0987     return channel->channel - channel->efx->tx_channel_offset <
0988         channel->efx->n_tx_channels;
0989 }
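
/* Example: with tx_channel_offset = 4 and n_tx_channels = 4, channels 4-7
 * report that they want TX queues while channels 0-3 stay RX-only (channel
 * numbers below the offset fail the test because the subtraction is
 * performed in unsigned arithmetic).
 */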
0990 
0991 /*************
0992  * START/STOP
0993  *************/
0994 
0995 int efx_soft_enable_interrupts(struct efx_nic *efx)
0996 {
0997     struct efx_channel *channel, *end_channel;
0998     int rc;
0999 
1000     BUG_ON(efx->state == STATE_DISABLED);
1001 
1002     efx->irq_soft_enabled = true;
1003     smp_wmb();
1004 
1005     efx_for_each_channel(channel, efx) {
1006         if (!channel->type->keep_eventq) {
1007             rc = efx_init_eventq(channel);
1008             if (rc)
1009                 goto fail;
1010         }
1011         efx_start_eventq(channel);
1012     }
1013 
1014     efx_mcdi_mode_event(efx);
1015 
1016     return 0;
1017 fail:
1018     end_channel = channel;
1019     efx_for_each_channel(channel, efx) {
1020         if (channel == end_channel)
1021             break;
1022         efx_stop_eventq(channel);
1023         if (!channel->type->keep_eventq)
1024             efx_fini_eventq(channel);
1025     }
1026 
1027     return rc;
1028 }
1029 
1030 void efx_soft_disable_interrupts(struct efx_nic *efx)
1031 {
1032     struct efx_channel *channel;
1033 
1034     if (efx->state == STATE_DISABLED)
1035         return;
1036 
1037     efx_mcdi_mode_poll(efx);
1038 
1039     efx->irq_soft_enabled = false;
1040     smp_wmb();
1041 
1042     if (efx->legacy_irq)
1043         synchronize_irq(efx->legacy_irq);
1044 
1045     efx_for_each_channel(channel, efx) {
1046         if (channel->irq)
1047             synchronize_irq(channel->irq);
1048 
1049         efx_stop_eventq(channel);
1050         if (!channel->type->keep_eventq)
1051             efx_fini_eventq(channel);
1052     }
1053 
1054     /* Flush the asynchronous MCDI request queue */
1055     efx_mcdi_flush_async(efx);
1056 }
1057 
1058 int efx_enable_interrupts(struct efx_nic *efx)
1059 {
1060     struct efx_channel *channel, *end_channel;
1061     int rc;
1062 
1063     /* TODO: Is this really a bug? */
1064     BUG_ON(efx->state == STATE_DISABLED);
1065 
1066     if (efx->eeh_disabled_legacy_irq) {
1067         enable_irq(efx->legacy_irq);
1068         efx->eeh_disabled_legacy_irq = false;
1069     }
1070 
1071     efx->type->irq_enable_master(efx);
1072 
1073     efx_for_each_channel(channel, efx) {
1074         if (channel->type->keep_eventq) {
1075             rc = efx_init_eventq(channel);
1076             if (rc)
1077                 goto fail;
1078         }
1079     }
1080 
1081     rc = efx_soft_enable_interrupts(efx);
1082     if (rc)
1083         goto fail;
1084 
1085     return 0;
1086 
1087 fail:
1088     end_channel = channel;
1089     efx_for_each_channel(channel, efx) {
1090         if (channel == end_channel)
1091             break;
1092         if (channel->type->keep_eventq)
1093             efx_fini_eventq(channel);
1094     }
1095 
1096     efx->type->irq_disable_non_ev(efx);
1097 
1098     return rc;
1099 }
1100 
1101 void efx_disable_interrupts(struct efx_nic *efx)
1102 {
1103     struct efx_channel *channel;
1104 
1105     efx_soft_disable_interrupts(efx);
1106 
1107     efx_for_each_channel(channel, efx) {
1108         if (channel->type->keep_eventq)
1109             efx_fini_eventq(channel);
1110     }
1111 
1112     efx->type->irq_disable_non_ev(efx);
1113 }
1114 
1115 void efx_start_channels(struct efx_nic *efx)
1116 {
1117     struct efx_tx_queue *tx_queue;
1118     struct efx_rx_queue *rx_queue;
1119     struct efx_channel *channel;
1120 
1121     efx_for_each_channel_rev(channel, efx) {
1122         efx_for_each_channel_tx_queue(tx_queue, channel) {
1123             efx_init_tx_queue(tx_queue);
1124             atomic_inc(&efx->active_queues);
1125         }
1126 
1127         efx_for_each_channel_rx_queue(rx_queue, channel) {
1128             efx_init_rx_queue(rx_queue);
1129             atomic_inc(&efx->active_queues);
1130             efx_stop_eventq(channel);
1131             efx_fast_push_rx_descriptors(rx_queue, false);
1132             efx_start_eventq(channel);
1133         }
1134 
1135         WARN_ON(channel->rx_pkt_n_frags);
1136     }
1137 }
1138 
1139 void efx_stop_channels(struct efx_nic *efx)
1140 {
1141     struct efx_tx_queue *tx_queue;
1142     struct efx_rx_queue *rx_queue;
1143     struct efx_channel *channel;
1144     int rc = 0;
1145 
1146     /* Stop RX refill */
1147     efx_for_each_channel(channel, efx) {
1148         efx_for_each_channel_rx_queue(rx_queue, channel)
1149             rx_queue->refill_enabled = false;
1150     }
1151 
1152     efx_for_each_channel(channel, efx) {
1153         /* RX packet processing is pipelined, so wait for the
1154          * NAPI handler to complete.  At least event queue 0
1155          * might be kept active by non-data events, so don't
1156          * use napi_synchronize() but actually disable NAPI
1157          * temporarily.
1158          */
1159         if (efx_channel_has_rx_queue(channel)) {
1160             efx_stop_eventq(channel);
1161             efx_start_eventq(channel);
1162         }
1163     }
1164 
1165     if (efx->type->fini_dmaq)
1166         rc = efx->type->fini_dmaq(efx);
1167 
1168     if (rc) {
1169         netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
1170     } else {
1171         netif_dbg(efx, drv, efx->net_dev,
1172               "successfully flushed all queues\n");
1173     }
1174 
1175     efx_for_each_channel(channel, efx) {
1176         efx_for_each_channel_rx_queue(rx_queue, channel)
1177             efx_fini_rx_queue(rx_queue);
1178         efx_for_each_channel_tx_queue(tx_queue, channel)
1179             efx_fini_tx_queue(tx_queue);
1180     }
1181 }
1182 
1183 /**************************************************************************
1184  *
1185  * NAPI interface
1186  *
1187  *************************************************************************/
1188 
1189 /* Process channel's event queue
1190  *
1191  * This function is responsible for processing the event queue of a
1192  * single channel.  The caller must guarantee that this function will
1193  * never be concurrently called more than once on the same channel,
1194  * though different channels may be being processed concurrently.
1195  */
1196 static int efx_process_channel(struct efx_channel *channel, int budget)
1197 {
1198     struct efx_tx_queue *tx_queue;
1199     struct list_head rx_list;
1200     int spent;
1201 
1202     if (unlikely(!channel->enabled))
1203         return 0;
1204 
1205     /* Prepare the batch receive list */
1206     EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
1207     INIT_LIST_HEAD(&rx_list);
1208     channel->rx_list = &rx_list;
1209 
1210     efx_for_each_channel_tx_queue(tx_queue, channel) {
1211         tx_queue->pkts_compl = 0;
1212         tx_queue->bytes_compl = 0;
1213     }
1214 
1215     spent = efx_nic_process_eventq(channel, budget);
1216     if (spent && efx_channel_has_rx_queue(channel)) {
1217         struct efx_rx_queue *rx_queue =
1218             efx_channel_get_rx_queue(channel);
1219 
1220         efx_rx_flush_packet(channel);
1221         efx_fast_push_rx_descriptors(rx_queue, true);
1222     }
1223 
1224     /* Update BQL */
1225     efx_for_each_channel_tx_queue(tx_queue, channel) {
1226         if (tx_queue->bytes_compl) {
1227             netdev_tx_completed_queue(tx_queue->core_txq,
1228                           tx_queue->pkts_compl,
1229                           tx_queue->bytes_compl);
1230         }
1231     }
1232 
1233     /* Receive any packets we queued up */
1234     netif_receive_skb_list(channel->rx_list);
1235     channel->rx_list = NULL;
1236 
1237     return spent;
1238 }
1239 
1240 static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
1241 {
1242     int step = efx->irq_mod_step_us;
1243 
1244     if (channel->irq_mod_score < irq_adapt_low_thresh) {
1245         if (channel->irq_moderation_us > step) {
1246             channel->irq_moderation_us -= step;
1247             efx->type->push_irq_moderation(channel);
1248         }
1249     } else if (channel->irq_mod_score > irq_adapt_high_thresh) {
1250         if (channel->irq_moderation_us <
1251             efx->irq_rx_moderation_us) {
1252             channel->irq_moderation_us += step;
1253             efx->type->push_irq_moderation(channel);
1254         }
1255     }
1256 
1257     channel->irq_count = 0;
1258     channel->irq_mod_score = 0;
1259 }
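
/* Illustrative behaviour (irq_mod_step_us value assumed): with a step of
 * 1 us and the default thresholds above (8000/16000), a channel whose
 * irq_mod_score accumulated since the last adjustment is below 8000 has its
 * moderation delay reduced by 1 us, while a score above 16000 raises it by
 * 1 us provided it is still below efx->irq_rx_moderation_us.  efx_poll()
 * triggers this check every 1000 interrupts when adaptive moderation is
 * enabled, and both irq_count and irq_mod_score are then reset.
 */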
1260 
1261 /* NAPI poll handler
1262  *
1263  * NAPI guarantees serialisation of polls of the same device, which
1264  * provides the guarantee required by efx_process_channel().
1265  */
1266 static int efx_poll(struct napi_struct *napi, int budget)
1267 {
1268     struct efx_channel *channel =
1269         container_of(napi, struct efx_channel, napi_str);
1270     struct efx_nic *efx = channel->efx;
1271 #ifdef CONFIG_RFS_ACCEL
1272     unsigned int time;
1273 #endif
1274     int spent;
1275 
1276     netif_vdbg(efx, intr, efx->net_dev,
1277            "channel %d NAPI poll executing on CPU %d\n",
1278            channel->channel, raw_smp_processor_id());
1279 
1280     spent = efx_process_channel(channel, budget);
1281 
1282     xdp_do_flush_map();
1283 
1284     if (spent < budget) {
1285         if (efx_channel_has_rx_queue(channel) &&
1286             efx->irq_rx_adaptive &&
1287             unlikely(++channel->irq_count == 1000)) {
1288             efx_update_irq_mod(efx, channel);
1289         }
1290 
1291 #ifdef CONFIG_RFS_ACCEL
1292         /* Perhaps expire some ARFS filters */
1293         time = jiffies - channel->rfs_last_expiry;
1294         /* Would our quota be >= 20? */
1295         if (channel->rfs_filter_count * time >= 600 * HZ)
1296             mod_delayed_work(system_wq, &channel->filter_work, 0);
1297 #endif
1298 
1299         /* There is no race here; although napi_disable() will
1300          * only wait for napi_complete(), this isn't a problem
1301          * since efx_nic_eventq_read_ack() will have no effect if
1302          * interrupts have already been disabled.
1303          */
1304         if (napi_complete_done(napi, spent))
1305             efx_nic_eventq_read_ack(channel);
1306     }
1307 
1308     return spent;
1309 }
1310 
1311 void efx_init_napi_channel(struct efx_channel *channel)
1312 {
1313     struct efx_nic *efx = channel->efx;
1314 
1315     channel->napi_dev = efx->net_dev;
1316     netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64);
1317 }
1318 
1319 void efx_init_napi(struct efx_nic *efx)
1320 {
1321     struct efx_channel *channel;
1322 
1323     efx_for_each_channel(channel, efx)
1324         efx_init_napi_channel(channel);
1325 }
1326 
1327 void efx_fini_napi_channel(struct efx_channel *channel)
1328 {
1329     if (channel->napi_dev)
1330         netif_napi_del(&channel->napi_str);
1331 
1332     channel->napi_dev = NULL;
1333 }
1334 
1335 void efx_fini_napi(struct efx_nic *efx)
1336 {
1337     struct efx_channel *channel;
1338 
1339     efx_for_each_channel(channel, efx)
1340         efx_fini_napi_channel(channel);
1341 }
1342 
1343 /***************
1344  * Housekeeping
1345  ***************/
1346 
1347 static int efx_channel_dummy_op_int(struct efx_channel *channel)
1348 {
1349     return 0;
1350 }
1351 
1352 void efx_channel_dummy_op_void(struct efx_channel *channel)
1353 {
1354 }
1355 
1356 static const struct efx_channel_type efx_default_channel_type = {
1357     .pre_probe      = efx_channel_dummy_op_int,
1358     .post_remove        = efx_channel_dummy_op_void,
1359     .get_name       = efx_get_channel_name,
1360     .copy           = efx_copy_channel,
1361     .want_txqs      = efx_default_channel_want_txqs,
1362     .keep_eventq        = false,
1363     .want_pio       = true,
1364 };