0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /****************************************************************************
0003  * Driver for Solarflare network controllers and boards
0004  * Copyright 2005-2006 Fen Systems Ltd.
0005  * Copyright 2005-2013 Solarflare Communications Inc.
0006  */
0007 
0008 #include <linux/filter.h>
0009 #include <linux/module.h>
0010 #include <linux/pci.h>
0011 #include <linux/netdevice.h>
0012 #include <linux/etherdevice.h>
0013 #include <linux/delay.h>
0014 #include <linux/notifier.h>
0015 #include <linux/ip.h>
0016 #include <linux/tcp.h>
0017 #include <linux/in.h>
0018 #include <linux/ethtool.h>
0019 #include <linux/topology.h>
0020 #include <linux/gfp.h>
0021 #include <linux/aer.h>
0022 #include <linux/interrupt.h>
0023 #include "net_driver.h"
0024 #include <net/gre.h>
0025 #include <net/udp_tunnel.h>
0026 #include "efx.h"
0027 #include "efx_common.h"
0028 #include "efx_channels.h"
0029 #include "ef100.h"
0030 #include "rx_common.h"
0031 #include "tx_common.h"
0032 #include "nic.h"
0033 #include "io.h"
0034 #include "selftest.h"
0035 #include "sriov.h"
0036 
0037 #include "mcdi_port_common.h"
0038 #include "mcdi_pcol.h"
0039 #include "workarounds.h"
0040 
0041 /**************************************************************************
0042  *
0043  * Configurable values
0044  *
0045  *************************************************************************/
0046 
0047 module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
0048 MODULE_PARM_DESC(interrupt_mode,
0049          "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
0050 
0051 module_param(rss_cpus, uint, 0444);
0052 MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
0053 
0054 /*
0055  * Use separate channels for TX and RX events
0056  *
0057  * Set this to 1 to use separate channels for TX and RX. It allows us
0058  * to control interrupt affinity separately for TX and RX.
0059  *
0060  * This is only used in MSI-X interrupt mode
0061  */
0062 bool efx_separate_tx_channels;
0063 module_param(efx_separate_tx_channels, bool, 0444);
0064 MODULE_PARM_DESC(efx_separate_tx_channels,
0065          "Use separate channels for TX and RX");
0066 
0067 /* Initial interrupt moderation settings.  They can be modified after
0068  * module load with ethtool.
0069  *
0070  * The default for RX should strike a balance between increasing the
0071  * round-trip latency and reducing overhead.
0072  */
0073 static unsigned int rx_irq_mod_usec = 60;
0074 
0075 /* Initial interrupt moderation settings.  They can be modified after
0076  * module load with ethtool.
0077  *
0078  * This default is chosen to ensure that a 10G link does not go idle
0079  * while a TX queue is stopped after it has become full.  A queue is
0080  * restarted when it drops below half full.  The time this takes (assuming
0081  * worst case 3 descriptors per packet and 1024 descriptors) is
0082  *   512 / 3 * 1.2 = 205 usec (1.2 usec is approximately the wire time of a full-sized frame at 10G).
0083  */
0084 static unsigned int tx_irq_mod_usec = 150;
0085 
0086 static bool phy_flash_cfg;
0087 module_param(phy_flash_cfg, bool, 0644);
0088 MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
0089 
0090 static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
0091              NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
0092              NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
0093              NETIF_MSG_TX_ERR | NETIF_MSG_HW);
0094 module_param(debug, uint, 0);
0095 MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
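
/* Illustrative usage, not part of the driver: assuming the module is built
 * as "sfc", the parameters above are normally given at load time, e.g.
 *
 *   modprobe sfc interrupt_mode=0 rss_cpus=4 efx_separate_tx_channels=1
 *
 * while the interrupt moderation defaults above (rx_irq_mod_usec,
 * tx_irq_mod_usec) can be changed after load with
 * "ethtool -C <iface> rx-usecs 60 tx-usecs 150".
 */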
0096 
0097 /**************************************************************************
0098  *
0099  * Utility functions and prototypes
0100  *
0101  *************************************************************************/
0102 
0103 static void efx_remove_port(struct efx_nic *efx);
0104 static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
0105 static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
0106 static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
0107             u32 flags);
0108 
0109 /**************************************************************************
0110  *
0111  * Port handling
0112  *
0113  **************************************************************************/
0114 
0115 static void efx_fini_port(struct efx_nic *efx);
0116 
0117 static int efx_probe_port(struct efx_nic *efx)
0118 {
0119     int rc;
0120 
0121     netif_dbg(efx, probe, efx->net_dev, "create port\n");
0122 
0123     if (phy_flash_cfg)
0124         efx->phy_mode = PHY_MODE_SPECIAL;
0125 
0126     /* Connect up MAC/PHY operations table */
0127     rc = efx->type->probe_port(efx);
0128     if (rc)
0129         return rc;
0130 
0131     /* Initialise MAC address to permanent address */
0132     eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
0133 
0134     return 0;
0135 }
0136 
0137 static int efx_init_port(struct efx_nic *efx)
0138 {
0139     int rc;
0140 
0141     netif_dbg(efx, drv, efx->net_dev, "init port\n");
0142 
0143     mutex_lock(&efx->mac_lock);
0144 
0145     efx->port_initialized = true;
0146 
0147     /* Ensure the PHY advertises the correct flow control settings */
0148     rc = efx_mcdi_port_reconfigure(efx);
0149     if (rc && rc != -EPERM)
0150         goto fail;
0151 
0152     mutex_unlock(&efx->mac_lock);
0153     return 0;
0154 
0155 fail:
0156     mutex_unlock(&efx->mac_lock);
0157     return rc;
0158 }
0159 
0160 static void efx_fini_port(struct efx_nic *efx)
0161 {
0162     netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
0163 
0164     if (!efx->port_initialized)
0165         return;
0166 
0167     efx->port_initialized = false;
0168 
0169     efx->link_state.up = false;
0170     efx_link_status_changed(efx);
0171 }
0172 
0173 static void efx_remove_port(struct efx_nic *efx)
0174 {
0175     netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
0176 
0177     efx->type->remove_port(efx);
0178 }
0179 
0180 /**************************************************************************
0181  *
0182  * NIC handling
0183  *
0184  **************************************************************************/
0185 
0186 static LIST_HEAD(efx_primary_list);
0187 static LIST_HEAD(efx_unassociated_list);
0188 
0189 static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
0190 {
0191     return left->type == right->type &&
0192         left->vpd_sn && right->vpd_sn &&
0193         !strcmp(left->vpd_sn, right->vpd_sn);
0194 }
0195 
0196 static void efx_associate(struct efx_nic *efx)
0197 {
0198     struct efx_nic *other, *next;
0199 
0200     if (efx->primary == efx) {
0201         /* Adding primary function; look for secondaries */
0202 
0203         netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
0204         list_add_tail(&efx->node, &efx_primary_list);
0205 
0206         list_for_each_entry_safe(other, next, &efx_unassociated_list,
0207                      node) {
0208             if (efx_same_controller(efx, other)) {
0209                 list_del(&other->node);
0210                 netif_dbg(other, probe, other->net_dev,
0211                       "moving to secondary list of %s %s\n",
0212                       pci_name(efx->pci_dev),
0213                       efx->net_dev->name);
0214                 list_add_tail(&other->node,
0215                           &efx->secondary_list);
0216                 other->primary = efx;
0217             }
0218         }
0219     } else {
0220         /* Adding secondary function; look for primary */
0221 
0222         list_for_each_entry(other, &efx_primary_list, node) {
0223             if (efx_same_controller(efx, other)) {
0224                 netif_dbg(efx, probe, efx->net_dev,
0225                       "adding to secondary list of %s %s\n",
0226                       pci_name(other->pci_dev),
0227                       other->net_dev->name);
0228                 list_add_tail(&efx->node,
0229                           &other->secondary_list);
0230                 efx->primary = other;
0231                 return;
0232             }
0233         }
0234 
0235         netif_dbg(efx, probe, efx->net_dev,
0236               "adding to unassociated list\n");
0237         list_add_tail(&efx->node, &efx_unassociated_list);
0238     }
0239 }
0240 
0241 static void efx_dissociate(struct efx_nic *efx)
0242 {
0243     struct efx_nic *other, *next;
0244 
0245     list_del(&efx->node);
0246     efx->primary = NULL;
0247 
0248     list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
0249         list_del(&other->node);
0250         netif_dbg(other, probe, other->net_dev,
0251               "moving to unassociated list\n");
0252         list_add_tail(&other->node, &efx_unassociated_list);
0253         other->primary = NULL;
0254     }
0255 }
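
/* Note: association above is keyed on efx_same_controller(): two functions
 * are grouped as primary/secondary only when they share a NIC type and an
 * identical VPD serial number (efx->vpd_sn, read by efx_probe_vpd_strings()
 * later in this file).
 */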
0256 
0257 static int efx_probe_nic(struct efx_nic *efx)
0258 {
0259     int rc;
0260 
0261     netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
0262 
0263     /* Carry out hardware-type specific initialisation */
0264     rc = efx->type->probe(efx);
0265     if (rc)
0266         return rc;
0267 
0268     do {
0269         if (!efx->max_channels || !efx->max_tx_channels) {
0270             netif_err(efx, drv, efx->net_dev,
0271                   "Insufficient resources to allocate"
0272                   " any channels\n");
0273             rc = -ENOSPC;
0274             goto fail1;
0275         }
0276 
0277         /* Determine the number of channels and queues by trying
0278          * to hook in MSI-X interrupts.
0279          */
0280         rc = efx_probe_interrupts(efx);
0281         if (rc)
0282             goto fail1;
0283 
0284         rc = efx_set_channels(efx);
0285         if (rc)
0286             goto fail1;
0287 
0288         /* dimension_resources can fail with EAGAIN */
0289         rc = efx->type->dimension_resources(efx);
0290         if (rc != 0 && rc != -EAGAIN)
0291             goto fail2;
0292 
0293         if (rc == -EAGAIN)
0294             /* try again with new max_channels */
0295             efx_remove_interrupts(efx);
0296 
0297     } while (rc == -EAGAIN);
0298 
0299     if (efx->n_channels > 1)
0300         netdev_rss_key_fill(efx->rss_context.rx_hash_key,
0301                     sizeof(efx->rss_context.rx_hash_key));
0302     efx_set_default_rx_indir_table(efx, &efx->rss_context);
0303 
0304     /* Initialise the interrupt moderation settings */
0305     efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
0306     efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
0307                 true);
0308 
0309     return 0;
0310 
0311 fail2:
0312     efx_remove_interrupts(efx);
0313 fail1:
0314     efx->type->remove(efx);
0315     return rc;
0316 }
0317 
0318 static void efx_remove_nic(struct efx_nic *efx)
0319 {
0320     netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
0321 
0322     efx_remove_interrupts(efx);
0323     efx->type->remove(efx);
0324 }
0325 
0326 /**************************************************************************
0327  *
0328  * NIC startup/shutdown
0329  *
0330  *************************************************************************/
0331 
0332 static int efx_probe_all(struct efx_nic *efx)
0333 {
0334     int rc;
0335 
0336     rc = efx_probe_nic(efx);
0337     if (rc) {
0338         netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
0339         goto fail1;
0340     }
0341 
0342     rc = efx_probe_port(efx);
0343     if (rc) {
0344         netif_err(efx, probe, efx->net_dev, "failed to create port\n");
0345         goto fail2;
0346     }
0347 
0348     BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
0349     if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
0350         rc = -EINVAL;
0351         goto fail3;
0352     }
0353 
0354 #ifdef CONFIG_SFC_SRIOV
0355     rc = efx->type->vswitching_probe(efx);
0356     if (rc) /* not fatal; the PF will still work fine */
0357         netif_warn(efx, probe, efx->net_dev,
0358                "failed to setup vswitching rc=%d;"
0359                " VFs may not function\n", rc);
0360 #endif
0361 
0362     rc = efx_probe_filters(efx);
0363     if (rc) {
0364         netif_err(efx, probe, efx->net_dev,
0365               "failed to create filter tables\n");
0366         goto fail4;
0367     }
0368 
0369     rc = efx_probe_channels(efx);
0370     if (rc)
0371         goto fail5;
0372 
0373     efx->state = STATE_NET_DOWN;
0374 
0375     return 0;
0376 
0377  fail5:
0378     efx_remove_filters(efx);
0379  fail4:
0380 #ifdef CONFIG_SFC_SRIOV
0381     efx->type->vswitching_remove(efx);
0382 #endif
0383  fail3:
0384     efx_remove_port(efx);
0385  fail2:
0386     efx_remove_nic(efx);
0387  fail1:
0388     return rc;
0389 }
0390 
0391 static void efx_remove_all(struct efx_nic *efx)
0392 {
0393     rtnl_lock();
0394     efx_xdp_setup_prog(efx, NULL);
0395     rtnl_unlock();
0396 
0397     efx_remove_channels(efx);
0398     efx_remove_filters(efx);
0399 #ifdef CONFIG_SFC_SRIOV
0400     efx->type->vswitching_remove(efx);
0401 #endif
0402     efx_remove_port(efx);
0403     efx_remove_nic(efx);
0404 }
0405 
0406 /**************************************************************************
0407  *
0408  * Interrupt moderation
0409  *
0410  **************************************************************************/
0411 unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
0412 {
0413     if (usecs == 0)
0414         return 0;
0415     if (usecs * 1000 < efx->timer_quantum_ns)
0416         return 1; /* never round down to 0 */
0417     return usecs * 1000 / efx->timer_quantum_ns;
0418 }
0419 
0420 unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
0421 {
0422     /* We must round up when converting ticks to microseconds
0423      * because we round down when converting the other way.
0424      */
0425     return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
0426 }
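
/* Worked example (with a hypothetical timer_quantum_ns of 5000, i.e. 5 us
 * ticks): efx_usecs_to_ticks(efx, 60) = 60000 / 5000 = 12 ticks, and
 * efx_ticks_to_usecs(efx, 12) = DIV_ROUND_UP(12 * 5000, 1000) = 60 us.
 * efx_usecs_to_ticks(efx, 1) returns 1 rather than rounding down to zero.
 */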
0427 
0428 /* Set interrupt moderation parameters */
0429 int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
0430                 unsigned int rx_usecs, bool rx_adaptive,
0431                 bool rx_may_override_tx)
0432 {
0433     struct efx_channel *channel;
0434     unsigned int timer_max_us;
0435 
0436     EFX_ASSERT_RESET_SERIALISED(efx);
0437 
0438     timer_max_us = efx->timer_max_ns / 1000;
0439 
0440     if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
0441         return -EINVAL;
0442 
0443     if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
0444         !rx_may_override_tx) {
0445         netif_err(efx, drv, efx->net_dev, "Channels are shared. "
0446               "RX and TX IRQ moderation must be equal\n");
0447         return -EINVAL;
0448     }
0449 
0450     efx->irq_rx_adaptive = rx_adaptive;
0451     efx->irq_rx_moderation_us = rx_usecs;
0452     efx_for_each_channel(channel, efx) {
0453         if (efx_channel_has_rx_queue(channel))
0454             channel->irq_moderation_us = rx_usecs;
0455         else if (efx_channel_has_tx_queues(channel))
0456             channel->irq_moderation_us = tx_usecs;
0457         else if (efx_channel_is_xdp_tx(channel))
0458             channel->irq_moderation_us = tx_usecs;
0459     }
0460 
0461     return 0;
0462 }
0463 
0464 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
0465                 unsigned int *rx_usecs, bool *rx_adaptive)
0466 {
0467     *rx_adaptive = efx->irq_rx_adaptive;
0468     *rx_usecs = efx->irq_rx_moderation_us;
0469 
0470     /* If channels are shared between RX and TX, so is IRQ
0471      * moderation.  Otherwise, IRQ moderation is the same for all
0472      * TX channels and is not adaptive.
0473      */
0474     if (efx->tx_channel_offset == 0) {
0475         *tx_usecs = *rx_usecs;
0476     } else {
0477         struct efx_channel *tx_channel;
0478 
0479         tx_channel = efx->channel[efx->tx_channel_offset];
0480         *tx_usecs = tx_channel->irq_moderation_us;
0481     }
0482 }
0483 
0484 /**************************************************************************
0485  *
0486  * ioctls
0487  *
0488  *************************************************************************/
0489 
0490 /* Net device ioctl
0491  * Context: process, rtnl_lock() held.
0492  */
0493 static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
0494 {
0495     struct efx_nic *efx = efx_netdev_priv(net_dev);
0496     struct mii_ioctl_data *data = if_mii(ifr);
0497 
0498     if (cmd == SIOCSHWTSTAMP)
0499         return efx_ptp_set_ts_config(efx, ifr);
0500     if (cmd == SIOCGHWTSTAMP)
0501         return efx_ptp_get_ts_config(efx, ifr);
0502 
0503     /* Convert phy_id from older PRTAD/DEVAD format */
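    /* (The 0xfc00/0x0400 test below matches addresses using the old
     * clause-45 flag bit; the XOR clears that bit and sets MDIO_PHY_ID_C45
     * in its place, leaving the PRTAD/DEVAD fields unchanged.)
     */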
0504     if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
0505         (data->phy_id & 0xfc00) == 0x0400)
0506         data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
0507 
0508     return mdio_mii_ioctl(&efx->mdio, data, cmd);
0509 }
0510 
0511 /**************************************************************************
0512  *
0513  * Kernel net device interface
0514  *
0515  *************************************************************************/
0516 
0517 /* Context: process, rtnl_lock() held. */
0518 int efx_net_open(struct net_device *net_dev)
0519 {
0520     struct efx_nic *efx = efx_netdev_priv(net_dev);
0521     int rc;
0522 
0523     netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
0524           raw_smp_processor_id());
0525 
0526     rc = efx_check_disabled(efx);
0527     if (rc)
0528         return rc;
0529     if (efx->phy_mode & PHY_MODE_SPECIAL)
0530         return -EBUSY;
0531     if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
0532         return -EIO;
0533 
0534     /* Notify the kernel of the link state polled during driver load,
0535      * before the monitor starts running */
0536     efx_link_status_changed(efx);
0537 
0538     efx_start_all(efx);
0539     if (efx->state == STATE_DISABLED || efx->reset_pending)
0540         netif_device_detach(efx->net_dev);
0541     else
0542         efx->state = STATE_NET_UP;
0543 
0544     efx_selftest_async_start(efx);
0545     return 0;
0546 }
0547 
0548 /* Context: process, rtnl_lock() held.
0549  * Note that the kernel will ignore our return code; this method
0550  * should really be a void.
0551  */
0552 int efx_net_stop(struct net_device *net_dev)
0553 {
0554     struct efx_nic *efx = efx_netdev_priv(net_dev);
0555 
0556     netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
0557           raw_smp_processor_id());
0558 
0559     /* Stop the device and flush all the channels */
0560     efx_stop_all(efx);
0561 
0562     return 0;
0563 }
0564 
0565 static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
0566 {
0567     struct efx_nic *efx = efx_netdev_priv(net_dev);
0568 
0569     if (efx->type->vlan_rx_add_vid)
0570         return efx->type->vlan_rx_add_vid(efx, proto, vid);
0571     else
0572         return -EOPNOTSUPP;
0573 }
0574 
0575 static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
0576 {
0577     struct efx_nic *efx = efx_netdev_priv(net_dev);
0578 
0579     if (efx->type->vlan_rx_kill_vid)
0580         return efx->type->vlan_rx_kill_vid(efx, proto, vid);
0581     else
0582         return -EOPNOTSUPP;
0583 }
0584 
0585 static const struct net_device_ops efx_netdev_ops = {
0586     .ndo_open       = efx_net_open,
0587     .ndo_stop       = efx_net_stop,
0588     .ndo_get_stats64    = efx_net_stats,
0589     .ndo_tx_timeout     = efx_watchdog,
0590     .ndo_start_xmit     = efx_hard_start_xmit,
0591     .ndo_validate_addr  = eth_validate_addr,
0592     .ndo_eth_ioctl      = efx_ioctl,
0593     .ndo_change_mtu     = efx_change_mtu,
0594     .ndo_set_mac_address    = efx_set_mac_address,
0595     .ndo_set_rx_mode    = efx_set_rx_mode,
0596     .ndo_set_features   = efx_set_features,
0597     .ndo_features_check = efx_features_check,
0598     .ndo_vlan_rx_add_vid    = efx_vlan_rx_add_vid,
0599     .ndo_vlan_rx_kill_vid   = efx_vlan_rx_kill_vid,
0600 #ifdef CONFIG_SFC_SRIOV
0601     .ndo_set_vf_mac     = efx_sriov_set_vf_mac,
0602     .ndo_set_vf_vlan    = efx_sriov_set_vf_vlan,
0603     .ndo_set_vf_spoofchk    = efx_sriov_set_vf_spoofchk,
0604     .ndo_get_vf_config  = efx_sriov_get_vf_config,
0605     .ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
0606 #endif
0607     .ndo_get_phys_port_id   = efx_get_phys_port_id,
0608     .ndo_get_phys_port_name = efx_get_phys_port_name,
0609     .ndo_setup_tc       = efx_setup_tc,
0610 #ifdef CONFIG_RFS_ACCEL
0611     .ndo_rx_flow_steer  = efx_filter_rfs,
0612 #endif
0613     .ndo_xdp_xmit       = efx_xdp_xmit,
0614     .ndo_bpf        = efx_xdp
0615 };
0616 
0617 static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
0618 {
0619     struct bpf_prog *old_prog;
0620 
0621     if (efx->xdp_rxq_info_failed) {
0622         netif_err(efx, drv, efx->net_dev,
0623               "Unable to bind XDP program due to previous failure of rxq_info\n");
0624         return -EINVAL;
0625     }
0626 
0627     if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
0628         netif_err(efx, drv, efx->net_dev,
0629               "Unable to configure XDP with MTU of %d (max: %d)\n",
0630               efx->net_dev->mtu, efx_xdp_max_mtu(efx));
0631         return -EINVAL;
0632     }
0633 
0634     old_prog = rtnl_dereference(efx->xdp_prog);
0635     rcu_assign_pointer(efx->xdp_prog, prog);
0636     /* Release the reference that was originally passed by the caller. */
0637     if (old_prog)
0638         bpf_prog_put(old_prog);
0639 
0640     return 0;
0641 }
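
/* Note: calling efx_xdp_setup_prog(efx, NULL) detaches any installed XDP
 * program; efx_remove_all() uses this to drop the program before teardown.
 */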
0642 
0643 /* Context: process, rtnl_lock() held. */
0644 static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
0645 {
0646     struct efx_nic *efx = efx_netdev_priv(dev);
0647 
0648     switch (xdp->command) {
0649     case XDP_SETUP_PROG:
0650         return efx_xdp_setup_prog(efx, xdp->prog);
0651     default:
0652         return -EINVAL;
0653     }
0654 }
0655 
0656 static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
0657             u32 flags)
0658 {
0659     struct efx_nic *efx = efx_netdev_priv(dev);
0660 
0661     if (!netif_running(dev))
0662         return -EINVAL;
0663 
0664     return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
0665 }
0666 
0667 static void efx_update_name(struct efx_nic *efx)
0668 {
0669     strcpy(efx->name, efx->net_dev->name);
0670     efx_mtd_rename(efx);
0671     efx_set_channel_names(efx);
0672 }
0673 
0674 static int efx_netdev_event(struct notifier_block *this,
0675                 unsigned long event, void *ptr)
0676 {
0677     struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
0678 
0679     if ((net_dev->netdev_ops == &efx_netdev_ops) &&
0680         event == NETDEV_CHANGENAME)
0681         efx_update_name(efx_netdev_priv(net_dev));
0682 
0683     return NOTIFY_DONE;
0684 }
0685 
0686 static struct notifier_block efx_netdev_notifier = {
0687     .notifier_call = efx_netdev_event,
0688 };
0689 
0690 static ssize_t phy_type_show(struct device *dev,
0691                  struct device_attribute *attr, char *buf)
0692 {
0693     struct efx_nic *efx = dev_get_drvdata(dev);
0694     return sprintf(buf, "%d\n", efx->phy_type);
0695 }
0696 static DEVICE_ATTR_RO(phy_type);
0697 
0698 static int efx_register_netdev(struct efx_nic *efx)
0699 {
0700     struct net_device *net_dev = efx->net_dev;
0701     struct efx_channel *channel;
0702     int rc;
0703 
0704     net_dev->watchdog_timeo = 5 * HZ;
0705     net_dev->irq = efx->pci_dev->irq;
0706     net_dev->netdev_ops = &efx_netdev_ops;
0707     if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
0708         net_dev->priv_flags |= IFF_UNICAST_FLT;
0709     net_dev->ethtool_ops = &efx_ethtool_ops;
0710     netif_set_tso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
0711     net_dev->min_mtu = EFX_MIN_MTU;
0712     net_dev->max_mtu = EFX_MAX_MTU;
0713 
0714     rtnl_lock();
0715 
0716     /* Enable resets to be scheduled and check whether any were
0717      * already requested.  If so, the NIC is probably hosed so we
0718      * abort.
0719      */
0720     if (efx->reset_pending) {
0721         pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
0722         rc = -EIO;
0723         goto fail_locked;
0724     }
0725 
0726     rc = dev_alloc_name(net_dev, net_dev->name);
0727     if (rc < 0)
0728         goto fail_locked;
0729     efx_update_name(efx);
0730 
0731     /* Always start with carrier off; PHY events will detect the link */
0732     netif_carrier_off(net_dev);
0733 
0734     rc = register_netdevice(net_dev);
0735     if (rc)
0736         goto fail_locked;
0737 
0738     efx_for_each_channel(channel, efx) {
0739         struct efx_tx_queue *tx_queue;
0740         efx_for_each_channel_tx_queue(tx_queue, channel)
0741             efx_init_tx_queue_core_txq(tx_queue);
0742     }
0743 
0744     efx_associate(efx);
0745 
0746     efx->state = STATE_NET_DOWN;
0747 
0748     rtnl_unlock();
0749 
0750     rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
0751     if (rc) {
0752         netif_err(efx, drv, efx->net_dev,
0753               "failed to init net dev attributes\n");
0754         goto fail_registered;
0755     }
0756 
0757     efx_init_mcdi_logging(efx);
0758 
0759     return 0;
0760 
0761 fail_registered:
0762     rtnl_lock();
0763     efx_dissociate(efx);
0764     unregister_netdevice(net_dev);
0765 fail_locked:
0766     efx->state = STATE_UNINIT;
0767     rtnl_unlock();
0768     netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
0769     return rc;
0770 }
0771 
0772 static void efx_unregister_netdev(struct efx_nic *efx)
0773 {
0774     if (!efx->net_dev)
0775         return;
0776 
0777     if (WARN_ON(efx_netdev_priv(efx->net_dev) != efx))
0778         return;
0779 
0780     if (efx_dev_registered(efx)) {
0781         strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
0782         efx_fini_mcdi_logging(efx);
0783         device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
0784         unregister_netdev(efx->net_dev);
0785     }
0786 }
0787 
0788 /**************************************************************************
0789  *
0790  * List of NICs we support
0791  *
0792  **************************************************************************/
0793 
0794 /* PCI device ID table */
0795 static const struct pci_device_id efx_pci_table[] = {
0796     {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
0797      .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
0798     {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),  /* SFC9120 VF */
0799      .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
0800     {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
0801      .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
0802     {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),  /* SFC9140 VF */
0803      .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
0804     {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),  /* SFC9220 PF */
0805      .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
0806     {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),  /* SFC9220 VF */
0807      .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
0808     {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),  /* SFC9250 PF */
0809      .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
0810     {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),  /* SFC9250 VF */
0811      .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
0812     {0}         /* end of list */
0813 };
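
/* Note: .driver_data in each entry above points at the struct efx_nic_type
 * for that controller; efx_pci_probe() below casts it back when binding a
 * device (efx->type = (const struct efx_nic_type *)entry->driver_data).
 */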
0814 
0815 /**************************************************************************
0816  *
0817  * Data housekeeping
0818  *
0819  **************************************************************************/
0820 
0821 void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
0822 {
0823     u64 n_rx_nodesc_trunc = 0;
0824     struct efx_channel *channel;
0825 
0826     efx_for_each_channel(channel, efx)
0827         n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
0828     stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
0829     stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
0830 }
0831 
0832 /**************************************************************************
0833  *
0834  * PCI interface
0835  *
0836  **************************************************************************/
0837 
0838 /* Main body of final NIC shutdown code
0839  * This is called only at module unload (or hotplug removal).
0840  */
0841 static void efx_pci_remove_main(struct efx_nic *efx)
0842 {
0843     /* Flush reset_work. It can no longer be scheduled since we
0844      * are not READY.
0845      */
0846     WARN_ON(efx_net_active(efx->state));
0847     efx_flush_reset_workqueue(efx);
0848 
0849     efx_disable_interrupts(efx);
0850     efx_clear_interrupt_affinity(efx);
0851     efx_nic_fini_interrupt(efx);
0852     efx_fini_port(efx);
0853     efx->type->fini(efx);
0854     efx_fini_napi(efx);
0855     efx_remove_all(efx);
0856 }
0857 
0858 /* Final NIC shutdown
0859  * This is called only at module unload (or hotplug removal).  A PF can call
0860  * this on its VFs to ensure they are unbound first.
0861  */
0862 static void efx_pci_remove(struct pci_dev *pci_dev)
0863 {
0864     struct efx_probe_data *probe_data;
0865     struct efx_nic *efx;
0866 
0867     efx = pci_get_drvdata(pci_dev);
0868     if (!efx)
0869         return;
0870 
0871     /* Mark the NIC as fini, then stop the interface */
0872     rtnl_lock();
0873     efx_dissociate(efx);
0874     dev_close(efx->net_dev);
0875     efx_disable_interrupts(efx);
0876     efx->state = STATE_UNINIT;
0877     rtnl_unlock();
0878 
0879     if (efx->type->sriov_fini)
0880         efx->type->sriov_fini(efx);
0881 
0882     efx_unregister_netdev(efx);
0883 
0884     efx_mtd_remove(efx);
0885 
0886     efx_pci_remove_main(efx);
0887 
0888     efx_fini_io(efx);
0889     pci_dbg(efx->pci_dev, "shutdown successful\n");
0890 
0891     efx_fini_struct(efx);
0892     free_netdev(efx->net_dev);
0893     probe_data = container_of(efx, struct efx_probe_data, efx);
0894     kfree(probe_data);
0895 
0896     pci_disable_pcie_error_reporting(pci_dev);
0897 }
0898 
0899 /* NIC VPD information
0900  * Called during probe to display the part number of the
0901  * installed NIC.
0902  */
0903 static void efx_probe_vpd_strings(struct efx_nic *efx)
0904 {
0905     struct pci_dev *dev = efx->pci_dev;
0906     unsigned int vpd_size, kw_len;
0907     u8 *vpd_data;
0908     int start;
0909 
0910     vpd_data = pci_vpd_alloc(dev, &vpd_size);
0911     if (IS_ERR(vpd_data)) {
0912         pci_warn(dev, "Unable to read VPD\n");
0913         return;
0914     }
0915 
0916     start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
0917                          PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
0918     if (start < 0)
0919         pci_err(dev, "Part number not found or incomplete\n");
0920     else
0921         pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
0922 
0923     start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
0924                          PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
0925     if (start < 0)
0926         pci_err(dev, "Serial number not found or incomplete\n");
0927     else
0928         efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
0929 
0930     kfree(vpd_data);
0931 }
0932 
0933 
0934 /* Main body of NIC initialisation
0935  * This is called at module load (or hotplug insertion, theoretically).
0936  */
0937 static int efx_pci_probe_main(struct efx_nic *efx)
0938 {
0939     int rc;
0940 
0941     /* Do start-of-day initialisation */
0942     rc = efx_probe_all(efx);
0943     if (rc)
0944         goto fail1;
0945 
0946     efx_init_napi(efx);
0947 
0948     down_write(&efx->filter_sem);
0949     rc = efx->type->init(efx);
0950     up_write(&efx->filter_sem);
0951     if (rc) {
0952         pci_err(efx->pci_dev, "failed to initialise NIC\n");
0953         goto fail3;
0954     }
0955 
0956     rc = efx_init_port(efx);
0957     if (rc) {
0958         netif_err(efx, probe, efx->net_dev,
0959               "failed to initialise port\n");
0960         goto fail4;
0961     }
0962 
0963     rc = efx_nic_init_interrupt(efx);
0964     if (rc)
0965         goto fail5;
0966 
0967     efx_set_interrupt_affinity(efx);
0968     rc = efx_enable_interrupts(efx);
0969     if (rc)
0970         goto fail6;
0971 
0972     return 0;
0973 
0974  fail6:
0975     efx_clear_interrupt_affinity(efx);
0976     efx_nic_fini_interrupt(efx);
0977  fail5:
0978     efx_fini_port(efx);
0979  fail4:
0980     efx->type->fini(efx);
0981  fail3:
0982     efx_fini_napi(efx);
0983     efx_remove_all(efx);
0984  fail1:
0985     return rc;
0986 }
0987 
0988 static int efx_pci_probe_post_io(struct efx_nic *efx)
0989 {
0990     struct net_device *net_dev = efx->net_dev;
0991     int rc = efx_pci_probe_main(efx);
0992 
0993     if (rc)
0994         return rc;
0995 
0996     if (efx->type->sriov_init) {
0997         rc = efx->type->sriov_init(efx);
0998         if (rc)
0999             pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n",
1000                 rc);
1001     }
1002 
1003     /* Determine netdevice features */
1004     net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
1005                   NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
1006     if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1007         net_dev->features |= NETIF_F_TSO6;
1008     /* Check whether device supports TSO */
1009     if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
1010         net_dev->features &= ~NETIF_F_ALL_TSO;
1011     /* Mask for features that also apply to VLAN devices */
1012     net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
1013                    NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
1014                    NETIF_F_RXCSUM);
1015 
1016     net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
1017 
1018     /* Disable receiving frames with bad FCS, by default. */
1019     net_dev->features &= ~NETIF_F_RXALL;
1020 
1021     /* Disable VLAN filtering by default.  It may be enforced if
1022      * the feature is fixed (i.e. VLAN filters are required to
1023      * receive VLAN tagged packets due to vPort restrictions).
1024      */
1025     net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1026     net_dev->features |= efx->fixed_features;
1027 
1028     rc = efx_register_netdev(efx);
1029     if (!rc)
1030         return 0;
1031 
1032     efx_pci_remove_main(efx);
1033     return rc;
1034 }
1035 
1036 /* NIC initialisation
1037  *
1038  * This is called at module load (or hotplug insertion,
1039  * theoretically).  It sets up PCI mappings, resets the NIC,
1040  * sets up and registers the network devices with the kernel and hooks
1041  * the interrupt service routine.  It does not prepare the device for
1042  * transmission; this is left to the first time one of the network
1043  * interfaces is brought up (i.e. efx_net_open).
1044  */
1045 static int efx_pci_probe(struct pci_dev *pci_dev,
1046              const struct pci_device_id *entry)
1047 {
1048     struct efx_probe_data *probe_data, **probe_ptr;
1049     struct net_device *net_dev;
1050     struct efx_nic *efx;
1051     int rc;
1052 
1053     /* Allocate probe data and struct efx_nic */
1054     probe_data = kzalloc(sizeof(*probe_data), GFP_KERNEL);
1055     if (!probe_data)
1056         return -ENOMEM;
1057     probe_data->pci_dev = pci_dev;
1058     efx = &probe_data->efx;
1059 
1060     /* Allocate and initialise a struct net_device */
1061     net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES);
1062     if (!net_dev)
1063         return -ENOMEM;
1064     probe_ptr = netdev_priv(net_dev);
1065     *probe_ptr = probe_data;
1066     efx->net_dev = net_dev;
1067     efx->type = (const struct efx_nic_type *) entry->driver_data;
1068     efx->fixed_features |= NETIF_F_HIGHDMA;
1069 
1070     pci_set_drvdata(pci_dev, efx);
1071     SET_NETDEV_DEV(net_dev, &pci_dev->dev);
1072     rc = efx_init_struct(efx, pci_dev);
1073     if (rc)
1074         goto fail1;
1075     efx->mdio.dev = net_dev;
1076 
1077     pci_info(pci_dev, "Solarflare NIC detected\n");
1078 
1079     if (!efx->type->is_vf)
1080         efx_probe_vpd_strings(efx);
1081 
1082     /* Set up basic I/O (BAR mappings etc) */
1083     rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
1084              efx->type->mem_map_size(efx));
1085     if (rc)
1086         goto fail2;
1087 
1088     rc = efx_pci_probe_post_io(efx);
1089     if (rc) {
1090         /* On failure, retry once immediately.
1091          * If we aborted probe due to a scheduled reset, dismiss it.
1092          */
1093         efx->reset_pending = 0;
1094         rc = efx_pci_probe_post_io(efx);
1095         if (rc) {
1096             /* On another failure, retry once more
1097              * after a 50-305ms delay.
1098              */
1099             unsigned char r;
1100 
1101             get_random_bytes(&r, 1);
1102             msleep((unsigned int)r + 50);
1103             efx->reset_pending = 0;
1104             rc = efx_pci_probe_post_io(efx);
1105         }
1106     }
1107     if (rc)
1108         goto fail3;
1109 
1110     netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
1111 
1112     /* Try to create MTDs, but allow this to fail */
1113     rtnl_lock();
1114     rc = efx_mtd_probe(efx);
1115     rtnl_unlock();
1116     if (rc && rc != -EPERM)
1117         netif_warn(efx, probe, efx->net_dev,
1118                "failed to create MTDs (%d)\n", rc);
1119 
1120     (void)pci_enable_pcie_error_reporting(pci_dev);
1121 
1122     if (efx->type->udp_tnl_push_ports)
1123         efx->type->udp_tnl_push_ports(efx);
1124 
1125     return 0;
1126 
1127  fail3:
1128     efx_fini_io(efx);
1129  fail2:
1130     efx_fini_struct(efx);
1131  fail1:
1132     WARN_ON(rc > 0);
1133     netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
1134     free_netdev(net_dev);
1135     return rc;
1136 }
1137 
1138 /* efx_pci_sriov_configure returns the actual number of Virtual Functions
1139  * enabled on success
1140  */
1141 #ifdef CONFIG_SFC_SRIOV
1142 static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
1143 {
1144     int rc;
1145     struct efx_nic *efx = pci_get_drvdata(dev);
1146 
1147     if (efx->type->sriov_configure) {
1148         rc = efx->type->sriov_configure(efx, num_vfs);
1149         if (rc)
1150             return rc;
1151         else
1152             return num_vfs;
1153     } else
1154         return -EOPNOTSUPP;
1155 }
1156 #endif
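
/* Illustrative example, not part of the driver: when SR-IOV support is
 * built in, VFs are normally enabled through the standard PCI sysfs
 * interface, which reaches efx_pci_sriov_configure() above, e.g.
 *
 *   echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 *
 * Writing 0 disables the VFs again.
 */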
1157 
1158 static int efx_pm_freeze(struct device *dev)
1159 {
1160     struct efx_nic *efx = dev_get_drvdata(dev);
1161 
1162     rtnl_lock();
1163 
1164     if (efx_net_active(efx->state)) {
1165         efx_device_detach_sync(efx);
1166 
1167         efx_stop_all(efx);
1168         efx_disable_interrupts(efx);
1169 
1170         efx->state = efx_freeze(efx->state);
1171     }
1172 
1173     rtnl_unlock();
1174 
1175     return 0;
1176 }
1177 
1178 static int efx_pm_thaw(struct device *dev)
1179 {
1180     int rc;
1181     struct efx_nic *efx = dev_get_drvdata(dev);
1182 
1183     rtnl_lock();
1184 
1185     if (efx_frozen(efx->state)) {
1186         rc = efx_enable_interrupts(efx);
1187         if (rc)
1188             goto fail;
1189 
1190         mutex_lock(&efx->mac_lock);
1191         efx_mcdi_port_reconfigure(efx);
1192         mutex_unlock(&efx->mac_lock);
1193 
1194         efx_start_all(efx);
1195 
1196         efx_device_attach_if_not_resetting(efx);
1197 
1198         efx->state = efx_thaw(efx->state);
1199 
1200         efx->type->resume_wol(efx);
1201     }
1202 
1203     rtnl_unlock();
1204 
1205     /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
1206     efx_queue_reset_work(efx);
1207 
1208     return 0;
1209 
1210 fail:
1211     rtnl_unlock();
1212 
1213     return rc;
1214 }
1215 
1216 static int efx_pm_poweroff(struct device *dev)
1217 {
1218     struct pci_dev *pci_dev = to_pci_dev(dev);
1219     struct efx_nic *efx = pci_get_drvdata(pci_dev);
1220 
1221     efx->type->fini(efx);
1222 
1223     efx->reset_pending = 0;
1224 
1225     pci_save_state(pci_dev);
1226     return pci_set_power_state(pci_dev, PCI_D3hot);
1227 }
1228 
1229 /* Used for both resume and restore */
1230 static int efx_pm_resume(struct device *dev)
1231 {
1232     struct pci_dev *pci_dev = to_pci_dev(dev);
1233     struct efx_nic *efx = pci_get_drvdata(pci_dev);
1234     int rc;
1235 
1236     rc = pci_set_power_state(pci_dev, PCI_D0);
1237     if (rc)
1238         return rc;
1239     pci_restore_state(pci_dev);
1240     rc = pci_enable_device(pci_dev);
1241     if (rc)
1242         return rc;
1243     pci_set_master(efx->pci_dev);
1244     rc = efx->type->reset(efx, RESET_TYPE_ALL);
1245     if (rc)
1246         return rc;
1247     down_write(&efx->filter_sem);
1248     rc = efx->type->init(efx);
1249     up_write(&efx->filter_sem);
1250     if (rc)
1251         return rc;
1252     rc = efx_pm_thaw(dev);
1253     return rc;
1254 }
1255 
1256 static int efx_pm_suspend(struct device *dev)
1257 {
1258     int rc;
1259 
1260     efx_pm_freeze(dev);
1261     rc = efx_pm_poweroff(dev);
1262     if (rc)
1263         efx_pm_resume(dev);
1264     return rc;
1265 }
1266 
1267 static const struct dev_pm_ops efx_pm_ops = {
1268     .suspend    = efx_pm_suspend,
1269     .resume     = efx_pm_resume,
1270     .freeze     = efx_pm_freeze,
1271     .thaw       = efx_pm_thaw,
1272     .poweroff   = efx_pm_poweroff,
1273     .restore    = efx_pm_resume,
1274 };
1275 
1276 static struct pci_driver efx_pci_driver = {
1277     .name       = KBUILD_MODNAME,
1278     .id_table   = efx_pci_table,
1279     .probe      = efx_pci_probe,
1280     .remove     = efx_pci_remove,
1281     .driver.pm  = &efx_pm_ops,
1282     .err_handler    = &efx_err_handlers,
1283 #ifdef CONFIG_SFC_SRIOV
1284     .sriov_configure = efx_pci_sriov_configure,
1285 #endif
1286 };
1287 
1288 /**************************************************************************
1289  *
1290  * Kernel module interface
1291  *
1292  *************************************************************************/
1293 
1294 static int __init efx_init_module(void)
1295 {
1296     int rc;
1297 
1298     printk(KERN_INFO "Solarflare NET driver\n");
1299 
1300     rc = register_netdevice_notifier(&efx_netdev_notifier);
1301     if (rc)
1302         goto err_notifier;
1303 
1304     rc = efx_create_reset_workqueue();
1305     if (rc)
1306         goto err_reset;
1307 
1308     rc = pci_register_driver(&efx_pci_driver);
1309     if (rc < 0)
1310         goto err_pci;
1311 
1312     rc = pci_register_driver(&ef100_pci_driver);
1313     if (rc < 0)
1314         goto err_pci_ef100;
1315 
1316     return 0;
1317 
1318  err_pci_ef100:
1319     pci_unregister_driver(&efx_pci_driver);
1320  err_pci:
1321     efx_destroy_reset_workqueue();
1322  err_reset:
1323     unregister_netdevice_notifier(&efx_netdev_notifier);
1324  err_notifier:
1325     return rc;
1326 }
1327 
1328 static void __exit efx_exit_module(void)
1329 {
1330     printk(KERN_INFO "Solarflare NET driver unloading\n");
1331 
1332     pci_unregister_driver(&ef100_pci_driver);
1333     pci_unregister_driver(&efx_pci_driver);
1334     efx_destroy_reset_workqueue();
1335     unregister_netdevice_notifier(&efx_netdev_notifier);
1336 
1337 }
1338 
1339 module_init(efx_init_module);
1340 module_exit(efx_exit_module);
1341 
1342 MODULE_AUTHOR("Solarflare Communications and "
1343           "Michael Brown <mbrown@fensystems.co.uk>");
1344 MODULE_DESCRIPTION("Solarflare network driver");
1345 MODULE_LICENSE("GPL");
1346 MODULE_DEVICE_TABLE(pci, efx_pci_table);