// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2012 Solarflare Communications Inc.
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "nic.h"
#include "mcdi_port_common.h"
#include "selftest.h"
#include "workarounds.h"

/* IRQ latency can be enormous because:
 * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
 *   slow serial console or an old IDE driver doing error recovery
 * - The PREEMPT_RT patches mostly deal with this, but also allow a
 *   tasklet or normal task to be given higher priority than our IRQ
 *   threads
 * Try to avoid blaming the hardware for this.
 */
#define IRQ_TIMEOUT HZ

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct efx_loopback_payload {
    struct ethhdr header;
    struct iphdr ip;
    struct udphdr udp;
    __be16 iteration;
    char msg[64];
} __packed;

/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
    0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

static const char payload_msg[] =
    "Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names */
static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *const efx_interrupt_mode_names[] = {
    [EFX_INT_MODE_MSIX]   = "MSI-X",
    [EFX_INT_MODE_MSI]    = "MSI",
    [EFX_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
    STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)

/**
 * struct efx_loopback_state - persistent state during a loopback selftest
 * @flush:      Drop all packets in efx_loopback_rx_packet
 * @packet_count:   Number of packets being used in this test
 * @skbs:       An array of skbs transmitted
 * @offload_csum:   Checksums are being offloaded
 * @rx_good:        RX good packet count
 * @rx_bad:     RX bad packet count
 * @payload:        Payload used in tests
 */
struct efx_loopback_state {
    bool flush;
    int packet_count;
    struct sk_buff **skbs;
    bool offload_csum;
    atomic_t rx_good;
    atomic_t rx_bad;
    struct efx_loopback_payload payload;
};

/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000

/**************************************************************************
 *
 * MII, NVRAM and register tests
 *
 **************************************************************************/

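/* Check that the PHY is responsive by issuing an MCDI "test alive"
 * request; the result is recorded in @tests as pass (1) or fail (-1).
 */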
static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
    int rc = 0;

    rc = efx_mcdi_phy_test_alive(efx);
    tests->phy_alive = rc ? -1 : 1;

    return rc;
}

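/* Run the NIC-type-specific NVRAM test, if one is provided.  -EPERM
 * (test not permitted) is ignored rather than reported as a failure.
 */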
static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
{
    int rc = 0;

    if (efx->type->test_nvram) {
        rc = efx->type->test_nvram(efx);
        if (rc == -EPERM)
            rc = 0;
        else
            tests->nvram = rc ? -1 : 1;
    }

    return rc;
}

/**************************************************************************
 *
 * Interrupt and event queue testing
 *
 **************************************************************************/

/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
                   struct efx_self_tests *tests)
{
    unsigned long timeout, wait;
    int cpu;
    int rc;

    netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
    tests->interrupt = -1;

    rc = efx_nic_irq_test_start(efx);
    if (rc == -ENOTSUPP) {
        netif_dbg(efx, drv, efx->net_dev,
              "direct interrupt testing not supported\n");
        tests->interrupt = 0;
        return 0;
    }

    timeout = jiffies + IRQ_TIMEOUT;
    wait = 1;

    /* Wait for arrival of test interrupt. */
    netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
    do {
        schedule_timeout_uninterruptible(wait);
        cpu = efx_nic_irq_test_irq_cpu(efx);
        if (cpu >= 0)
            goto success;
        wait *= 2;
    } while (time_before(jiffies, timeout));

    netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
    return -ETIMEDOUT;

 success:
    netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
          INT_MODE(efx), cpu);
    tests->interrupt = 1;
    return 0;
}

/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_nic *efx,
                   struct efx_self_tests *tests)
{
    struct efx_channel *channel;
    unsigned int read_ptr[EFX_MAX_CHANNELS];
    unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
    unsigned long timeout, wait;

    BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);

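    /* Each channel owns one bit in the dma_pend and int_pend masks; a
     * bit is cleared once the corresponding evidence (an event in the
     * queue, or a test interrupt) has been seen for that channel.
     */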
    efx_for_each_channel(channel, efx) {
        read_ptr[channel->channel] = channel->eventq_read_ptr;
        set_bit(channel->channel, &dma_pend);
        set_bit(channel->channel, &int_pend);
        efx_nic_event_test_start(channel);
    }

    timeout = jiffies + IRQ_TIMEOUT;
    wait = 1;

    /* Wait for arrival of interrupts.  NAPI processing may or may
     * not complete in time, but we can cope in any case.
     */
    do {
        schedule_timeout_uninterruptible(wait);

        efx_for_each_channel(channel, efx) {
            efx_stop_eventq(channel);
            if (channel->eventq_read_ptr !=
                read_ptr[channel->channel]) {
                set_bit(channel->channel, &napi_ran);
                clear_bit(channel->channel, &dma_pend);
                clear_bit(channel->channel, &int_pend);
            } else {
                if (efx_nic_event_present(channel))
                    clear_bit(channel->channel, &dma_pend);
                if (efx_nic_event_test_irq_cpu(channel) >= 0)
                    clear_bit(channel->channel, &int_pend);
            }
            efx_start_eventq(channel);
        }

        wait *= 2;
    } while ((dma_pend || int_pend) && time_before(jiffies, timeout));

    efx_for_each_channel(channel, efx) {
        bool dma_seen = !test_bit(channel->channel, &dma_pend);
        bool int_seen = !test_bit(channel->channel, &int_pend);

        tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
        tests->eventq_int[channel->channel] = int_seen ? 1 : -1;

        if (dma_seen && int_seen) {
            netif_dbg(efx, drv, efx->net_dev,
                  "channel %d event queue passed (with%s NAPI)\n",
                  channel->channel,
                  test_bit(channel->channel, &napi_ran) ?
                  "" : "out");
        } else {
            /* Report failure and whether either interrupt or DMA
             * worked
             */
            netif_err(efx, drv, efx->net_dev,
                  "channel %d timed out waiting for event queue\n",
                  channel->channel);
            if (int_seen)
                netif_err(efx, drv, efx->net_dev,
                      "channel %d saw interrupt "
                      "during event queue test\n",
                      channel->channel);
            if (dma_seen)
                netif_err(efx, drv, efx->net_dev,
                      "channel %d event was generated, but "
                      "failed to trigger an interrupt\n",
                      channel->channel);
        }
    }

    return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}

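/* Run the PHY self-tests via MCDI under the MAC lock, recording the
 * results in tests->phy_ext.  -EPERM is not treated as a failure.
 */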
static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
            unsigned flags)
{
    int rc;

    mutex_lock(&efx->mac_lock);
    rc = efx_mcdi_phy_run_tests(efx, tests->phy_ext, flags);
    mutex_unlock(&efx->mac_lock);
    if (rc == -EPERM)
        rc = 0;
    else
        netif_info(efx, drv, efx->net_dev,
               "%s phy selftest\n", rc ? "Failed" : "Passed");

    return rc;
}

/**************************************************************************
 *
 * Loopback testing
 * NB Only one loopback test can be executing concurrently.
 *
 **************************************************************************/

/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_loopback_rx_packet(struct efx_nic *efx,
                const char *buf_ptr, int pkt_len)
{
    struct efx_loopback_state *state = efx->loopback_selftest;
    struct efx_loopback_payload *received;
    struct efx_loopback_payload *payload;

    BUG_ON(!buf_ptr);

    /* If we are just flushing, then drop the packet */
    if ((state == NULL) || state->flush)
        return;

    payload = &state->payload;

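    /* The source address was used as a per-packet counter on transmit,
     * and the IP checksum may have been filled in by hardware offload;
     * normalise both so the comparisons below check against the
     * reference payload cleanly.
     */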
    received = (struct efx_loopback_payload *) buf_ptr;
    received->ip.saddr = payload->ip.saddr;
    if (state->offload_csum)
        received->ip.check = payload->ip.check;

    /* Check that header exists */
    if (pkt_len < sizeof(received->header)) {
        netif_err(efx, drv, efx->net_dev,
              "saw runt RX packet (length %d) in %s loopback "
              "test\n", pkt_len, LOOPBACK_MODE(efx));
        goto err;
    }

    /* Check that the ethernet header exists */
    if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
        netif_err(efx, drv, efx->net_dev,
              "saw non-loopback RX packet in %s loopback test\n",
              LOOPBACK_MODE(efx));
        goto err;
    }

    /* Check packet length */
    if (pkt_len != sizeof(*payload)) {
        netif_err(efx, drv, efx->net_dev,
              "saw incorrect RX packet length %d (wanted %d) in "
              "%s loopback test\n", pkt_len, (int)sizeof(*payload),
              LOOPBACK_MODE(efx));
        goto err;
    }

    /* Check that IP header matches */
    if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
        netif_err(efx, drv, efx->net_dev,
              "saw corrupted IP header in %s loopback test\n",
              LOOPBACK_MODE(efx));
        goto err;
    }

    /* Check that msg and padding match */
    if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
        netif_err(efx, drv, efx->net_dev,
              "saw corrupted RX packet in %s loopback test\n",
              LOOPBACK_MODE(efx));
        goto err;
    }

    /* Check that iteration matches */
    if (received->iteration != payload->iteration) {
        netif_err(efx, drv, efx->net_dev,
              "saw RX packet from iteration %d (wanted %d) in "
              "%s loopback test\n", ntohs(received->iteration),
              ntohs(payload->iteration), LOOPBACK_MODE(efx));
        goto err;
    }

    /* Increase correct RX count */
    netif_vdbg(efx, drv, efx->net_dev,
           "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

    atomic_inc(&state->rx_good);
    return;

 err:
#ifdef DEBUG
    if (atomic_read(&state->rx_bad) == 0) {
        netif_err(efx, drv, efx->net_dev, "received packet:\n");
        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
                   buf_ptr, pkt_len, 0);
        netif_err(efx, drv, efx->net_dev, "expected packet:\n");
        print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
                   &state->payload, sizeof(state->payload), 0);
    }
#endif
    atomic_inc(&state->rx_bad);
}

/* Initialise an efx_loopback_state for a new iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
    struct efx_loopback_state *state = efx->loopback_selftest;
    struct net_device *net_dev = efx->net_dev;
    struct efx_loopback_payload *payload = &state->payload;

    /* Initialise the layer II (Ethernet) header */
    ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
    ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
    payload->header.h_proto = htons(ETH_P_IP);

    /* saddr set later and used as incrementing count */
    payload->ip.daddr = htonl(INADDR_LOOPBACK);
    payload->ip.ihl = 5;
    payload->ip.check = (__force __sum16) htons(0xdead);
    payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
    payload->ip.version = IPVERSION;
    payload->ip.protocol = IPPROTO_UDP;

    /* Initialise udp header */
    payload->udp.source = 0;
    payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
                 sizeof(struct iphdr));
    payload->udp.check = 0; /* checksum ignored */

    /* Fill out payload */
    payload->iteration = htons(ntohs(payload->iteration) + 1);
    memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

    /* Fill out remaining state members */
    atomic_set(&state->rx_good, 0);
    atomic_set(&state->rx_bad, 0);
    smp_wmb();
}

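/* Transmit state->packet_count copies of the test payload on @tx_queue,
 * keeping an extra reference to each skb so that TX completions can be
 * counted later in efx_end_loopback().
 */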
static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
    struct efx_nic *efx = tx_queue->efx;
    struct efx_loopback_state *state = efx->loopback_selftest;
    struct efx_loopback_payload *payload;
    struct sk_buff *skb;
    int i;
    netdev_tx_t rc;

    /* Transmit N copies of buffer */
    for (i = 0; i < state->packet_count; i++) {
        /* Allocate an skb, holding an extra reference for
         * transmit completion counting */
        skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
        if (!skb)
            return -ENOMEM;
        state->skbs[i] = skb;
        skb_get(skb);

        /* Copy the payload in, incrementing the source address to
         * exercise the RSS vectors */
        payload = skb_put(skb, sizeof(state->payload));
        memcpy(payload, &state->payload, sizeof(state->payload));
        payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

        /* Ensure everything we've written is visible to the
         * interrupt handler. */
        smp_wmb();

        netif_tx_lock_bh(efx->net_dev);
        rc = efx_enqueue_skb(tx_queue, skb);
        netif_tx_unlock_bh(efx->net_dev);

        if (rc != NETDEV_TX_OK) {
            netif_err(efx, drv, efx->net_dev,
                  "TX queue %d could not transmit packet %d of "
                  "%d in %s loopback test\n", tx_queue->label,
                  i + 1, state->packet_count,
                  LOOPBACK_MODE(efx));

            /* Defer cleaning up the other skbs for the caller */
            kfree_skb(skb);
            return -EPIPE;
        }
    }

    return 0;
}

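/* Return true once every transmitted test packet has been received back */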
static int efx_poll_loopback(struct efx_nic *efx)
{
    struct efx_loopback_state *state = efx->loopback_selftest;

    return atomic_read(&state->rx_good) == state->packet_count;
}

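/* Drop the extra skb references taken in efx_begin_loopback(), count TX
 * completions and received packets, and record the results in @lb_tests.
 * Returns -ETIMEDOUT if either count falls short of the packets sent.
 */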
static int efx_end_loopback(struct efx_tx_queue *tx_queue,
                struct efx_loopback_self_tests *lb_tests)
{
    struct efx_nic *efx = tx_queue->efx;
    struct efx_loopback_state *state = efx->loopback_selftest;
    struct sk_buff *skb;
    int tx_done = 0, rx_good, rx_bad;
    int i, rc = 0;

    netif_tx_lock_bh(efx->net_dev);

    /* Count the number of TX completions, and decrement the refcnt. Any
     * skbs not already completed will be freed when the queue is flushed */
    for (i = 0; i < state->packet_count; i++) {
        skb = state->skbs[i];
        if (skb && !skb_shared(skb))
            ++tx_done;
        dev_kfree_skb(skb);
    }

    netif_tx_unlock_bh(efx->net_dev);

    /* Check TX completion and received packet counts */
    rx_good = atomic_read(&state->rx_good);
    rx_bad = atomic_read(&state->rx_bad);
    if (tx_done != state->packet_count) {
        /* Don't free the skbs; they will be picked up on TX
         * overflow or channel teardown.
         */
        netif_err(efx, drv, efx->net_dev,
              "TX queue %d saw only %d out of an expected %d "
              "TX completion events in %s loopback test\n",
              tx_queue->label, tx_done, state->packet_count,
              LOOPBACK_MODE(efx));
        rc = -ETIMEDOUT;
        /* Allow to fall through so we see the RX errors as well */
    }

    /* We may always be up to a flush away from our desired packet total */
    if (rx_good != state->packet_count) {
        netif_dbg(efx, drv, efx->net_dev,
              "TX queue %d saw only %d out of an expected %d "
              "received packets in %s loopback test\n",
              tx_queue->label, rx_good, state->packet_count,
              LOOPBACK_MODE(efx));
        rc = -ETIMEDOUT;
        /* Fall through */
    }

    /* Update loopback test structure */
    lb_tests->tx_sent[tx_queue->label] += state->packet_count;
    lb_tests->tx_done[tx_queue->label] += tx_done;
    lb_tests->rx_good += rx_good;
    lb_tests->rx_bad += rx_bad;

    return rc;
}

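/* Run three rounds of the loopback test on @tx_queue with increasing
 * burst lengths: 1 packet, then 16, then 256, each capped at a third of
 * the TX ring size.
 */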
static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
          struct efx_loopback_self_tests *lb_tests)
{
    struct efx_nic *efx = tx_queue->efx;
    struct efx_loopback_state *state = efx->loopback_selftest;
    int i, begin_rc, end_rc;

    for (i = 0; i < 3; i++) {
        /* Determine how many packets to send */
        state->packet_count = efx->txq_entries / 3;
        state->packet_count = min(1 << (i << 2), state->packet_count);
        state->skbs = kcalloc(state->packet_count,
                      sizeof(state->skbs[0]), GFP_KERNEL);
        if (!state->skbs)
            return -ENOMEM;
        state->flush = false;

        netif_dbg(efx, drv, efx->net_dev,
              "TX queue %d (hw %d) testing %s loopback with %d packets\n",
              tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
              state->packet_count);

        efx_iterate_state(efx);
        begin_rc = efx_begin_loopback(tx_queue);

        /* This will normally complete very quickly, but be
         * prepared to wait much longer. */
        msleep(1);
        if (!efx_poll_loopback(efx)) {
            msleep(LOOPBACK_TIMEOUT_MS);
            efx_poll_loopback(efx);
        }

        end_rc = efx_end_loopback(tx_queue, lb_tests);
        kfree(state->skbs);

        if (begin_rc || end_rc) {
            /* Wait a while to ensure there are no packets
             * floating around after a failure. */
            schedule_timeout_uninterruptible(HZ / 10);
            return begin_rc ? begin_rc : end_rc;
        }
    }

    netif_dbg(efx, drv, efx->net_dev,
          "TX queue %d passed %s loopback test with a burst length "
          "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
          state->packet_count);

    return 0;
}

/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
 * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
 * to delay and retry. Therefore, it's safer to just poll directly. Wait
 * for link up and any faults to dissipate. */
static int efx_wait_for_link(struct efx_nic *efx)
{
    struct efx_link_state *link_state = &efx->link_state;
    int count, link_up_count = 0;
    bool link_up;

    for (count = 0; count < 40; count++) {
        schedule_timeout_uninterruptible(HZ / 10);

        if (efx->type->monitor != NULL) {
            mutex_lock(&efx->mac_lock);
            efx->type->monitor(efx);
            mutex_unlock(&efx->mac_lock);
        }

        mutex_lock(&efx->mac_lock);
        link_up = link_state->up;
        if (link_up)
            link_up = !efx->type->check_mac_fault(efx);
        mutex_unlock(&efx->mac_lock);

        if (link_up) {
            if (++link_up_count == 2)
                return 0;
        } else {
            link_up_count = 0;
        }
    }

    return -ETIMEDOUT;
}

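/* Cycle the port through every loopback mode requested in @loopback_modes,
 * waiting for link in each mode and then running the loopback test on
 * every TX queue of the first TX channel.
 */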
static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
                  unsigned int loopback_modes)
{
    enum efx_loopback_mode mode;
    struct efx_loopback_state *state;
    struct efx_channel *channel =
        efx_get_channel(efx, efx->tx_channel_offset);
    struct efx_tx_queue *tx_queue;
    int rc = 0;

    /* Set the port loopback_selftest member. From this point on
     * all received packets will be dropped. Mark the state as
     * "flushing" so all inflight packets are dropped */
    state = kzalloc(sizeof(*state), GFP_KERNEL);
    if (state == NULL)
        return -ENOMEM;
    BUG_ON(efx->loopback_selftest);
    state->flush = true;
    efx->loopback_selftest = state;

    /* Test all supported loopback modes */
    for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
        if (!(loopback_modes & (1 << mode)))
            continue;

        /* Move the port into the specified loopback mode. */
        state->flush = true;
        mutex_lock(&efx->mac_lock);
        efx->loopback_mode = mode;
        rc = __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);
        if (rc) {
            netif_err(efx, drv, efx->net_dev,
                  "unable to move into %s loopback\n",
                  LOOPBACK_MODE(efx));
            goto out;
        }

        rc = efx_wait_for_link(efx);
        if (rc) {
            netif_err(efx, drv, efx->net_dev,
                  "loopback %s never came up\n",
                  LOOPBACK_MODE(efx));
            goto out;
        }

        /* Test all enabled types of TX queue */
        efx_for_each_channel_tx_queue(tx_queue, channel) {
            state->offload_csum = (tx_queue->type &
                           EFX_TXQ_TYPE_OUTER_CSUM);
            rc = efx_test_loopback(tx_queue,
                           &tests->loopback[mode]);
            if (rc)
                goto out;
        }
    }

 out:
    /* Remove the flush. The caller will remove the loopback setting */
    state->flush = true;
    efx->loopback_selftest = NULL;
    wmb();
    kfree(state);

    if (rc == -EPERM)
        rc = 0;

    return rc;
}

/**************************************************************************
 *
 * Entry point
 *
 *************************************************************************/

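/* Run the requested self-tests.  The online (non-disruptive) tests always
 * run; the offline (disruptive) tests run only when ETH_TEST_FL_OFFLINE is
 * set, with the device detached for their duration.  Returns the first
 * failure code encountered, or 0 if everything passed.
 */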
int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
         unsigned flags)
{
    enum efx_loopback_mode loopback_mode = efx->loopback_mode;
    int phy_mode = efx->phy_mode;
    int rc_test = 0, rc_reset, rc;

    efx_selftest_async_cancel(efx);

    /* Online (i.e. non-disruptive) testing
     * This checks interrupt generation, event delivery and PHY presence. */

    rc = efx_test_phy_alive(efx, tests);
    if (rc && !rc_test)
        rc_test = rc;

    rc = efx_test_nvram(efx, tests);
    if (rc && !rc_test)
        rc_test = rc;

    rc = efx_test_interrupts(efx, tests);
    if (rc && !rc_test)
        rc_test = rc;

    rc = efx_test_eventq_irq(efx, tests);
    if (rc && !rc_test)
        rc_test = rc;

    if (rc_test)
        return rc_test;

    if (!(flags & ETH_TEST_FL_OFFLINE))
        return efx_test_phy(efx, tests, flags);

    /* Offline (i.e. disruptive) testing
     * This checks MAC and PHY loopback on the specified port. */

    /* Detach the device so the kernel doesn't transmit during the
     * loopback test and the watchdog timeout doesn't fire.
     */
    efx_device_detach_sync(efx);

    if (efx->type->test_chip) {
        rc_reset = efx->type->test_chip(efx, tests);
        if (rc_reset) {
            netif_err(efx, hw, efx->net_dev,
                  "Unable to recover from chip test\n");
            efx_schedule_reset(efx, RESET_TYPE_DISABLE);
            return rc_reset;
        }

        if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
            rc_test = -EIO;
    }

    /* Ensure that the phy is powered and out of loopback
     * for the bist and loopback tests */
    mutex_lock(&efx->mac_lock);
    efx->phy_mode &= ~PHY_MODE_LOW_POWER;
    efx->loopback_mode = LOOPBACK_NONE;
    __efx_reconfigure_port(efx);
    mutex_unlock(&efx->mac_lock);

    rc = efx_test_phy(efx, tests, flags);
    if (rc && !rc_test)
        rc_test = rc;

    rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
    if (rc && !rc_test)
        rc_test = rc;

    /* restore the PHY to the previous state */
    mutex_lock(&efx->mac_lock);
    efx->phy_mode = phy_mode;
    efx->loopback_mode = loopback_mode;
    __efx_reconfigure_port(efx);
    mutex_unlock(&efx->mac_lock);

    efx_device_attach_if_not_resetting(efx);

    return rc_test;
}

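/* Kick off an asynchronous event-queue interrupt test on every channel;
 * efx_selftest_async_work() checks and logs the results once IRQ_TIMEOUT
 * has elapsed.
 */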
void efx_selftest_async_start(struct efx_nic *efx)
{
    struct efx_channel *channel;

    efx_for_each_channel(channel, efx)
        efx_nic_event_test_start(channel);
    schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
}

void efx_selftest_async_cancel(struct efx_nic *efx)
{
    cancel_delayed_work_sync(&efx->selftest_work);
}

static void efx_selftest_async_work(struct work_struct *data)
{
    struct efx_nic *efx = container_of(data, struct efx_nic,
                       selftest_work.work);
    struct efx_channel *channel;
    int cpu;

    efx_for_each_channel(channel, efx) {
        cpu = efx_nic_event_test_irq_cpu(channel);
        if (cpu < 0)
            netif_err(efx, ifup, efx->net_dev,
                  "channel %d failed to trigger an interrupt\n",
                  channel->channel);
        else
            netif_dbg(efx, ifup, efx->net_dev,
                  "channel %d triggered interrupt on CPU %d\n",
                  channel->channel, cpu);
    }
}

void efx_selftest_async_init(struct efx_nic *efx)
{
    INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
}