// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"

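/* Short packets are copied rather than mapped for DMA: each TX queue owns an
 * array of DMA-coherent pages (tx_queue->cb_page), and each page is carved
 * into PAGE_SIZE >> EF4_TX_CB_ORDER copy buffers of 1 << EF4_TX_CB_ORDER
 * bytes.  This helper returns the copy buffer corresponding to the current
 * insert index, allocating the backing page lazily (GFP_ATOMIC, so it may
 * fail and return NULL).  The NET_IP_ALIGN offset presumably keeps the copied
 * headers aligned the same way as received frames.
 */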
static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
					 struct ef4_tx_buffer *buffer)
{
	unsigned int index = ef4_tx_queue_get_insert_index(tx_queue);
	struct ef4_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)];
	unsigned int offset =
		((index << EF4_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}

u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
				   struct ef4_tx_buffer *buffer, size_t len)
{
	if (len > EF4_TX_CB_SIZE)
		return NULL;
	return ef4_tx_get_copy_buffer(tx_queue, buffer);
}

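/* Release a single descriptor.  If it carries a DMA mapping, unmap it (the
 * original mapping address is recovered via dma_offset, since only the final
 * descriptor of a fragment holds the unmap state); if it carries the skb for
 * a packet, count the completion and free the skb.  Used both by the
 * completion path and by ef4_enqueue_unwind(), which passes NULL counters.
 */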
static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
			       struct ef4_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
		if (buffer->flags & EF4_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EF4_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
{
	/* This is probably too much since we don't have any TSO support;
	 * it's a left-over from when we had Software TSO.  But it's safer
	 * to leave it as-is than try to determine a new bound.
	 */
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EF4_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround,
	 * or for option descriptors
	 */
	if (EF4_WORKAROUND_5391(efx))
		max_descs += EF4_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EF4_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
						EF4_PAGE_SIZE));

	return max_descs;
}

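/* Stop the core TX queue when the pair of hardware queues backing it is close
 * to full.  The first check uses the cached old_read_count; only if that
 * looks full do we stop the queue, re-read read_count (ordered by the memory
 * barrier) and restart the queue if the fresh fill level is low enough, which
 * closes the race against the completion path described in the comment below.
 */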
static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(txq1);
	struct ef4_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	txq1->old_read_count = READ_ONCE(txq1->read_count);
	txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EF4_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

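/* Send a packet by copying it into a copy buffer rather than mapping it.
 * Used for packets shorter than tx_queue->tx_min_size (which are zero-padded
 * up to that size) and for short fragmented packets, so that they occupy a
 * single descriptor.  The skb is attached to the descriptor so that the
 * completion path can free it.
 */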
static int ef4_enqueue_skb_copy(struct ef4_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int min_len = tx_queue->tx_min_size;
	unsigned int copy_len = skb->len;
	struct ef4_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EF4_BUG_ON_PARANOID(copy_len > EF4_TX_CB_SIZE);

	buffer = ef4_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = ef4_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EF4_WARN_ON_PARANOID(rc);
	if (unlikely(copy_len < min_len)) {
		memset(copy_buffer + copy_len, 0, min_len - copy_len);
		buffer->len = min_len;
	} else {
		buffer->len = copy_len;
	}

	buffer->skb = skb;
	buffer->flags = EF4_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

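/* Create descriptors for a DMA-contiguous chunk of a packet.  The chunk is
 * split wherever the NIC's tx_limit_len() method requires it (NIC-dependent
 * DMA length limits), and the last descriptor written is returned so the
 * caller can attach the unmap and skb state to it.
 */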
static struct ef4_tx_buffer *ef4_tx_map_chunk(struct ef4_tx_queue *tx_queue,
					      dma_addr_t dma_addr,
					      size_t len)
{
	const struct ef4_nic_type *nic_type = tx_queue->efx->type;
	struct ef4_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EF4_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue.
 */
static int ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct ef4_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EF4_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	/* Add descriptors for each fragment. */
	do {
		struct ef4_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = ef4_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EF4_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EF4_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment,
					    0, len, DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void ef4_enqueue_unwind(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = __ef4_tx_queue_get_insert_buffer(tx_queue);
		ef4_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from ef4_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
	bool data_mapped = false;
	unsigned int skb_len;

	skb_len = skb->len;
	EF4_WARN_ON_PARANOID(skb_is_gso(skb));

	if (skb_len < tx_queue->tx_min_size ||
			(skb->data_len && skb_len <= EF4_TX_CB_SIZE)) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (ef4_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (ef4_tx_map_data(tx_queue, skb)))
		goto err;

	/* Update BQL */
	netdev_tx_sent_queue(tx_queue->core_txq, skb_len);

	/* Pass off to hardware */
	if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) {
		struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if those
		 * SKBs had skb->xmit_more set. If we do not push those they
		 * could be left for a long time and cause a netdev watchdog.
		 */
		if (txq2->xmit_more_available)
			ef4_nic_push_buffers(txq2);

		ef4_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = netdev_xmit_more();
	}

	tx_queue->tx_packets++;

	ef4_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;

err:
	ef4_enqueue_unwind(tx_queue);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void ef4_dequeue_buffers(struct ef4_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct ef4_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EF4_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			ef4_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		ef4_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_tx_queue *tx_queue;
	unsigned index, type;

	EF4_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EF4_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EF4_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = ef4_get_tx_queue(efx, index, type);

	return ef4_enqueue_skb(tx_queue, skb);
}

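/* Record which core netdev TX queue this hardware queue services.  High
 * priority queues map onto the upper half of the core queues; the mapping
 * must remain the inverse of the lookup in ef4_hard_start_xmit().
 */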
void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in ef4_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EF4_TXQ_TYPES +
				    ((tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

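/* Handle traffic-class configuration (presumably wired up as the driver's
 * .ndo_setup_tc).  Only mqprio offload is accepted: up to EF4_MAX_TX_TC
 * classes are mapped onto blocks of n_tx_channels queues, with the additional
 * classes served by the high-priority TX queues, which are probed and
 * initialised here on first use and only torn down by the channel teardown
 * path (see the comment near the end).
 */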
int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	unsigned tc, num_tc;
	int rc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	num_tc = mqprio->num_tc;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		ef4_for_each_channel(channel, efx) {
			ef4_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = ef4_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					ef4_init_tx_queue(tx_queue);
				ef4_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to ef4_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

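/* Process TX completions for descriptors up to and including @index: release
 * the completed buffers, accumulate the pkts_compl/bytes_compl totals for the
 * caller (presumably used for BQL completion accounting), wake the core queue
 * if it was stopped and has drained below txq_wake_thresh, and record an
 * "empty queue" snapshot in empty_read_count, presumably consulted by the
 * descriptor-push code.  Presumably called from event processing in the
 * channel's NAPI context.
 */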
void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct ef4_nic *efx = tx_queue->efx;
	struct ef4_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EF4_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	ef4_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * ef4_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = ef4_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EF4_EMPTY_COUNT_VALID;
		}
	}
}

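/* Number of pages needed to give every entry in the descriptor ring its own
 * copy buffer (each page holds PAGE_SIZE >> EF4_TX_CB_ORDER of them).
 */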
static unsigned int ef4_tx_cb_page_count(struct ef4_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EF4_TX_CB_ORDER);
}

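/* Allocate the resources for a TX queue: the software descriptor ring
 * (tx_queue->buffer), the copy-buffer page array and the hardware ring via
 * ef4_nic_probe_tx().  The ring size is efx->txq_entries rounded up to a
 * power of two so that the ring can be indexed with ptr_mask.
 */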
int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EF4_MIN_DMAQ_SIZE);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(ef4_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = ef4_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

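/* Reset a TX queue's software state and program the hardware descriptor
 * ring.  Only counters are reinitialised before ef4_nic_init_tx() is run, so
 * this is presumably safe to call again on an already-probed queue, e.g.
 * after a device reset.
 */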
void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EF4_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;

	/* Some older hardware requires Tx writes larger than 32. */
	tx_queue->tx_min_size = EF4_WORKAROUND_15592(efx) ? 33 : 0;

	/* Set up TX descriptor ring */
	ef4_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

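/* Release any buffers still sitting in the software ring (presumably after
 * the hardware queue has already been flushed) and reset the BQL state for
 * the core queue.
 */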
void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		ef4_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

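/* Free everything allocated by ef4_probe_tx_queue(): the hardware ring, the
 * copy-buffer pages and the software ring.
 */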
void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	ef4_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < ef4_tx_cb_page_count(tx_queue); i++)
			ef4_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}