/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_values.h"
#include "../cxgb4/t4fw_api.h"
#include "../cxgb4/t4_msg.h"

/*
 * Constants ...
 */
enum {
    /*
     * Egress Queue sizes, producer and consumer indices are all in units
     * of Egress Queue Units (EQ_UNIT bytes each).  Note that as far as
     * the hardware is concerned, the free list is an Egress Queue (the
     * host produces free buffers which the hardware consumes) and free
     * list entries are 64-bit PCI DMA addresses.
     */
    EQ_UNIT = SGE_EQ_IDXSIZE,
    FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
    TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

    /*
     * Max number of TX descriptors we clean up at a time.  Should be
     * modest as freeing skbs isn't cheap and it happens while holding
     * locks.  We just need to free packets faster than they arrive; we
     * eventually catch up and keep the amortized cost reasonable.
     */
    MAX_TX_RECLAIM = 16,

    /*
     * Max number of Rx buffers we replenish at a time.  Again keep this
     * modest, allocating buffers isn't cheap either.
     */
    MAX_RX_REFILL = 16,

    /*
     * Period of the Rx queue check timer.  This timer is infrequent as it
     * has something to do only when the system experiences severe memory
     * shortage.
     */
    RX_QCHECK_PERIOD = (HZ / 2),

    /*
     * Period of the TX queue check timer and the maximum number of TX
     * descriptors to be reclaimed by the TX timer.
     */
    TX_QCHECK_PERIOD = (HZ / 2),
    MAX_TIMER_TX_RECLAIM = 100,

    /*
     * Suspend an Ethernet TX queue with fewer available descriptors than
     * this.  We always want to have room for a maximum sized packet:
     * inline immediate data + MAX_SKB_FRAGS. This is the same as
     * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
     * (see that function and its helpers for a description of the
     * calculation).
     */
    ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
    ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
                   ((ETHTXQ_MAX_FRAGS-1) & 1) +
                   2),
    ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
              sizeof(struct cpl_tx_pkt_lso_core) +
              sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
    ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,

    ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),

    /*
     * Max TX descriptor space we allow for an Ethernet packet to be
     * inlined into a WR.  This is limited by the maximum value which
     * we can specify for immediate data in the firmware Ethernet TX
     * Work Request.
     */
    MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,

    /*
     * Max size of a WR sent through a control TX queue.
     */
    MAX_CTRL_WR_LEN = 256,

    /*
     * Maximum amount of data which we'll ever need to inline into a
     * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
     */
    MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
              ? MAX_IMM_TX_PKT_LEN
              : MAX_CTRL_WR_LEN),

    /*
     * For incoming packets less than RX_COPY_THRES, we copy the data into
     * an skb rather than referencing the data.  We allocate enough
     * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
     * of the data (header).
     */
    RX_COPY_THRES = 256,
    RX_PULL_LEN = 128,

    /*
     * Main body length for sk_buffs used for RX Ethernet packets with
     * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
     * pskb_may_pull() some room.
     */
    RX_SKB_LEN = 512,
};
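
/*
 * Editorial worked example of the sizing constants above (assuming
 * SGE_EQ_IDXSIZE == 64 and MAX_SKB_FRAGS == 17, as is typical with 4KB
 * pages; neither value is asserted by this file):
 *
 *	EQ_UNIT = 64, so FL_PER_EQ_UNIT = TXD_PER_EQ_UNIT = 64/8 = 8;
 *	ETHTXQ_MAX_FRAGS   = 17 + 1 = 18
 *	ETHTXQ_MAX_SGL_LEN = (3*17)/2 + (17 & 1) + 2 = 25 + 1 + 2 = 28
 *
 * ETHTXQ_STOP_THRES then works out to one spare TX descriptor plus
 * however many 8-flit descriptors ETHTXQ_MAX_FLITS occupies.
 */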

/*
 * Software state per TX descriptor.
 */
struct tx_sw_desc {
    struct sk_buff *skb;        /* socket buffer of TX data source */
    struct ulptx_sgl *sgl;      /* scatter/gather list in TX Queue */
};

/*
 * Software state per RX Free List descriptor.  We keep track of the allocated
 * FL page, its size, and its PCI DMA address (if the page is mapped).  The FL
 * page size and its PCI DMA mapped state are stored in the low bits of the
 * PCI DMA address as per below.
 */
struct rx_sw_desc {
    struct page *page;      /* Free List page buffer */
    dma_addr_t dma_addr;        /* PCI DMA address (if mapped) */
                    /*   and flags (see below) */
};

/*
 * The low bits of rx_sw_desc.dma_addr have special meaning.  Note that the
 * SGE also uses the low 4 bits to determine the size of the buffer.  It uses
 * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
 * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
 * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
 * to the SGE.  Thus, our software state of "is the buffer mapped for DMA" is
 * maintained in an inverse sense so the hardware never sees that bit high.
 */
enum {
    RX_LARGE_BUF    = 1 << 0,   /* buffer is SGE_FL_BUFFER_SIZE[1] */
    RX_UNMAPPED_BUF = 1 << 1,   /* buffer is not mapped */
};
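
/*
 * Editorial sketch of the encoding described above (illustrative; not part
 * of the original source).  A mapped "large" buffer reaches the SGE with
 * only bit 0 set -- the page-aligned bus address leaves the low flag bits
 * clear -- and the flags are stripped before use as a real bus address:
 *
 *	dma_addr = dma_map_page(dev, page, 0, size, DMA_FROM_DEVICE);
 *	dma_addr |= RX_LARGE_BUF;	// SGE reads this as buffer size 1
 *	...
 *	bus_addr = get_buf_addr(sdesc);	// masks off both flag bits
 *	if (is_buf_mapped(sdesc))	// true: RX_UNMAPPED_BUF is clear
 *		dma_unmap_page(dev, bus_addr, size, DMA_FROM_DEVICE);
 */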

/**
 *  get_buf_addr - return DMA buffer address of software descriptor
 *  @sdesc: pointer to the software buffer descriptor
 *
 *  Return the DMA buffer address of a software descriptor (stripping out
 *  our low-order flag bits).
 */
static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
{
    return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
}

/**
 *  is_buf_mapped - is buffer mapped for DMA?
 *  @sdesc: pointer to the software buffer descriptor
 *
 *  Determine whether the buffer associated with a software descriptor is
 *  mapped for DMA or not.
 */
static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
{
    return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
}

/**
 *  need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *  Returns true if the platform needs sk_buff unmapping.  The compiler
 *  optimizes away the unmapping code when this returns a compile-time
 *  constant false.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
    return 1;
#else
    return 0;
#endif
}

/**
 *  txq_avail - return the number of available slots in a TX queue
 *  @tq: the TX queue
 *
 *  Returns the number of available descriptors in a TX queue.
 */
static inline unsigned int txq_avail(const struct sge_txq *tq)
{
    return tq->size - 1 - tq->in_use;
}

/**
 *  fl_cap - return the capacity of a Free List
 *  @fl: the Free List
 *
 *  Returns the capacity of a Free List.  The capacity is less than the
 *  size because an Egress Queue Index Unit worth of descriptors needs to
 *  be left unpopulated, otherwise the Producer and Consumer indices PIDX
 *  and CIDX will match and the hardware will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
    return fl->size - FL_PER_EQ_UNIT;
}

/**
 *  fl_starving - return whether a Free List is starving.
 *  @adapter: pointer to the adapter
 *  @fl: the Free List
 *
 *  Tests specified Free List to see whether the number of buffers
 *  available to the hardware has fallen below our "starvation"
 *  threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
                   const struct sge_fl *fl)
{
    const struct sge *s = &adapter->sge;

    return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

/**
 *  map_skb -  map an skb for DMA to the device
 *  @dev: the PCI device's core device
 *  @skb: the packet to map
 *  @addr: a pointer to the base of the DMA mapping array
 *
 *  Map an skb for DMA to the device and return an array of DMA addresses.
 */
static int map_skb(struct device *dev, const struct sk_buff *skb,
           dma_addr_t *addr)
{
    const skb_frag_t *fp, *end;
    const struct skb_shared_info *si;

    *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
    if (dma_mapping_error(dev, *addr))
        goto out_err;

    si = skb_shinfo(skb);
    end = &si->frags[si->nr_frags];
    for (fp = si->frags; fp < end; fp++) {
        *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
                       DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *addr))
            goto unwind;
    }
    return 0;

unwind:
    while (fp-- > si->frags)
        dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
    dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);

out_err:
    return -ENOMEM;
}
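
/*
 * Editorial note (not in the original source): map_skb() fills @addr with
 * one entry per buffer -- addr[0] for the skb's linear data and
 * addr[1..nr_frags] for its page fragments -- so callers must supply an
 * array of at least MAX_SKB_FRAGS + 1 entries, as t4vf_eth_xmit() below
 * does with its "dma_addr_t addr[MAX_SKB_FRAGS + 1]".
 */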

static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
              const struct ulptx_sgl *sgl, const struct sge_txq *tq)
{
    const struct ulptx_sge_pair *p;
    unsigned int nfrags = skb_shinfo(skb)->nr_frags;

    if (likely(skb_headlen(skb)))
        dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
                 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
    else {
        dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
                   be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
        nfrags--;
    }

    /*
     * The complexity below is because of the possibility of a wrap-around
     * in the middle of an SGL: a {Length[i], Length[i+1], Address[i],
     * Address[i+1]} quadruple can be split at the end of the descriptor
     * ring, so each branch below handles a different split point.
     */
    for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
        if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
unmap:
            dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
                       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
            dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
                       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
            p++;
        } else if ((u8 *)p == (u8 *)tq->stat) {
            p = (const struct ulptx_sge_pair *)tq->desc;
            goto unmap;
        } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
            const __be64 *addr = (const __be64 *)tq->desc;

            dma_unmap_page(dev, be64_to_cpu(addr[0]),
                       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
            dma_unmap_page(dev, be64_to_cpu(addr[1]),
                       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
            p = (const struct ulptx_sge_pair *)&addr[2];
        } else {
            const __be64 *addr = (const __be64 *)tq->desc;

            dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
                       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
            dma_unmap_page(dev, be64_to_cpu(addr[0]),
                       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
            p = (const struct ulptx_sge_pair *)&addr[1];
        }
    }
    if (nfrags) {
        __be64 addr;

        if ((u8 *)p == (u8 *)tq->stat)
            p = (const struct ulptx_sge_pair *)tq->desc;
        addr = ((u8 *)p + 16 <= (u8 *)tq->stat
            ? p->addr[0]
            : *(const __be64 *)tq->desc);
        dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
                   DMA_TO_DEVICE);
    }
}

/**
 *  free_tx_desc - reclaims TX descriptors and their buffers
 *  @adapter: the adapter
 *  @tq: the TX queue to reclaim descriptors from
 *  @n: the number of descriptors to reclaim
 *  @unmap: whether the buffers should be unmapped for DMA
 *
 *  Reclaims TX descriptors from an SGE TX queue and frees the associated
 *  TX buffers.  Called with the TX queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
             unsigned int n, bool unmap)
{
    struct tx_sw_desc *sdesc;
    unsigned int cidx = tq->cidx;
    struct device *dev = adapter->pdev_dev;

    const int need_unmap = need_skb_unmap() && unmap;

    sdesc = &tq->sdesc[cidx];
    while (n--) {
        /*
         * If we kept a reference to the original TX skb, we need to
         * unmap it from PCI DMA space (if required) and free it.
         */
        if (sdesc->skb) {
            if (need_unmap)
                unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
            dev_consume_skb_any(sdesc->skb);
            sdesc->skb = NULL;
        }

        sdesc++;
        if (++cidx == tq->size) {
            cidx = 0;
            sdesc = tq->sdesc;
        }
    }
    tq->cidx = cidx;
}

/*
 * Return the number of reclaimable descriptors in a TX queue.
 */
static inline int reclaimable(const struct sge_txq *tq)
{
    int hw_cidx = be16_to_cpu(tq->stat->cidx);
    int reclaimable = hw_cidx - tq->cidx;
    if (reclaimable < 0)
        reclaimable += tq->size;
    return reclaimable;
}
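
/*
 * Editorial worked example: with tq->size == 1024, a hardware CIDX of 5
 * and a software CIDX of 1000, hw_cidx - tq->cidx == -995, i.e. the
 * hardware index has wrapped past the end of the ring, so reclaimable()
 * returns -995 + 1024 == 29 descriptors.
 */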

/**
 *  reclaim_completed_tx - reclaims completed TX descriptors
 *  @adapter: the adapter
 *  @tq: the TX queue to reclaim completed descriptors from
 *  @unmap: whether the buffers should be unmapped for DMA
 *
 *  Reclaims TX descriptors that the SGE has indicated it has processed,
 *  and frees the associated buffers if possible.  Called with the TX
 *  queue locked.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
                    struct sge_txq *tq,
                    bool unmap)
{
    int avail = reclaimable(tq);

    if (avail) {
        /*
         * Limit the amount of clean up work we do at a time to keep
         * the TX lock hold time O(1).
         */
        if (avail > MAX_TX_RECLAIM)
            avail = MAX_TX_RECLAIM;

        free_tx_desc(adapter, tq, avail, unmap);
        tq->in_use -= avail;
    }
}

/**
 *  get_buf_size - return the size of an RX Free List buffer.
 *  @adapter: pointer to the associated adapter
 *  @sdesc: pointer to the software buffer descriptor
 */
static inline int get_buf_size(const struct adapter *adapter,
                   const struct rx_sw_desc *sdesc)
{
    const struct sge *s = &adapter->sge;

    return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
        ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
}

/**
 *  free_rx_bufs - free RX buffers on an SGE Free List
 *  @adapter: the adapter
 *  @fl: the SGE Free List to free buffers from
 *  @n: how many buffers to free
 *
 *  Release the next @n buffers on an SGE Free List RX queue.  The
 *  buffers must be made inaccessible to hardware before calling this
 *  function.
 */
static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
{
    while (n--) {
        struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];

        if (is_buf_mapped(sdesc))
            dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
                       get_buf_size(adapter, sdesc),
                       DMA_FROM_DEVICE);
        put_page(sdesc->page);
        sdesc->page = NULL;
        if (++fl->cidx == fl->size)
            fl->cidx = 0;
        fl->avail--;
    }
}

/**
 *  unmap_rx_buf - unmap the current RX buffer on an SGE Free List
 *  @adapter: the adapter
 *  @fl: the SGE Free List
 *
 *  Unmap the current buffer on an SGE Free List RX queue.  The
 *  buffer must be made inaccessible to HW before calling this function.
 *
 *  This is similar to free_rx_bufs() above but does not free the buffer.
 *  Do note that the FL still loses any further access to the buffer.
 *  This is used predominantly to "transfer ownership" of an FL buffer
 *  to another entity (typically an skb's fragment list).
 */
static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
{
    struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];

    if (is_buf_mapped(sdesc))
        dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
                   get_buf_size(adapter, sdesc),
                   DMA_FROM_DEVICE);
    sdesc->page = NULL;
    if (++fl->cidx == fl->size)
        fl->cidx = 0;
    fl->avail--;
}

/**
 *  ring_fl_db - ring doorbell on free list
 *  @adapter: the adapter
 *  @fl: the Free List whose doorbell should be rung ...
 *
 *  Tell the Scatter Gather Engine that there are new free list entries
 *  available.
 */
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
    u32 val = adapter->params.arch.sge_fl_db;

    /* The SGE keeps track of its Producer and Consumer Indices in terms
     * of Egress Queue Units so we can only tell it about whole multiples
     * of Free List Entries per Egress Queue Unit ...
     */
    if (fl->pend_cred >= FL_PER_EQ_UNIT) {
        if (is_t4(adapter->params.chip))
            val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
        else
            val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);

        /* Make sure all memory writes to the Free List queue are
         * committed before we tell the hardware about them.
         */
        wmb();

        /* If we don't have access to the new User Doorbell (T5+), use
         * the old doorbell mechanism; otherwise use the new BAR2
         * mechanism.
         */
        if (unlikely(fl->bar2_addr == NULL)) {
            t4_write_reg(adapter,
                     T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
                     QID_V(fl->cntxt_id) | val);
        } else {
            writel(val | QID_V(fl->bar2_qid),
                   fl->bar2_addr + SGE_UDB_KDOORBELL);

            /* This Write memory Barrier will force the write to
             * the User Doorbell area to be flushed.
             */
            wmb();
        }
        fl->pend_cred %= FL_PER_EQ_UNIT;
    }
}
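
/*
 * Editorial worked example (assuming FL_PER_EQ_UNIT == 8): if
 * fl->pend_cred == 37, the doorbell write advertises a PIDX increment of
 * 37 / 8 == 4 Egress Queue Units (32 new buffers) and the remaining
 * 37 % 8 == 5 credits stay in fl->pend_cred until enough accumulate to
 * make up another whole unit.
 */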

/**
 *  set_rx_sw_desc - initialize software RX buffer descriptor
 *  @sdesc: pointer to the software RX buffer descriptor
 *  @page: pointer to the page data structure backing the RX buffer
 *  @dma_addr: PCI DMA address (possibly with low-bit flags)
 */
static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
                  dma_addr_t dma_addr)
{
    sdesc->page = page;
    sdesc->dma_addr = dma_addr;
}

/*
 * Support for poisoning RX buffers: set POISON_BUF_VAL to a non-negative
 * byte value to enable poisoning; the default of -1 compiles it out.
 */
#define POISON_BUF_VAL -1

static inline void poison_buf(struct page *page, size_t sz)
{
#if POISON_BUF_VAL >= 0
    memset(page_address(page), POISON_BUF_VAL, sz);
#endif
}

/**
 *  refill_fl - refill an SGE RX buffer ring
 *  @adapter: the adapter
 *  @fl: the Free List ring to refill
 *  @n: the number of new buffers to allocate
 *  @gfp: the gfp flags for the allocations
 *
 *  (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 *  allocated with the supplied gfp flags.  The caller must assure that
 *  @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
 *  EGRESS QUEUE UNITS_ indicates an empty Free List!  Returns the number
 *  of buffers allocated.  If afterwards the queue is found critically low,
 *  mark it as starving in the bitmap of starving FLs.
 */
static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                  int n, gfp_t gfp)
{
    struct sge *s = &adapter->sge;
    struct page *page;
    dma_addr_t dma_addr;
    unsigned int cred = fl->avail;
    __be64 *d = &fl->desc[fl->pidx];
    struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];

    /*
     * Sanity: ensure that the result of adding n Free List buffers
     * won't result in wrapping the SGE's Producer Index around to
     * its Consumer Index thereby indicating an empty Free List ...
     */
    BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);

    gfp |= __GFP_NOWARN;

    /*
     * If we support large pages, prefer large buffers and fail over to
     * small pages if we can't allocate large pages to satisfy the refill.
     * If we don't support large pages, drop directly into the small page
     * allocation code.
     */
    if (s->fl_pg_order == 0)
        goto alloc_small_pages;

    while (n) {
        page = __dev_alloc_pages(gfp, s->fl_pg_order);
        if (unlikely(!page)) {
            /*
             * We've failed in our attempt to allocate a "large
             * page".  Fail over to the "small page" allocation
             * below.
             */
            fl->large_alloc_failed++;
            break;
        }
        poison_buf(page, PAGE_SIZE << s->fl_pg_order);

        dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
                    PAGE_SIZE << s->fl_pg_order,
                    DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
            /*
             * We've run out of DMA mapping space.  Free up the
             * buffer and return with what we've managed to put
             * into the free list.  We don't want to fail over to
             * the small page allocation below in this case
             * because DMA mapping resources are typically
             * critical resources once they become scarce.
             */
            __free_pages(page, s->fl_pg_order);
            goto out;
        }
        dma_addr |= RX_LARGE_BUF;
        *d++ = cpu_to_be64(dma_addr);

        set_rx_sw_desc(sdesc, page, dma_addr);
        sdesc++;

        fl->avail++;
        if (++fl->pidx == fl->size) {
            fl->pidx = 0;
            sdesc = fl->sdesc;
            d = fl->desc;
        }
        n--;
    }

alloc_small_pages:
    while (n--) {
        page = __dev_alloc_page(gfp);
        if (unlikely(!page)) {
            fl->alloc_failed++;
            break;
        }
        poison_buf(page, PAGE_SIZE);

        dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
                       DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
            put_page(page);
            break;
        }
        *d++ = cpu_to_be64(dma_addr);

        set_rx_sw_desc(sdesc, page, dma_addr);
        sdesc++;

        fl->avail++;
        if (++fl->pidx == fl->size) {
            fl->pidx = 0;
            sdesc = fl->sdesc;
            d = fl->desc;
        }
    }

out:
    /*
     * Update our accounting state to incorporate the new Free List
     * buffers, tell the hardware about them and return the number of
     * buffers which we were able to allocate.
     */
    cred = fl->avail - cred;
    fl->pend_cred += cred;
    ring_fl_db(adapter, fl);

    if (unlikely(fl_starving(adapter, fl))) {
        smp_wmb();
        set_bit(fl->cntxt_id, adapter->sge.starving_fl);
    }

    return cred;
}

/*
 * Refill a Free List to its capacity or the Maximum Refill Increment,
 * whichever is smaller ...
 */
static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
{
    refill_fl(adapter, fl,
          min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
          GFP_ATOMIC);
}

/**
 *  alloc_ring - allocate resources for an SGE descriptor ring
 *  @dev: the PCI device's core device
 *  @nelem: the number of descriptors
 *  @hwsize: the size of each hardware descriptor
 *  @swsize: the size of each software descriptor
 *  @busaddrp: the physical PCI bus address of the allocated ring
 *  @swringp: return address pointer for software ring
 *  @stat_size: extra space in hardware ring for status information
 *
 *  Allocates resources for an SGE descriptor ring, such as TX queues,
 *  free buffer lists, response queues, etc.  Each SGE ring requires
 *  space for its hardware descriptors plus, optionally, space for software
 *  state associated with each hardware entry (the metadata).  The function
 *  returns three values: the virtual address for the hardware ring (the
 *  return value of the function), the PCI bus address of the hardware
 *  ring (in *busaddrp), and the address of the software ring (in swringp).
 *  Both the hardware and software rings are returned zeroed out.
 */
static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
            size_t swsize, dma_addr_t *busaddrp, void *swringp,
            size_t stat_size)
{
    /*
     * Allocate the hardware ring and PCI DMA bus address space for said.
     */
    size_t hwlen = nelem * hwsize + stat_size;
    void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);

    if (!hwring)
        return NULL;

    /*
     * If the caller wants a software ring, allocate it and return a
     * pointer to it in *swringp.
     */
    BUG_ON((swsize != 0) != (swringp != NULL));
    if (swsize) {
        void *swring = kcalloc(nelem, swsize, GFP_KERNEL);

        if (!swring) {
            dma_free_coherent(dev, hwlen, hwring, *busaddrp);
            return NULL;
        }
        *(void **)swringp = swring;
    }

    return hwring;
}
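
/*
 * Hypothetical usage sketch (editorial; the field names follow this
 * driver's free list state but the stat_size value of one EQ_UNIT is
 * illustrative, not taken from this file): allocating a Free List whose
 * hardware descriptors are 64-bit DMA addresses, with parallel software
 * state and trailing status space:
 *
 *	fl->desc = alloc_ring(adapter->pdev_dev, fl->size, sizeof(__be64),
 *			      sizeof(struct rx_sw_desc), &fl->addr,
 *			      &fl->sdesc, EQ_UNIT);
 *	if (!fl->desc)
 *		return -ENOMEM;
 */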

/**
 *  sgl_len - calculates the size of an SGL of the given capacity
 *  @n: the number of SGL entries
 *
 *  Calculates the number of flits (8-byte units) needed for a Direct
 *  Scatter/Gather List that can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
    /*
     * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
     * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
     * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
     * repeated sequences of { Length[i], Length[i+1], Address[i],
     * Address[i+1] } (this ensures that all addresses are on 64-bit
     * boundaries).  If N is even, then Length[N+1] should be set to 0 and
     * Address[N+1] is omitted.
     *
     * The following calculation incorporates all of the above.  It's
     * somewhat hard to follow but, briefly: the "+2" accounts for the
     * first two flits which include the DSGL header, Length0 and
     * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
     * flits for every pair of the remaining n-1 entries); and finally the
     * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd ...
     */
    n--;
    return (3 * n) / 2 + (n & 1) + 2;
}
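
/*
 * Editorial worked examples of sgl_len() (flit counts for small SGLs):
 *
 *	sgl_len(1) == 2	(header + Length0 + Address0)
 *	sgl_len(2) == 4	(+ Length1, Length2 == 0, Address1)
 *	sgl_len(3) == 5	(+ Length1, Length2, Address1, Address2)
 */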

/**
 *  flits_to_desc - returns the num of TX descriptors for the given flits
 *  @flits: the number of flits
 *
 *  Returns the number of TX descriptors needed for the supplied number
 *  of flits.
 */
static inline unsigned int flits_to_desc(unsigned int flits)
{
    BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
    return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
}

/**
 *  is_eth_imm - can an Ethernet packet be sent as immediate data?
 *  @skb: the packet
 *
 *  Returns whether an Ethernet packet is small enough to fit completely as
 *  immediate data.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
    /*
     * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
     * which does not accommodate immediate data.  We could dike out all
     * of the support code for immediate data but that would tie our hands
     * too much if we ever want to enhance the firmware.  It would also
     * create more differences between the PF and VF Drivers.
     */
    return false;
}

/**
 *  calc_tx_flits - calculate the number of flits for a packet TX WR
 *  @skb: the packet
 *
 *  Returns the number of flits needed for a TX Work Request for the
 *  given Ethernet packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
    unsigned int flits;

    /*
     * If the skb is small enough, we can pump it out as a work request
     * with only immediate data.  In that case we just have to have the
     * TX Packet header plus the skb data in the Work Request.
     */
    if (is_eth_imm(skb))
        return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
                    sizeof(__be64));

    /*
     * Otherwise, we're going to have to construct a Scatter/Gather List
     * of the skb body and fragments.  We also include the flits necessary
     * for the TX Packet Work Request and CPL.  We always have a firmware
     * Write Header (incorporated as part of the cpl_tx_pkt_lso and
     * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
     * message or, if we're doing a Large Send Offload, an LSO CPL message
     * with an embedded TX Packet Write CPL message.
     */
    flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
    if (skb_shinfo(skb)->gso_size)
        flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
              sizeof(struct cpl_tx_pkt_lso_core) +
              sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
    else
        flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
              sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
    return flits;
}
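
/*
 * Editorial worked example (assuming the common layout in which
 * sizeof(struct fw_eth_tx_pkt_vm_wr) == 32 and
 * sizeof(struct cpl_tx_pkt_core) == 16; neither size is asserted here):
 * a non-GSO packet with two page fragments needs sgl_len(3) == 5 flits
 * of SGL plus (32 + 16)/8 == 6 flits of WR/CPL header, i.e. 11 flits,
 * which flits_to_desc() rounds up to 2 TX descriptors when
 * TXD_PER_EQ_UNIT == 8.
 */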

/**
 *  write_sgl - populate a Scatter/Gather List for a packet
 *  @skb: the packet
 *  @tq: the TX queue we are writing into
 *  @sgl: starting location for writing the SGL
 *  @end: points right after the end of the SGL
 *  @start: start offset into skb main-body data to include in the SGL
 *  @addr: the list of DMA bus addresses for the SGL elements
 *
 *  Generates a Scatter/Gather List for the buffers that make up a packet.
 *  The caller must provide adequate space for the SGL that will be written.
 *  The SGL includes all of the packet's page fragments and the data in its
 *  main body except for the first @start bytes.  @sgl must be 16-byte
 *  aligned and within a TX descriptor with available space.  @end points
 *  right after the end of the SGL but does not account for any potential
 *  wrap around, i.e., @end > @tq->stat.
 */
static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
              struct ulptx_sgl *sgl, u64 *end, unsigned int start,
              const dma_addr_t *addr)
{
    unsigned int i, len;
    struct ulptx_sge_pair *to;
    const struct skb_shared_info *si = skb_shinfo(skb);
    unsigned int nfrags = si->nr_frags;
    struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];

    len = skb_headlen(skb) - start;
    if (likely(len)) {
        sgl->len0 = htonl(len);
        sgl->addr0 = cpu_to_be64(addr[0] + start);
        nfrags++;
    } else {
        sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
        sgl->addr0 = cpu_to_be64(addr[1]);
    }

    sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                  ULPTX_NSGE_V(nfrags));
    if (likely(--nfrags == 0))
        return;
    /*
     * Most of the complexity below deals with the possibility we hit the
     * end of the queue in the middle of writing the SGL.  For this case
     * only we create the SGL in a temporary buffer and then copy it.
     */
    to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;

    for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
        to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
        to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
        to->addr[0] = cpu_to_be64(addr[i]);
        to->addr[1] = cpu_to_be64(addr[++i]);
    }
    if (nfrags) {
        to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
        to->len[1] = cpu_to_be32(0);
        to->addr[0] = cpu_to_be64(addr[i + 1]);
    }
    if (unlikely((u8 *)end > (u8 *)tq->stat)) {
        unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;

        if (likely(part0))
            memcpy(sgl->sge, buf, part0);
        part1 = (u8 *)end - (u8 *)tq->stat;
        memcpy(tq->desc, (u8 *)buf + part0, part1);
        end = (void *)tq->desc + part1;
    }
    if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
        *end = 0;
}

/**
 *  ring_tx_db - check and potentially ring a TX queue's doorbell
 *  @adapter: the adapter
 *  @tq: the TX queue
 *  @n: number of new descriptors to give to HW
 *
 *  Ring the doorbell for a TX queue.
 */
static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                  int n)
{
    /* Make sure that all writes to the TX Descriptors are committed
     * before we tell the hardware about them.
     */
    wmb();

    /* If we don't have access to the new User Doorbell (T5+), use the old
     * doorbell mechanism; otherwise use the new BAR2 mechanism.
     */
    if (unlikely(tq->bar2_addr == NULL)) {
        u32 val = PIDX_V(n);

        t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
                 QID_V(tq->cntxt_id) | val);
    } else {
        u32 val = PIDX_T5_V(n);

        /* T4 and later chips share the same PIDX field offset within
         * the doorbell, but T5 and later shrank the field in order to
         * gain a bit for Doorbell Priority.  The field was absurdly
         * large in the first place (14 bits) so we just use the T5
         * and later limits and warn if a PIDX Increment is too large.
         */
        WARN_ON(val & DBPRIO_F);

        /* If we're only writing a single Egress Unit and the BAR2
         * Queue ID is 0, we can use the Write Combining Doorbell
         * Gather Buffer; otherwise we use the simple doorbell.
         */
        if (n == 1 && tq->bar2_qid == 0) {
            unsigned int index = (tq->pidx
                          ? (tq->pidx - 1)
                          : (tq->size - 1));
            __be64 *src = (__be64 *)&tq->desc[index];
            __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
                             SGE_UDB_WCDOORBELL);
            unsigned int count = EQ_UNIT / sizeof(__be64);

            /* Copy the TX Descriptor in a tight loop in order to
             * try to get it to the adapter in a single Write
             * Combined transfer on the PCI-E Bus.  If the Write
             * Combine fails (say because of an interrupt, etc.)
             * the hardware will simply take the last write as a
             * simple doorbell write with a PIDX Increment of 1
             * and will fetch the TX Descriptor from memory via
             * DMA.
             */
            while (count) {
                /* the (__force u64) is because the compiler
                 * doesn't understand the endian swizzling
                 * going on
                 */
                writeq((__force u64)*src, dst);
                src++;
                dst++;
                count--;
            }
        } else
            writel(val | QID_V(tq->bar2_qid),
                   tq->bar2_addr + SGE_UDB_KDOORBELL);

        /* This Write Memory Barrier will force the write to the User
         * Doorbell area to be flushed.  This is needed to prevent
         * writes on different CPUs for the same queue from hitting
         * the adapter out of order.  This is required when some Work
         * Requests take the Write Combine Gather Buffer path (user
         * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
         * take the traditional path where we simply increment the
         * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
         * hardware DMA read the actual Work Request.
         */
        wmb();
    }
}

/**
 *  inline_tx_skb - inline a packet's data into TX descriptors
 *  @skb: the packet
 *  @tq: the TX queue where the packet will be inlined
 *  @pos: starting position in the TX queue to inline the packet
 *
 *  Inline a packet's contents directly into TX descriptors, starting at
 *  the given position within the TX DMA ring.
 *  Most of the complexity of this operation is dealing with wrap arounds
 *  in the middle of the packet we want to inline.
 */
static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
              void *pos)
{
    u64 *p;
    int left = (void *)tq->stat - pos;

    if (likely(skb->len <= left)) {
        if (likely(!skb->data_len))
            skb_copy_from_linear_data(skb, pos, skb->len);
        else
            skb_copy_bits(skb, 0, pos, skb->len);
        pos += skb->len;
    } else {
        skb_copy_bits(skb, 0, pos, left);
        skb_copy_bits(skb, left, tq->desc, skb->len - left);
        pos = (void *)tq->desc + (skb->len - left);
    }

    /* 0-pad to multiple of 16 */
    p = PTR_ALIGN(pos, 8);
    if ((uintptr_t)p & 8)
        *p = 0;
}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
    int csum_type;
    const struct iphdr *iph = ip_hdr(skb);

    if (iph->version == 4) {
        if (iph->protocol == IPPROTO_TCP)
            csum_type = TX_CSUM_TCPIP;
        else if (iph->protocol == IPPROTO_UDP)
            csum_type = TX_CSUM_UDPIP;
        else {
nocsum:
            /*
             * unknown protocol, disable HW csum
             * and hope a bad packet is detected
             */
            return TXPKT_L4CSUM_DIS_F;
        }
    } else {
        /*
         * this doesn't work with extension headers
         */
        const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;

        if (ip6h->nexthdr == IPPROTO_TCP)
            csum_type = TX_CSUM_TCPIP6;
        else if (ip6h->nexthdr == IPPROTO_UDP)
            csum_type = TX_CSUM_UDPIP6;
        else
            goto nocsum;
    }

    if (likely(csum_type >= TX_CSUM_TCPIP)) {
        u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
        int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;

        if (chip <= CHELSIO_T5)
            hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
        else
            hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
        return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
    } else {
        int start = skb_transport_offset(skb);

        return TXPKT_CSUM_TYPE_V(csum_type) |
            TXPKT_CSUM_START_V(start) |
            TXPKT_CSUM_LOC_V(start + skb->csum_offset);
    }
}
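
/*
 * Editorial worked example: an untagged TCP/IPv4 packet with a 20-byte IP
 * header and CHECKSUM_PARTIAL set selects TX_CSUM_TCPIP, so on T4/T5
 * hwcsum() returns TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) |
 * TXPKT_IPHDR_LEN_V(20) | TXPKT_ETHHDR_LEN_V(0); the Ethernet header
 * length field counts only bytes beyond the standard ETH_HLEN, hence 0.
 */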

/*
 * Stop an Ethernet TX queue and record that state change.
 */
static void txq_stop(struct sge_eth_txq *txq)
{
    netif_tx_stop_queue(txq->txq);
    txq->q.stops++;
}

/*
 * Advance our software state for a TX queue by adding n in use descriptors.
 */
static inline void txq_advance(struct sge_txq *tq, unsigned int n)
{
    tq->in_use += n;
    tq->pidx += n;
    if (tq->pidx >= tq->size)
        tq->pidx -= tq->size;
}

/**
 *  t4vf_eth_xmit - add a packet to an Ethernet TX queue
 *  @skb: the packet
 *  @dev: the egress net device
 *
 *  Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
 */
netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
    u32 wr_mid;
    u64 cntrl, *end;
    int qidx, credits, max_pkt_len;
    unsigned int flits, ndesc;
    struct adapter *adapter;
    struct sge_eth_txq *txq;
    const struct port_info *pi;
    struct fw_eth_tx_pkt_vm_wr *wr;
    struct cpl_tx_pkt_core *cpl;
    const struct skb_shared_info *ssi;
    dma_addr_t addr[MAX_SKB_FRAGS + 1];
    const size_t fw_hdr_copy_len = sizeof(wr->firmware);

    /*
     * The chip minimum packet length is 10 octets but the firmware
     * command that we are using requires that we copy the Ethernet header
     * (including the VLAN tag) into the Work Request header, so we reject
     * anything smaller than that ...
     */
    if (unlikely(skb->len < fw_hdr_copy_len))
        goto out_free;

    /* Discard the packet if the length is greater than mtu */
    max_pkt_len = ETH_HLEN + dev->mtu;
    if (skb_vlan_tagged(skb))
        max_pkt_len += VLAN_HLEN;
    if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
        goto out_free;

    /*
     * Figure out which TX Queue we're going to use.
     */
    pi = netdev_priv(dev);
    adapter = pi->adapter;
    qidx = skb_get_queue_mapping(skb);
    BUG_ON(qidx >= pi->nqsets);
    txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

    if (pi->vlan_id && !skb_vlan_tag_present(skb))
        __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
                       pi->vlan_id);

    /*
     * Take this opportunity to reclaim any TX Descriptors whose DMA
     * transfers have completed.
     */
    reclaim_completed_tx(adapter, &txq->q, true);

    /*
     * Calculate the number of flits and TX Descriptors we're going to
     * need along with how many TX Descriptors will be left over after
     * we inject our Work Request.
     */
    flits = calc_tx_flits(skb);
    ndesc = flits_to_desc(flits);
    credits = txq_avail(&txq->q) - ndesc;

    if (unlikely(credits < 0)) {
        /*
         * Not enough room for this packet's Work Request.  Stop the
         * TX Queue and return a "busy" condition.  The queue will get
         * started later on when the firmware informs us that space
         * has opened up.
         */
        txq_stop(txq);
        dev_err(adapter->pdev_dev,
            "%s: TX ring %u full while queue awake!\n",
            dev->name, qidx);
        return NETDEV_TX_BUSY;
    }

    if (!is_eth_imm(skb) &&
        unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
        /*
         * We need to map the skb into PCI DMA space (because it can't
         * be in-lined directly into the Work Request) and the mapping
         * operation failed.  Record the error and drop the packet.
         */
        txq->mapping_err++;
        goto out_free;
    }

    wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
    if (unlikely(credits < ETHTXQ_STOP_THRES)) {
        /*
         * After we're done injecting the Work Request for this
         * packet, we'll be below our "stop threshold" so stop the TX
         * Queue now and schedule a request for an SGE Egress Queue
         * Update message.  The queue will get started later on when
         * the firmware processes this Work Request and sends us an
         * Egress Queue Status Update message indicating that space
         * has opened up.
         */
        txq_stop(txq);
        wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
    }

    /*
     * Start filling in our Work Request.  Note that we do _not_ handle
     * the WR Header wrapping around the TX Descriptor Ring.  If our
     * maximum header size ever exceeds one TX Descriptor, we'll need to
     * do something else here.
     */
    BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
    wr = (void *)&txq->q.desc[txq->q.pidx];
    wr->equiq_to_len16 = cpu_to_be32(wr_mid);
    wr->r3[0] = cpu_to_be32(0);
    wr->r3[1] = cpu_to_be32(0);
    skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
    end = (u64 *)wr + flits;

    /*
     * If this is a Large Send Offload packet we'll put in an LSO CPL
     * message with an encapsulated TX Packet CPL message.  Otherwise we
     * just use a TX Packet CPL message.
     */
    ssi = skb_shinfo(skb);
    if (ssi->gso_size) {
        struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
        bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
        int l3hdr_len = skb_network_header_len(skb);
        int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

        wr->op_immdlen =
            cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
                    FW_WR_IMMDLEN_V(sizeof(*lso) +
                            sizeof(*cpl)));
        /*
         * Fill in the LSO CPL message.
         */
        lso->lso_ctrl =
            cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
                    LSO_FIRST_SLICE_F |
                    LSO_LAST_SLICE_F |
                    LSO_IPV6_V(v6) |
                    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
                    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
                    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
        lso->ipid_ofst = cpu_to_be16(0);
        lso->mss = cpu_to_be16(ssi->gso_size);
        lso->seqno_offset = cpu_to_be32(0);
        if (is_t4(adapter->params.chip))
            lso->len = cpu_to_be32(skb->len);
        else
            lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));

        /*
         * Set up TX Packet CPL pointer, control word and perform
         * accounting.
         */
        cpl = (void *)(lso + 1);

        if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
            cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
        else
            cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

        cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
                       TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
             TXPKT_IPHDR_LEN_V(l3hdr_len);
        txq->tso++;
        txq->tx_cso += ssi->gso_segs;
    } else {
        int len;

        len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
        wr->op_immdlen =
            cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
                    FW_WR_IMMDLEN_V(len));

        /*
         * Set up TX Packet CPL pointer, control word and perform
         * accounting.
         */
        cpl = (void *)(wr + 1);
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
            cntrl = hwcsum(adapter->params.chip, skb) |
                TXPKT_IPCSUM_DIS_F;
            txq->tx_cso++;
        } else
            cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
    }

    /*
     * If there's a VLAN tag present, add that to the list of things to
     * do in this Work Request.
     */
    if (skb_vlan_tag_present(skb)) {
        txq->vlan_ins++;
        cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
    }

    /*
     * Fill in the TX Packet CPL message header.
     */
    cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
                 TXPKT_INTF_V(pi->port_id) |
                 TXPKT_PF_V(0));
    cpl->pack = cpu_to_be16(0);
    cpl->len = cpu_to_be16(skb->len);
    cpl->ctrl1 = cpu_to_be64(cntrl);

#ifdef T4_TRACE
    T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
          "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
          ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
#endif

    /*
     * Fill in the body of the TX Packet CPL message with either in-lined
     * data or a Scatter/Gather List.
     */
    if (is_eth_imm(skb)) {
        /*
         * In-line the packet's data and free the skb since we don't
         * need it any longer.
         */
        inline_tx_skb(skb, &txq->q, cpl + 1);
        dev_consume_skb_any(skb);
    } else {
        /*
         * Write the skb's Scatter/Gather list into the TX Packet CPL
         * message and retain a pointer to the skb so we can free it
         * later when its DMA completes.  (We store the skb pointer
         * in the Software Descriptor corresponding to the last TX
         * Descriptor used by the Work Request.)
         *
         * The retained skb will be freed when the corresponding TX
         * Descriptors are reclaimed after their DMAs complete.
         * However, this could take quite a while since, in general,
         * the hardware is set up to be lazy about sending DMA
         * completion notifications to us and we mostly perform TX
         * reclaims in the transmit routine.
         *
         * This is good for performance but means that we rely on new
         * TX packets arriving to run the destructors of completed
         * packets, which open up space in their sockets' send queues.
         * Sometimes we do not get such new packets causing TX to
         * stall.  A single UDP transmitter is a good example of this
         * situation.  We have a clean up timer that periodically
         * reclaims completed packets but it doesn't run often enough
         * (nor do we want it to) to prevent lengthy stalls.  A
         * solution to this problem is to run the destructor early,
         * after the packet is queued but before it's DMAd.  A con is
         * that we lie to socket memory accounting, but the amount of
         * extra memory is reasonable (limited by the number of TX
         * descriptors), the packets do actually get freed quickly by
         * new packets almost always, and for protocols like TCP that
         * wait for acks to really free up the data the extra memory
         * is even less.  On the positive side we run the destructors
         * on the sending CPU rather than on a potentially different
         * completing CPU, usually a good thing.
         *
         * Run the destructor before telling the DMA engine about the
         * packet to make sure it doesn't complete and get freed
         * prematurely.
         */
        struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
        struct sge_txq *tq = &txq->q;
        int last_desc;

        /*
         * If the Work Request header was an exact multiple of our TX
         * Descriptor length, then it's possible that the starting SGL
         * pointer lines up exactly with the end of our TX Descriptor
         * ring.  If that's the case, wrap around to the beginning
         * here ...
         */
        if (unlikely((void *)sgl == (void *)tq->stat)) {
            sgl = (void *)tq->desc;
            end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
        }

        write_sgl(skb, tq, sgl, end, 0, addr);
        skb_orphan(skb);

        last_desc = tq->pidx + ndesc - 1;
        if (last_desc >= tq->size)
            last_desc -= tq->size;
        tq->sdesc[last_desc].skb = skb;
        tq->sdesc[last_desc].sgl = sgl;
    }

    /*
     * Advance our internal TX Queue state, tell the hardware about
     * the new TX descriptors and return success.
     */
    txq_advance(&txq->q, ndesc);
    netif_trans_update(dev);
    ring_tx_db(adapter, &txq->q, ndesc);
    return NETDEV_TX_OK;

out_free:
    /*
     * An error of some sort happened.  Free the TX skb and tell the
     * OS that we've "dealt" with the packet ...
     */
    dev_kfree_skb_any(skb);
    return NETDEV_TX_OK;
}
1459 
1460 /**
1461  *  copy_frags - copy fragments from gather list into skb_shared_info
1462  *  @skb: destination skb
1463  *  @gl: source internal packet gather list
1464  *  @offset: packet start offset in first page
1465  *
1466  *  Copy an internal packet gather list into a Linux skb_shared_info
1467  *  structure.
1468  */
1469 static inline void copy_frags(struct sk_buff *skb,
1470                   const struct pkt_gl *gl,
1471                   unsigned int offset)
1472 {
1473     int i;
1474 
1475     /* usually there's just one frag */
1476     __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1477                  gl->frags[0].offset + offset,
1478                  gl->frags[0].size - offset);
1479     skb_shinfo(skb)->nr_frags = gl->nfrags;
1480     for (i = 1; i < gl->nfrags; i++)
1481         __skb_fill_page_desc(skb, i, gl->frags[i].page,
1482                      gl->frags[i].offset,
1483                      gl->frags[i].size);
1484 
1485     /* get a reference to the last page, we don't own it */
1486     get_page(gl->frags[gl->nfrags - 1].page);
1487 }
1488 
1489 /**
1490  *  t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
1491  *  @gl: the gather list
1492  *  @skb_len: size of sk_buff main body if it carries fragments
1493  *  @pull_len: amount of data to move to the sk_buff's main body
1494  *
1495  *  Builds an sk_buff from the given packet gather list.  Returns the
1496  *  sk_buff or %NULL if sk_buff allocation failed.
1497  */
1498 static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1499                      unsigned int skb_len,
1500                      unsigned int pull_len)
1501 {
1502     struct sk_buff *skb;
1503 
1504     /*
1505      * If the ingress packet is small enough, allocate an skb large enough
1506      * for all of the data and copy it inline.  Otherwise, allocate an skb
1507      * with enough room to pull in the header and reference the rest of
1508      * the data via the skb fragment list.
1509      *
1510      * Below we rely on RX_COPY_THRES being less than the smallest Rx
1511      * buffer size, which is expected since buffers are at least
1512      * PAGE_SIZE in size.  In this case packets up to RX_COPY_THRES have
1513      * only one fragment.
1514      */
1515     if (gl->tot_len <= RX_COPY_THRES) {
1516         /* small packets have only one fragment */
1517         skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1518         if (unlikely(!skb))
1519             goto out;
1520         __skb_put(skb, gl->tot_len);
1521         skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1522     } else {
1523         skb = alloc_skb(skb_len, GFP_ATOMIC);
1524         if (unlikely(!skb))
1525             goto out;
1526         __skb_put(skb, pull_len);
1527         skb_copy_to_linear_data(skb, gl->va, pull_len);
1528 
1529         copy_frags(skb, gl, pull_len);
1530         skb->len = gl->tot_len;
1531         skb->data_len = skb->len - pull_len;
1532         skb->truesize += skb->data_len;
1533     }
1534 
1535 out:
1536     return skb;
1537 }
1538 
1539 /**
1540  *  t4vf_pktgl_free - free a packet gather list
1541  *  @gl: the gather list
1542  *
1543  *  Releases the pages of a packet gather list.  We do not own the last
1544  *  page on the list and do not free it.
1545  */
1546 static void t4vf_pktgl_free(const struct pkt_gl *gl)
1547 {
1548     int frag;
1549 
1550     frag = gl->nfrags - 1;
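    /*
     * Note: "while (frag--)" stops before index nfrags-1, so the last
     * fragment's page (which we do not own) is never released; e.g.
     * with nfrags == 3 only pages 1 and 0 are put.
     */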
1551     while (frag--)
1552         put_page(gl->frags[frag].page);
1553 }
1554 
1555 /**
1556  *  do_gro - perform Generic Receive Offload ingress packet processing
1557  *  @rxq: ingress RX Ethernet Queue
1558  *  @gl: gather list for ingress packet
1559  *  @pkt: CPL header for last packet fragment
1560  *
1561  *  Perform Generic Receive Offload (GRO) ingress packet processing.
1562  *  We use the standard Linux GRO interfaces for this.
1563  */
1564 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1565            const struct cpl_rx_pkt *pkt)
1566 {
1567     struct adapter *adapter = rxq->rspq.adapter;
1568     struct sge *s = &adapter->sge;
1569     struct port_info *pi;
1570     int ret;
1571     struct sk_buff *skb;
1572 
1573     skb = napi_get_frags(&rxq->rspq.napi);
1574     if (unlikely(!skb)) {
1575         t4vf_pktgl_free(gl);
1576         rxq->stats.rx_drops++;
1577         return;
1578     }
1579 
1580     copy_frags(skb, gl, s->pktshift);
1581     skb->len = gl->tot_len - s->pktshift;
1582     skb->data_len = skb->len;
1583     skb->truesize += skb->data_len;
1584     skb->ip_summed = CHECKSUM_UNNECESSARY;
1585     skb_record_rx_queue(skb, rxq->rspq.idx);
1586     pi = netdev_priv(skb->dev);
1587 
1588     if (pkt->vlan_ex && !pi->vlan_id) {
1589         __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1590                     be16_to_cpu(pkt->vlan));
1591         rxq->stats.vlan_ex++;
1592     }
1593     ret = napi_gro_frags(&rxq->rspq.napi);
1594 
1595     if (ret == GRO_HELD)
1596         rxq->stats.lro_pkts++;
1597     else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1598         rxq->stats.lro_merged++;
1599     rxq->stats.pkts++;
1600     rxq->stats.rx_cso++;
1601 }
1602 
1603 /**
1604  *  t4vf_ethrx_handler - process an ingress ethernet packet
1605  *  @rspq: the response queue that received the packet
1606  *  @rsp: the response queue descriptor holding the RX_PKT message
1607  *  @gl: the gather list of packet fragments
1608  *
1609  *  Process an ingress ethernet packet and deliver it to the stack.
1610  */
1611 int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1612                const struct pkt_gl *gl)
1613 {
1614     struct sk_buff *skb;
1615     const struct cpl_rx_pkt *pkt = (void *)rsp;
1616     bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
1617                (rspq->netdev->features & NETIF_F_RXCSUM);
1618     struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1619     struct adapter *adapter = rspq->adapter;
1620     struct sge *s = &adapter->sge;
1621     struct port_info *pi;
1622 
1623     /*
1624      * If this is a good TCP packet and we have Generic Receive Offload
1625      * enabled, handle the packet in the GRO path.
1626      */
1627     if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
1628         (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1629         !pkt->ip_frag) {
1630         do_gro(rxq, gl, pkt);
1631         return 0;
1632     }
1633 
1634     /*
1635      * Convert the Packet Gather List into an skb.
1636      */
1637     skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1638     if (unlikely(!skb)) {
1639         t4vf_pktgl_free(gl);
1640         rxq->stats.rx_drops++;
1641         return 0;
1642     }
1643     __skb_pull(skb, s->pktshift);
1644     skb->protocol = eth_type_trans(skb, rspq->netdev);
1645     skb_record_rx_queue(skb, rspq->idx);
1646     pi = netdev_priv(skb->dev);
1647     rxq->stats.pkts++;
1648 
1649     if (csum_ok && !pkt->err_vec &&
1650         (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
1651         if (!pkt->ip_frag) {
1652             skb->ip_summed = CHECKSUM_UNNECESSARY;
1653             rxq->stats.rx_cso++;
1654         } else if (pkt->l2info & htonl(RXF_IP_F)) {
1655             __sum16 c = (__force __sum16)pkt->csum;
1656             skb->csum = csum_unfold(c);
1657             skb->ip_summed = CHECKSUM_COMPLETE;
1658             rxq->stats.rx_cso++;
1659         }
1660     } else
1661         skb_checksum_none_assert(skb);
1662 
1663     if (pkt->vlan_ex && !pi->vlan_id) {
1664         rxq->stats.vlan_ex++;
1665         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1666                        be16_to_cpu(pkt->vlan));
1667     }
1668 
1669     netif_receive_skb(skb);
1670 
1671     return 0;
1672 }
1673 
1674 /**
1675  *  is_new_response - check if a response is newly written
1676  *  @rc: the response control descriptor
1677  *  @rspq: the response queue
1678  *
1679  *  Returns true if a response descriptor contains a yet unprocessed
1680  *  response.
1681  */
1682 static inline bool is_new_response(const struct rsp_ctrl *rc,
1683                    const struct sge_rspq *rspq)
1684 {
1685     return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
1686 }
1687 
1688 /**
1689  *  restore_rx_bufs - put back a packet's RX buffers
1690  *  @gl: the packet gather list
1691  *  @fl: the SGE Free List
1692  *  @frags: how many fragments in @gl
1693  *
1694  *  Called when we find out that the current packet, @gl, can't be
1695  *  processed right away for some reason.  This is a very rare event and
1696  *  there's no effort to make this suspension/resumption process
1697  *  particularly efficient.
1698  *
1699  *  We implement the suspension by putting all of the RX buffers associated
1700  *  with the current packet back on the original Free List.  The buffers
1701  *  have already been unmapped and are left unmapped; we mark them as
1702  *  unmapped in order to prevent further unmapping attempts.  (Effectively
1703  *  this function undoes the series of @unmap_rx_buf calls which were done
1704  *  to create the current packet's gather list.)  This leaves us ready to
1705  *  restart processing of the packet the next time we start processing the
1706  *  RX Queue ...
1707  */
1708 static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1709                 int frags)
1710 {
1711     struct rx_sw_desc *sdesc;
1712 
1713     while (frags--) {
1714         if (fl->cidx == 0)
1715             fl->cidx = fl->size - 1;
1716         else
1717             fl->cidx--;
1718         sdesc = &fl->sdesc[fl->cidx];
1719         sdesc->page = gl->frags[frags].page;
1720         sdesc->dma_addr |= RX_UNMAPPED_BUF;
1721         fl->avail++;
1722     }
1723 }
1724 
1725 /**
1726  *  rspq_next - advance to the next entry in a response queue
1727  *  @rspq: the queue
1728  *
1729  *  Updates the state of a response queue to advance it to the next entry.
1730  */
1731 static inline void rspq_next(struct sge_rspq *rspq)
1732 {
1733     rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1734     if (unlikely(++rspq->cidx == rspq->size)) {
1735         rspq->cidx = 0;
1736         rspq->gen ^= 1;
1737         rspq->cur_desc = rspq->desc;
1738     }
1739 }
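/*
 * Note on the generation bit used by is_new_response() above: rspq->gen
 * starts at 1 and is toggled each time the consumer index wraps around
 * the ring (rspq_next()).  The hardware writes its current generation
 * value into each response descriptor it produces, so a descriptor whose
 * generation matches rspq->gen was written on the current pass and is
 * new, while a mismatch means it is stale from the previous pass.
 */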
1740 
1741 /**
1742  *  process_responses - process responses from an SGE response queue
1743  *  @rspq: the ingress response queue to process
1744  *  @budget: how many responses can be processed in this round
1745  *
1746  *  Process responses from a Scatter Gather Engine response queue up to
1747  *  the supplied budget.  Responses include received packets as well as
1748  *  control messages from firmware or hardware.
1749  *
1750  *  Additionally choose the interrupt holdoff time for the next interrupt
1751  *  on this queue.  If the system is under memory shortage use a fairly
1752  *  long delay to help recovery.
1753  */
1754 static int process_responses(struct sge_rspq *rspq, int budget)
1755 {
1756     struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1757     struct adapter *adapter = rspq->adapter;
1758     struct sge *s = &adapter->sge;
1759     int budget_left = budget;
1760 
1761     while (likely(budget_left)) {
1762         int ret, rsp_type;
1763         const struct rsp_ctrl *rc;
1764 
1765         rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1766         if (!is_new_response(rc, rspq))
1767             break;
1768 
1769         /*
1770          * Figure out what kind of response we've received from the
1771          * SGE.
1772          */
1773         dma_rmb();
1774         rsp_type = RSPD_TYPE_G(rc->type_gen);
1775         if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
1776             struct page_frag *fp;
1777             struct pkt_gl gl;
1778             const struct rx_sw_desc *sdesc;
1779             u32 bufsz, frag;
1780             u32 len = be32_to_cpu(rc->pldbuflen_qid);
1781 
1782             /*
1783              * If we get a "new buffer" message from the SGE we
1784              * need to move on to the next Free List buffer.
1785              */
1786             if (len & RSPD_NEWBUF_F) {
1787                 /*
1788                  * We get one "new buffer" message when we
1789                  * first start up a queue so we need to ignore
1790                  * it when our offset into the buffer is 0.
1791                  */
1792                 if (likely(rspq->offset > 0)) {
1793                     free_rx_bufs(rspq->adapter, &rxq->fl,
1794                              1);
1795                     rspq->offset = 0;
1796                 }
1797                 len = RSPD_LEN_G(len);
1798             }
1799             gl.tot_len = len;
1800 
1801             /*
1802              * Gather packet fragments.
1803              */
1804             for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
1805                 BUG_ON(frag >= MAX_SKB_FRAGS);
1806                 BUG_ON(rxq->fl.avail == 0);
1807                 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1808                 bufsz = get_buf_size(adapter, sdesc);
1809                 fp->page = sdesc->page;
1810                 fp->offset = rspq->offset;
1811                 fp->size = min(bufsz, len);
1812                 len -= fp->size;
1813                 if (!len)
1814                     break;
1815                 unmap_rx_buf(rspq->adapter, &rxq->fl);
1816             }
1817             gl.nfrags = frag+1;
1818 
1819             /*
1820              * Last buffer remains mapped so explicitly make it
1821              * coherent for CPU access and start preloading first
1822              * cache line ...
1823              */
1824             dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1825                         get_buf_addr(sdesc),
1826                         fp->size, DMA_FROM_DEVICE);
1827             gl.va = (page_address(gl.frags[0].page) +
1828                  gl.frags[0].offset);
1829             prefetch(gl.va);
1830 
1831             /*
1832              * Hand the new ingress packet to the handler for
1833              * this Response Queue.
1834              */
1835             ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1836             if (likely(ret == 0))
1837                 rspq->offset += ALIGN(fp->size, s->fl_align);
1838             else
1839                 restore_rx_bufs(&gl, &rxq->fl, frag);
1840         } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
1841             ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1842         } else {
1843             WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
1844             ret = 0;
1845         }
1846 
1847         if (unlikely(ret)) {
1848             /*
1849              * Couldn't process descriptor, back off for recovery.
1850              * We use the SGE's last timer which has the longest
1851              * interrupt coalescing value ...
1852              */
1853             const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1854             rspq->next_intr_params =
1855                 QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
1856             break;
1857         }
1858 
1859         rspq_next(rspq);
1860         budget_left--;
1861     }
1862 
1863     /*
1864      * If this is a Response Queue with an associated Free List and
1865      * there is room in the Free List for at least two Egress Queue
1866      * Units' worth of new buffer pointers, refill the Free List.
1867      */
1868     if (rspq->offset >= 0 &&
1869         fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1870         __refill_fl(rspq->adapter, &rxq->fl);
1871     return budget - budget_left;
1872 }
1873 
1874 /**
1875  *  napi_rx_handler - the NAPI handler for RX processing
1876  *  @napi: the napi instance
1877  *  @budget: how many packets we can process in this round
1878  *
1879  *  Handler for new data events when using NAPI.  This does not need any
1880  *  locking or protection from interrupts as data interrupts are off at
1881  *  this point and other adapter interrupts do not interfere (the latter
1882      *  is not a concern at all with MSI-X as non-data interrupts then have
1883  *  a separate handler).
1884  */
1885 static int napi_rx_handler(struct napi_struct *napi, int budget)
1886 {
1887     unsigned int intr_params;
1888     struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1889     int work_done = process_responses(rspq, budget);
1890     u32 val;
1891 
1892     if (likely(work_done < budget)) {
1893         napi_complete_done(napi, work_done);
1894         intr_params = rspq->next_intr_params;
1895         rspq->next_intr_params = rspq->intr_params;
1896     } else
1897         intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
1898 
1899     if (unlikely(work_done == 0))
1900         rspq->unhandled_irqs++;
1901 
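    /*
     * The GTS ("Going To Sleep") doorbell write below returns the
     * "work_done" Response Queue entries we've consumed to the hardware
     * (CIDXINC) and arms the next interrupt holdoff parameters chosen
     * above (SEINTARM).
     */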
1902     val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
1903     /* If we don't have access to the new User GTS (T5+), use the old
1904      * doorbell mechanism; otherwise use the new BAR2 mechanism.
1905      */
1906     if (unlikely(!rspq->bar2_addr)) {
1907         t4_write_reg(rspq->adapter,
1908                  T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1909                  val | INGRESSQID_V((u32)rspq->cntxt_id));
1910     } else {
1911         writel(val | INGRESSQID_V(rspq->bar2_qid),
1912                rspq->bar2_addr + SGE_UDB_GTS);
1913         wmb();
1914     }
1915     return work_done;
1916 }
1917 
1918 /*
1919  * The MSI-X interrupt handler for an SGE response queue for the NAPI case
1920  * (i.e., response queue serviced by NAPI polling).
1921  */
1922 irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1923 {
1924     struct sge_rspq *rspq = cookie;
1925 
1926     napi_schedule(&rspq->napi);
1927     return IRQ_HANDLED;
1928 }
1929 
1930 /*
1931  * Process the indirect interrupt entries in the interrupt queue and kick off
1932  * NAPI for each queue that has generated an entry.
1933  */
1934 static unsigned int process_intrq(struct adapter *adapter)
1935 {
1936     struct sge *s = &adapter->sge;
1937     struct sge_rspq *intrq = &s->intrq;
1938     unsigned int work_done;
1939     u32 val;
1940 
1941     spin_lock(&adapter->sge.intrq_lock);
1942     for (work_done = 0; ; work_done++) {
1943         const struct rsp_ctrl *rc;
1944         unsigned int qid, iq_idx;
1945         struct sge_rspq *rspq;
1946 
1947         /*
1948          * Grab the next response from the interrupt queue and bail
1949          * out if it's not a new response.
1950          */
1951         rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1952         if (!is_new_response(rc, intrq))
1953             break;
1954 
1955         /*
1956          * If the response isn't a forwarded interrupt message, issue an
1957          * error and go on to the next response message.  This should
1958          * never happen ...
1959          */
1960         dma_rmb();
1961         if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
1962             dev_err(adapter->pdev_dev,
1963                 "Unexpected INTRQ response type %d\n",
1964                 RSPD_TYPE_G(rc->type_gen));
1965             continue;
1966         }
1967 
1968         /*
1969          * Extract the Queue ID from the interrupt message and perform
1970          * sanity checking to make sure it really refers to one of our
1971          * Ingress Queues which is active and matches the queue's ID.
1972          * None of these error conditions should ever happen, so we may
1973          * want to make them fatal and/or conditionalize them under
1974          * DEBUG.
1975          */
1976         qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
1977         iq_idx = IQ_IDX(s, qid);
1978         if (unlikely(iq_idx >= MAX_INGQ)) {
1979             dev_err(adapter->pdev_dev,
1980                 "Ingress QID %d out of range\n", qid);
1981             continue;
1982         }
1983         rspq = s->ingr_map[iq_idx];
1984         if (unlikely(rspq == NULL)) {
1985             dev_err(adapter->pdev_dev,
1986                 "Ingress QID %d RSPQ=NULL\n", qid);
1987             continue;
1988         }
1989         if (unlikely(rspq->abs_id != qid)) {
1990             dev_err(adapter->pdev_dev,
1991                 "Ingress QID %d refers to RSPQ %d\n",
1992                 qid, rspq->abs_id);
1993             continue;
1994         }
1995 
1996         /*
1997          * Schedule NAPI processing on the indicated Response Queue
1998          * and move on to the next entry in the Forwarded Interrupt
1999          * Queue.
2000          */
2001         napi_schedule(&rspq->napi);
2002         rspq_next(intrq);
2003     }
2004 
2005     val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
2006     /* If we don't have access to the new User GTS (T5+), use the old
2007      * doorbell mechanism; otherwise use the new BAR2 mechanism.
2008      */
2009     if (unlikely(!intrq->bar2_addr)) {
2010         t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
2011                  val | INGRESSQID_V(intrq->cntxt_id));
2012     } else {
2013         writel(val | INGRESSQID_V(intrq->bar2_qid),
2014                intrq->bar2_addr + SGE_UDB_GTS);
2015         wmb();
2016     }
2017 
2018     spin_unlock(&adapter->sge.intrq_lock);
2019 
2020     return work_done;
2021 }
2022 
2023 /*
2024  * The MSI interrupt handler handles data events from SGE response queues as
2025  * well as error and other async events as they all use the same MSI vector.
2026  */
2027 static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
2028 {
2029     struct adapter *adapter = cookie;
2030 
2031     process_intrq(adapter);
2032     return IRQ_HANDLED;
2033 }
2034 
2035 /**
2036  *  t4vf_intr_handler - select the top-level interrupt handler
2037  *  @adapter: the adapter
2038  *
2039  *  Selects the top-level interrupt handler based on the type of interrupts
2040  *  (MSI-X or MSI).
2041  */
2042 irq_handler_t t4vf_intr_handler(struct adapter *adapter)
2043 {
2044     BUG_ON((adapter->flags &
2045            (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
2046     if (adapter->flags & CXGB4VF_USING_MSIX)
2047         return t4vf_sge_intr_msix;
2048     else
2049         return t4vf_intr_msi;
2050 }
2051 
2052 /**
2053  *  sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
2054  *  @t: Rx timer
2055  *
2056  *  Runs periodically from a timer to perform maintenance of SGE RX queues.
2057  *
2058  *  Replenishes RX queues that have run out due to memory shortage.
2059  *  Normally new RX buffers are added when existing ones are consumed, but
2060  *  when out of memory a queue can become empty.  We schedule NAPI to do
2061  *  the actual refill.
2062  */
2063 static void sge_rx_timer_cb(struct timer_list *t)
2064 {
2065     struct adapter *adapter = from_timer(adapter, t, sge.rx_timer);
2066     struct sge *s = &adapter->sge;
2067     unsigned int i;
2068 
2069     /*
2070      * Scan the "Starving Free Lists" flag array looking for any Free
2071      * Lists in need of more free buffers.  If we find one and it's not
2072      * being actively polled, then bump its "starving" counter and attempt
2073      * to refill it.  If we're successful in adding enough buffers to push
2074      * the Free List over the starving threshold, then we can clear its
2075      * "starving" status.
2076      */
2077     for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
2078         unsigned long m;
2079 
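        /*
         * Each word of starving_fl[] is a bitmap of Free List IDs:
         * "m &= m - 1" clears the lowest set bit on every iteration,
         * and __ffs(m) + i * BITS_PER_LONG recovers that bit's index
         * into the egress queue map.
         */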
2080         for (m = s->starving_fl[i]; m; m &= m - 1) {
2081             unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2082             struct sge_fl *fl = s->egr_map[id];
2083 
2084             clear_bit(id, s->starving_fl);
2085             smp_mb__after_atomic();
2086 
2087             /*
2088              * Since we are accessing fl without a lock there's a
2089              * small probability of a false positive where we
2090              * schedule napi but the FL is no longer starving.
2091              * No biggie.
2092              */
2093             if (fl_starving(adapter, fl)) {
2094                 struct sge_eth_rxq *rxq;
2095 
2096                 rxq = container_of(fl, struct sge_eth_rxq, fl);
2097                 if (napi_reschedule(&rxq->rspq.napi))
2098                     fl->starving++;
2099                 else
2100                     set_bit(id, s->starving_fl);
2101             }
2102         }
2103     }
2104 
2105     /*
2106      * Reschedule the next scan for starving Free Lists ...
2107      */
2108     mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2109 }
2110 
2111 /**
2112  *  sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
2113  *  @t: Tx timer
2114  *
2115  *  Runs periodically from a timer to perform maintenance of SGE TX queues.
2116  *
2117  *  Reclaims completed Tx packets for the Ethernet queues.  Normally
2118  *  packets are cleaned up by new Tx packets; this timer cleans up packets
2119  *  when no new packets are being submitted.  This is essential for pktgen,
2120  *  at least.
2121  */
2122 static void sge_tx_timer_cb(struct timer_list *t)
2123 {
2124     struct adapter *adapter = from_timer(adapter, t, sge.tx_timer);
2125     struct sge *s = &adapter->sge;
2126     unsigned int i, budget;
2127 
2128     budget = MAX_TIMER_TX_RECLAIM;
2129     i = s->ethtxq_rover;
2130     do {
2131         struct sge_eth_txq *txq = &s->ethtxq[i];
2132 
2133         if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
2134             int avail = reclaimable(&txq->q);
2135 
2136             if (avail > budget)
2137                 avail = budget;
2138 
2139             free_tx_desc(adapter, &txq->q, avail, true);
2140             txq->q.in_use -= avail;
2141             __netif_tx_unlock(txq->txq);
2142 
2143             budget -= avail;
2144             if (!budget)
2145                 break;
2146         }
2147 
2148         i++;
2149         if (i >= s->ethqsets)
2150             i = 0;
2151     } while (i != s->ethtxq_rover);
2152     s->ethtxq_rover = i;
2153 
2154     /*
2155      * If we found too many reclaimable packets, schedule a timer in the
2156      * near future to continue where we left off.  Otherwise the next timer
2157      * will be at its normal interval.
2158      */
2159     mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2160 }
2161 
2162 /**
2163  *  bar2_address - return the BAR2 address for an SGE Queue's Registers
2164  *  @adapter: the adapter
2165  *  @qid: the SGE Queue ID
2166  *  @qtype: the SGE Queue Type (Egress or Ingress)
2167  *  @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2168  *
2169  *  Returns the BAR2 address for the SGE Queue Registers associated with
2170  *  @qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
2171  *  returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2172  *  Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2173  *  Registers are supported (e.g. the Write Combining Doorbell Buffer).
2174  */
2175 static void __iomem *bar2_address(struct adapter *adapter,
2176                   unsigned int qid,
2177                   enum t4_bar2_qtype qtype,
2178                   unsigned int *pbar2_qid)
2179 {
2180     u64 bar2_qoffset;
2181     int ret;
2182 
2183     ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
2184                   &bar2_qoffset, pbar2_qid);
2185     if (ret)
2186         return NULL;
2187 
2188     return adapter->bar2 + bar2_qoffset;
2189 }
2190 
2191 /**
2192  *  t4vf_sge_alloc_rxq - allocate an SGE RX Queue
2193  *  @adapter: the adapter
2194  *  @rspq: pointer to the new rxq's Response Queue to be filled in
2195  *  @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
2196  *  @dev: the network device associated with the new rspq
2197  *  @intr_dest: MSI-X vector index (overridden in MSI mode)
2198  *  @fl: pointer to the new rxq's Free List to be filled in
2199  *  @hnd: the interrupt handler to invoke for the rspq
2200  */
2201 int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2202                bool iqasynch, struct net_device *dev,
2203                int intr_dest,
2204                struct sge_fl *fl, rspq_handler_t hnd)
2205 {
2206     struct sge *s = &adapter->sge;
2207     struct port_info *pi = netdev_priv(dev);
2208     struct fw_iq_cmd cmd, rpl;
2209     int ret, iqandst, flsz = 0;
2210     int relaxed = !(adapter->flags & CXGB4VF_ROOT_NO_RELAXED_ORDERING);
2211 
2212     /*
2213      * If we're using MSI interrupts and we're not initializing the
2214      * Forwarded Interrupt Queue itself, then set up this queue for
2215      * indirect interrupts to the Forwarded Interrupt Queue.  Obviously
2216      * the Forwarded Interrupt Queue must be set up before any other
2217      * ingress queue ...
2218      */
2219     if ((adapter->flags & CXGB4VF_USING_MSI) &&
2220         rspq != &adapter->sge.intrq) {
2221         iqandst = SGE_INTRDST_IQ;
2222         intr_dest = adapter->sge.intrq.abs_id;
2223     } else
2224         iqandst = SGE_INTRDST_PCI;
2225 
2226     /*
2227      * Allocate the hardware ring for the Response Queue.  The size needs
2228      * to be a multiple of 16 which includes the mandatory status entry
2229      * (regardless of whether the Status Page capabilities are enabled or
2230      * not).
2231      */
2232     rspq->size = roundup(rspq->size, 16);
2233     rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
2234                 0, &rspq->phys_addr, NULL, 0);
2235     if (!rspq->desc)
2236         return -ENOMEM;
2237 
2238     /*
2239      * Fill in the Ingress Queue Command.  Note: Ideally this code would
2240      * be in t4vf_hw.c but there are so many parameters and dependencies
2241      * on our Linux SGE state that we would end up having to pass tons of
2242      * parameters.  We'll have to think about how this might be migrated
2243      * into OS-independent common code ...
2244      */
2245     memset(&cmd, 0, sizeof(cmd));
2246     cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
2247                     FW_CMD_REQUEST_F |
2248                     FW_CMD_WRITE_F |
2249                     FW_CMD_EXEC_F);
2250     cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
2251                      FW_IQ_CMD_IQSTART_F |
2252                      FW_LEN16(cmd));
2253     cmd.type_to_iqandstindex =
2254         cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2255                 FW_IQ_CMD_IQASYNCH_V(iqasynch) |
2256                 FW_IQ_CMD_VIID_V(pi->viid) |
2257                 FW_IQ_CMD_IQANDST_V(iqandst) |
2258                 FW_IQ_CMD_IQANUS_V(1) |
2259                 FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
2260                 FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
2261     cmd.iqdroprss_to_iqesize =
2262         cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
2263                 FW_IQ_CMD_IQGTSMODE_F |
2264                 FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
2265                 FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
2266     cmd.iqsize = cpu_to_be16(rspq->size);
2267     cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
2268 
2269     if (fl) {
2270         unsigned int chip_ver =
2271             CHELSIO_CHIP_VERSION(adapter->params.chip);
2272         /*
2273          * Allocate the ring for the hardware free list (with space
2274          * for its status page) along with the associated software
2275          * descriptor ring.  The free list size needs to be a multiple
2276          * of the Egress Queue Unit and at least 2 Egress Units larger
2277          * than the SGE's Egress Congestion Threshold
2278          * (fl_starve_thres - 1).
2279          */
2280         if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
2281             fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
2282         fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
2283         fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2284                       sizeof(__be64), sizeof(struct rx_sw_desc),
2285                       &fl->addr, &fl->sdesc, s->stat_len);
2286         if (!fl->desc) {
2287             ret = -ENOMEM;
2288             goto err;
2289         }
2290 
2291         /*
2292          * Calculate the size of the hardware free list ring plus
2293          * Status Page (which the SGE will place after the end of the
2294          * free list ring) in Egress Queue Units.
2295          */
2296         flsz = (fl->size / FL_PER_EQ_UNIT +
2297             s->stat_len / EQ_UNIT);
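        /*
         * Worked example (assuming a 64-byte Egress Queue Unit, so
         * FL_PER_EQ_UNIT == 8): a 4096-entry Free List plus a 64-byte
         * Status Page works out to flsz = 4096/8 + 64/64 = 513 Egress
         * Queue Units.
         */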
2298 
2299         /*
2300          * Fill in all the relevant firmware Ingress Queue Command
2301          * fields for the free list.
2302          */
2303         cmd.iqns_to_fl0congen =
2304             cpu_to_be32(
2305                 FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
2306                 FW_IQ_CMD_FL0PACKEN_F |
2307                 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
2308                 FW_IQ_CMD_FL0DATARO_V(relaxed) |
2309                 FW_IQ_CMD_FL0PADEN_F);
2310 
2311         /* In T6, for egress queue type FL there is internal overhead
2312          * of 16B for header going into FLM module.  Hence the maximum
2313          * allowed burst size is 448 bytes.  For T4/T5, the hardware
2314          * doesn't coalesce fetch requests if more than 64 bytes of
2315          * Free List pointers are provided, so we use a 128-byte Fetch
2316          * Burst Minimum there (T6 implements coalescing so we can use
2317          * the smaller 64-byte value there).
2318          */
2319         cmd.fl0dcaen_to_fl0cidxfthresh =
2320             cpu_to_be16(
2321                 FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5
2322                              ? FETCHBURSTMIN_128B_X
2323                              : FETCHBURSTMIN_64B_T6_X) |
2324                 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
2325                              FETCHBURSTMAX_512B_X :
2326                              FETCHBURSTMAX_256B_X));
2327         cmd.fl0size = cpu_to_be16(flsz);
2328         cmd.fl0addr = cpu_to_be64(fl->addr);
2329     }
2330 
2331     /*
2332      * Issue the firmware Ingress Queue Command and extract the results if
2333      * it completes successfully.
2334      */
2335     ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2336     if (ret)
2337         goto err;
2338 
2339     netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
2340     rspq->cur_desc = rspq->desc;
2341     rspq->cidx = 0;
2342     rspq->gen = 1;
2343     rspq->next_intr_params = rspq->intr_params;
2344     rspq->cntxt_id = be16_to_cpu(rpl.iqid);
2345     rspq->bar2_addr = bar2_address(adapter,
2346                        rspq->cntxt_id,
2347                        T4_BAR2_QTYPE_INGRESS,
2348                        &rspq->bar2_qid);
2349     rspq->abs_id = be16_to_cpu(rpl.physiqid);
2350     rspq->size--;           /* subtract status entry */
2351     rspq->adapter = adapter;
2352     rspq->netdev = dev;
2353     rspq->handler = hnd;
2354 
2355     /* set offset to -1 to distinguish ingress queues without FL */
2356     rspq->offset = fl ? 0 : -1;
2357 
2358     if (fl) {
2359         fl->cntxt_id = be16_to_cpu(rpl.fl0id);
2360         fl->avail = 0;
2361         fl->pend_cred = 0;
2362         fl->pidx = 0;
2363         fl->cidx = 0;
2364         fl->alloc_failed = 0;
2365         fl->large_alloc_failed = 0;
2366         fl->starving = 0;
2367 
2368         /* Note, we must initialize the BAR2 Free List User Doorbell
2369          * information before refilling the Free List!
2370          */
2371         fl->bar2_addr = bar2_address(adapter,
2372                          fl->cntxt_id,
2373                          T4_BAR2_QTYPE_EGRESS,
2374                          &fl->bar2_qid);
2375 
2376         refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2377     }
2378 
2379     return 0;
2380 
2381 err:
2382     /*
2383      * An error occurred.  Clean up our partial allocation state and
2384      * return the error.
2385      */
2386     if (rspq->desc) {
2387         dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
2388                   rspq->desc, rspq->phys_addr);
2389         rspq->desc = NULL;
2390     }
2391     if (fl && fl->desc) {
2392         kfree(fl->sdesc);
2393         fl->sdesc = NULL;
2394         dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
2395                   fl->desc, fl->addr);
2396         fl->desc = NULL;
2397     }
2398     return ret;
2399 }
2400 
2401 /**
2402  *  t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
2403  *  @adapter: the adapter
2404  *  @txq: pointer to the new txq to be filled in
2405  *  @dev: the network device
2406  *  @devq: the network TX queue associated with the new txq
2407  *  @iqid: the relative ingress queue ID to which events relating to
2408  *      the new txq should be directed
2409  */
2410 int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2411                struct net_device *dev, struct netdev_queue *devq,
2412                unsigned int iqid)
2413 {
2414     unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
2415     struct port_info *pi = netdev_priv(dev);
2416     struct fw_eq_eth_cmd cmd, rpl;
2417     struct sge *s = &adapter->sge;
2418     int ret, nentries;
2419 
2420     /*
2421      * Calculate the size of the hardware TX Queue (including the Status
2422      * Page on the end of the TX Queue) in units of TX Descriptors.
2423      */
2424     nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
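    /*
     * Illustrative example (assuming a 64-byte struct tx_desc and a
     * 64-byte Status Page): a 1024-descriptor TX Queue would need
     * nentries = 1024 + 64/64 = 1025 hardware entries.
     */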
2425 
2426     /*
2427      * Allocate the hardware ring for the TX ring (with space for its
2428      * status page) along with the associated software descriptor ring.
2429      */
2430     txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2431                  sizeof(struct tx_desc),
2432                  sizeof(struct tx_sw_desc),
2433                  &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
2434     if (!txq->q.desc)
2435         return -ENOMEM;
2436 
2437     /*
2438      * Fill in the Egress Queue Command.  Note: As with the direct use of
2439      * the firmware Ingress Queue Command above in our RXQ allocation
2440      * routine, ideally, this code would be in t4vf_hw.c.  Again, we'll
2441      * have to see if there's some reasonable way to parameterize it
2442      * into the common code ...
2443      */
2444     memset(&cmd, 0, sizeof(cmd));
2445     cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
2446                     FW_CMD_REQUEST_F |
2447                     FW_CMD_WRITE_F |
2448                     FW_CMD_EXEC_F);
2449     cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
2450                      FW_EQ_ETH_CMD_EQSTART_F |
2451                      FW_LEN16(cmd));
2452     cmd.autoequiqe_to_viid = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2453                          FW_EQ_ETH_CMD_VIID_V(pi->viid));
2454     cmd.fetchszm_to_iqid =
2455         cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
2456                 FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
2457                 FW_EQ_ETH_CMD_IQID_V(iqid));
2458     cmd.dcaen_to_eqsize =
2459         cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
2460                           ? FETCHBURSTMIN_64B_X
2461                           : FETCHBURSTMIN_64B_T6_X) |
2462                 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
2463                 FW_EQ_ETH_CMD_CIDXFTHRESH_V(
2464                         CIDXFLUSHTHRESH_32_X) |
2465                 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2466     cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
2467 
2468     /*
2469      * Issue the firmware Egress Queue Command and extract the results if
2470      * it completes successfully.
2471      */
2472     ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2473     if (ret) {
2474         /*
2475          * The firmware Egress Queue Command failed for some reason.
2476          * Free up our partial allocation state and return the error.
2477          */
2478         kfree(txq->q.sdesc);
2479         txq->q.sdesc = NULL;
2480         dma_free_coherent(adapter->pdev_dev,
2481                   nentries * sizeof(struct tx_desc),
2482                   txq->q.desc, txq->q.phys_addr);
2483         txq->q.desc = NULL;
2484         return ret;
2485     }
2486 
2487     txq->q.in_use = 0;
2488     txq->q.cidx = 0;
2489     txq->q.pidx = 0;
2490     txq->q.stat = (void *)&txq->q.desc[txq->q.size];
2491     txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
2492     txq->q.bar2_addr = bar2_address(adapter,
2493                     txq->q.cntxt_id,
2494                     T4_BAR2_QTYPE_EGRESS,
2495                     &txq->q.bar2_qid);
2496     txq->q.abs_id =
2497         FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
2498     txq->txq = devq;
2499     txq->tso = 0;
2500     txq->tx_cso = 0;
2501     txq->vlan_ins = 0;
2502     txq->q.stops = 0;
2503     txq->q.restarts = 0;
2504     txq->mapping_err = 0;
2505     return 0;
2506 }
2507 
2508 /*
2509  * Free the DMA map resources associated with a TX queue.
2510  */
2511 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2512 {
2513     struct sge *s = &adapter->sge;
2514 
2515     dma_free_coherent(adapter->pdev_dev,
2516               tq->size * sizeof(*tq->desc) + s->stat_len,
2517               tq->desc, tq->phys_addr);
2518     tq->cntxt_id = 0;
2519     tq->sdesc = NULL;
2520     tq->desc = NULL;
2521 }
2522 
2523 /*
2524  * Free the resources associated with a response queue (possibly including a
2525  * free list).
2526  */
2527 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2528              struct sge_fl *fl)
2529 {
2530     struct sge *s = &adapter->sge;
2531     unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2532 
2533     t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
2534              rspq->cntxt_id, flid, 0xffff);
2535     dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
2536               rspq->desc, rspq->phys_addr);
2537     netif_napi_del(&rspq->napi);
2538     rspq->netdev = NULL;
2539     rspq->cntxt_id = 0;
2540     rspq->abs_id = 0;
2541     rspq->desc = NULL;
2542 
2543     if (fl) {
2544         free_rx_bufs(adapter, fl, fl->avail);
2545         dma_free_coherent(adapter->pdev_dev,
2546                   fl->size * sizeof(*fl->desc) + s->stat_len,
2547                   fl->desc, fl->addr);
2548         kfree(fl->sdesc);
2549         fl->sdesc = NULL;
2550         fl->cntxt_id = 0;
2551         fl->desc = NULL;
2552     }
2553 }
2554 
2555 /**
2556  *  t4vf_free_sge_resources - free SGE resources
2557  *  @adapter: the adapter
2558  *
2559  *  Frees resources used by the SGE queue sets.
2560  */
2561 void t4vf_free_sge_resources(struct adapter *adapter)
2562 {
2563     struct sge *s = &adapter->sge;
2564     struct sge_eth_rxq *rxq = s->ethrxq;
2565     struct sge_eth_txq *txq = s->ethtxq;
2566     struct sge_rspq *evtq = &s->fw_evtq;
2567     struct sge_rspq *intrq = &s->intrq;
2568     int qs;
2569 
2570     for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2571         if (rxq->rspq.desc)
2572             free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2573         if (txq->q.desc) {
2574             t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2575             free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2576             kfree(txq->q.sdesc);
2577             free_txq(adapter, &txq->q);
2578         }
2579     }
2580     if (evtq->desc)
2581         free_rspq_fl(adapter, evtq, NULL);
2582     if (intrq->desc)
2583         free_rspq_fl(adapter, intrq, NULL);
2584 }
2585 
2586 /**
2587  *  t4vf_sge_start - enable SGE operation
2588  *  @adapter: the adapter
2589  *
2590  *  Start tasklets and timers associated with the DMA engine.
2591  */
2592 void t4vf_sge_start(struct adapter *adapter)
2593 {
2594     adapter->sge.ethtxq_rover = 0;
2595     mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2596     mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2597 }
2598 
2599 /**
2600  *  t4vf_sge_stop - disable SGE operation
2601  *  @adapter: the adapter
2602  *
2603  *  Stop tasklets and timers associated with the DMA engine.  Note that
2604  *  this is effective only if measures have been taken to disable any HW
2605  *  events that may restart them.
2606  */
2607 void t4vf_sge_stop(struct adapter *adapter)
2608 {
2609     struct sge *s = &adapter->sge;
2610 
2611     if (s->rx_timer.function)
2612         del_timer_sync(&s->rx_timer);
2613     if (s->tx_timer.function)
2614         del_timer_sync(&s->tx_timer);
2615 }
2616 
2617 /**
2618  *  t4vf_sge_init - initialize SGE
2619  *  @adapter: the adapter
2620  *
2621  *  Performs SGE initialization needed every time after a chip reset.
2622  *  We do not initialize any of the queue sets here; instead, the driver
2623  *  top-level must request those individually.  We also do not enable DMA
2624  *  here, that should be done after the queues have been set up.
2625  */
2626 int t4vf_sge_init(struct adapter *adapter)
2627 {
2628     struct sge_params *sge_params = &adapter->params.sge;
2629     u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
2630     u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
2631     struct sge *s = &adapter->sge;
2632 
2633     /*
2634      * Start by vetting the basic SGE parameters which have been set up by
2635      * the Physical Function Driver.  Ideally we should be able to deal
2636      * with _any_ configuration.  Practice is different ...
2637      */
2638 
2639     /* We only bother using the Large Page logic if the Large Page Buffer
2640      * is larger than our Page Size Buffer.
2641      */
2642     if (fl_large_pg <= fl_small_pg)
2643         fl_large_pg = 0;
2644 
2645     /* The Page Size Buffer must be exactly equal to our Page Size and the
2646      * Large Page Size Buffer should be 0 (per above) or a power of 2.
2647      */
2648     if (fl_small_pg != PAGE_SIZE ||
2649         (fl_large_pg & (fl_large_pg - 1)) != 0) {
2650         dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2651             fl_small_pg, fl_large_pg);
2652         return -EINVAL;
2653     }
2654     if ((sge_params->sge_control & RXPKTCPLMODE_F) !=
2655         RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2656         dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2657         return -EINVAL;
2658     }
2659 
2660     /*
2661      * Now translate the adapter parameters into our internal forms.
2662      */
2663     if (fl_large_pg)
2664         s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2665     s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2666             ? 128 : 64);
2667     s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2668     s->fl_align = t4vf_fl_pkt_align(adapter);
2669 
2670     /* A FL with <= fl_starve_thres buffers is starving and a periodic
2671      * timer will attempt to refill it.  This needs to be larger than the
2672      * SGE's Egress Congestion Threshold.  If it isn't, then we can get
2673      * stuck waiting for new packets while the SGE is waiting for us to
2674      * give it more Free List entries.  (Note that the SGE's Egress
2675      * Congestion Threshold is in units of 2 Free List pointers.)
2676      */
2677     switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
2678     case CHELSIO_T4:
2679         s->fl_starve_thres =
2680            EGRTHRESHOLD_G(sge_params->sge_congestion_control);
2681         break;
2682     case CHELSIO_T5:
2683         s->fl_starve_thres =
2684            EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2685         break;
2686     case CHELSIO_T6:
2687     default:
2688         s->fl_starve_thres =
2689            T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
2690         break;
2691     }
2692     s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
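    /*
     * For example, an Egress Congestion Threshold of 16 corresponds to
     * 32 Free List pointers (the threshold is in units of 2 pointers),
     * so fl_starve_thres becomes 2 * 16 + 1 = 33 buffers.
     */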
2693 
2694     /*
2695      * Set up tasklet timers.
2696      */
2697     timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
2698     timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
2699 
2700     /*
2701      * Initialize Forwarded Interrupt Queue lock.
2702      */
2703     spin_lock_init(&s->intrq_lock);
2704 
2705     return 0;
2706 }