0001 /* SPDX-License-Identifier: GPL-2.0 */
0002 /* Copyright(c) 2013 - 2018 Intel Corporation. */
0003 
0004 #ifndef _IAVF_TXRX_H_
0005 #define _IAVF_TXRX_H_
0006 
0007 /* Interrupt Throttling and Rate Limiting Goodies */
0008 #define IAVF_DEFAULT_IRQ_WORK      256
0009 
0010 /* The datasheets for the X710 and XL710 indicate that the maximum value for
0011  * the ITR is 8160usec which is then called out as 0xFF0 with a 2usec
0012  * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
0013  * the register value, which is divided by 2, let's use the actual values and
0014  * avoid an excessive amount of translation.
0015  */
0016 #define IAVF_ITR_DYNAMIC    0x8000  /* use top bit as a flag */
0017 #define IAVF_ITR_MASK       0x1FFE  /* mask for ITR register value */
0018 #define IAVF_MIN_ITR             2  /* reg uses 2 usec resolution */
0019 #define IAVF_ITR_100K           10  /* all values below must be even */
0020 #define IAVF_ITR_50K            20
0021 #define IAVF_ITR_20K            50
0022 #define IAVF_ITR_18K            60
0023 #define IAVF_ITR_8K            122
0024 #define IAVF_MAX_ITR          8160  /* maximum value as per datasheet */
0025 #define ITR_TO_REG(setting) ((setting) & ~IAVF_ITR_DYNAMIC)
0026 #define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~IAVF_ITR_MASK)
0027 #define ITR_IS_DYNAMIC(setting) (!!((setting) & IAVF_ITR_DYNAMIC))
0028 
0029 #define IAVF_ITR_RX_DEF     (IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
0030 #define IAVF_ITR_TX_DEF     (IAVF_ITR_20K | IAVF_ITR_DYNAMIC)
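
/* Worked example (illustrative only): IAVF_ITR_20K is 50, i.e. a 50 usec
 * interval, or roughly 20,000 interrupts per second (1,000,000 / 50).
 * For the default setting above:
 *
 *     ITR_TO_REG(IAVF_ITR_RX_DEF)     == 50    (dynamic flag masked off)
 *     ITR_IS_DYNAMIC(IAVF_ITR_RX_DEF) == true
 *
 * Given the 2 usec register resolution noted above, the value ultimately
 * written to the hardware ITR field would be 50 / 2 == 25; exactly how and
 * when the register is written is driver policy, not shown here.
 */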
0031 
0032 /* 0x40 is the enable bit for interrupt rate limiting, and must be set if
0033  * the value of the rate limit is non-zero
0034  */
0035 #define INTRL_ENA                  BIT(6)
0036 #define IAVF_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
0037 #define INTRL_REG_TO_USEC(intrl) (((intrl) & ~INTRL_ENA) << 2)
0038 #define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
0039 #define IAVF_INTRL_8K              125     /* 8000 ints/sec */
0040 #define IAVF_INTRL_62K             16      /* 62500 ints/sec */
0041 #define IAVF_INTRL_83K             12      /* 83333 ints/sec */
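
/* Worked example (illustrative only): with the 4 usec resolution and the
 * enable bit,
 *
 *     INTRL_USEC_TO_REG(IAVF_INTRL_8K) == (125 >> 2) | INTRL_ENA == 0x5f
 *     INTRL_REG_TO_USEC(0x5f)          == 0x1f << 2              == 124
 *
 * so one usec is lost to the 4 usec granularity; 125 usec corresponds to
 * roughly 1,000,000 / 125 == 8000 interrupts per second, matching the
 * comment on IAVF_INTRL_8K above.
 */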
0042 
0043 #define IAVF_QUEUE_END_OF_LIST 0x7FF
0044 
0045 /* this enum matches hardware bits and is meant to be used by DYN_CTLN
0046  * registers and QINT registers, or more generally anywhere in the manual
0047  * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
0048  * register; instead it is a special value meaning "don't update" ITR0/1/2.
0049  */
0050 enum iavf_dyn_idx_t {
0051     IAVF_IDX_ITR0 = 0,
0052     IAVF_IDX_ITR1 = 1,
0053     IAVF_IDX_ITR2 = 2,
0054     IAVF_ITR_NONE = 3   /* ITR_NONE must not be used as an index */
0055 };
0056 
0057 /* these are indexes into ITRN registers */
0058 #define IAVF_RX_ITR    IAVF_IDX_ITR0
0059 #define IAVF_TX_ITR    IAVF_IDX_ITR1
0060 #define IAVF_PE_ITR    IAVF_IDX_ITR2
0061 
0062 /* Supported RSS offloads */
0063 #define IAVF_DEFAULT_RSS_HENA ( \
0064     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_UDP) | \
0065     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
0066     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP) | \
0067     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
0068     BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV4) | \
0069     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_UDP) | \
0070     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP) | \
0071     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
0072     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
0073     BIT_ULL(IAVF_FILTER_PCTYPE_FRAG_IPV6) | \
0074     BIT_ULL(IAVF_FILTER_PCTYPE_L2_PAYLOAD))
0075 
0076 #define IAVF_DEFAULT_RSS_HENA_EXPANDED (IAVF_DEFAULT_RSS_HENA | \
0077     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
0078     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
0079     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
0080     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
0081     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
0082     BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
0083 
0084 /* Supported Rx Buffer Sizes (a multiple of 128) */
0085 #define IAVF_RXBUFFER_256   256
0086 #define IAVF_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
0087 #define IAVF_RXBUFFER_2048  2048
0088 #define IAVF_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
0089 #define IAVF_MAX_RXBUFFER   9728  /* largest size for single descriptor */
0090 
0091 /* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
0092  * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
0093  * this adds up to 512 bytes of extra data, meaning the smallest allocation
0094  * we could have is 1K.
0095  * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
0096  * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
0097  */
0098 #define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
0099 #define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
0100 #define iavf_rx_desc iavf_32byte_rx_desc
0101 
0102 #define IAVF_RX_DMA_ATTR \
0103     (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
0104 
0105 /* Attempt to maximize the headroom available for incoming frames.  We
0106  * use a 2K buffer for receives and need 1536/1534 to store the data for
0107  * the frame.  This leaves us with 512 bytes of room.  From that we need
0108  * to deduct the space needed for the shared info and the padding needed
0109  * to IP align the frame.
0110  *
0111  * Note: For cache line sizes 256 or larger this value is going to end
0112  *   up negative.  In these cases we should fall back to the legacy
0113  *   receive path.
0114  */
0115 #if (PAGE_SIZE < 8192)
0116 #define IAVF_2K_TOO_SMALL_WITH_PADDING \
0117 ((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))
0118 
0119 static inline int iavf_compute_pad(int rx_buf_len)
0120 {
0121     int page_size, pad_size;
0122 
0123     page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
0124     pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
0125 
0126     return pad_size;
0127 }
0128 
0129 static inline int iavf_skb_pad(void)
0130 {
0131     int rx_buf_len;
0132 
0133     /* If a 2K buffer cannot handle a standard Ethernet frame then
0134      * optimize padding for a 3K buffer instead of a 1.5K buffer.
0135      *
0136      * For a 3K buffer we need to add enough padding to allow for
0137      * tailroom due to NET_IP_ALIGN possibly shifting us out of
0138      * cache-line alignment.
0139      */
0140     if (IAVF_2K_TOO_SMALL_WITH_PADDING)
0141         rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
0142     else
0143         rx_buf_len = IAVF_RXBUFFER_1536;
0144 
0145     /* if needed make room for NET_IP_ALIGN */
0146     rx_buf_len -= NET_IP_ALIGN;
0147 
0148     return iavf_compute_pad(rx_buf_len);
0149 }
0150 
0151 #define IAVF_SKB_PAD iavf_skb_pad()
0152 #else
0153 #define IAVF_2K_TOO_SMALL_WITH_PADDING false
0154 #define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
0155 #endif
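
/* Worked example (illustrative only; the concrete numbers are assumptions
 * that vary by architecture and kernel config).  Assume PAGE_SIZE == 4096,
 * NET_SKB_PAD == 64, NET_IP_ALIGN == 2 and
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320.  Then
 * SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048) == 2048 - 320 == 1728, so
 * NET_SKB_PAD + IAVF_RXBUFFER_1536 == 1600 still fits and
 * IAVF_2K_TOO_SMALL_WITH_PADDING evaluates to false.  iavf_skb_pad() then
 * computes iavf_compute_pad(1536 - 2 == 1534): ALIGN(1534, 2048) == 2048,
 * and the returned headroom is 1728 - 1534 == 194 bytes.
 */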
0156 
0157 /**
0158  * iavf_test_staterr - tests bits in Rx descriptor status and error fields
0159  * @rx_desc: pointer to receive descriptor (in le64 format)
0160  * @stat_err_bits: value to mask
0161  *
0162  * This function does some fast chicanery in order to return the
0163  * value of the mask which is really only used for boolean tests.
0164  * The status_error_len doesn't need to be shifted because it begins
0165  * at offset zero.
0166  */
0167 static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc,
0168                      const u64 stat_err_bits)
0169 {
0170     return !!(rx_desc->wb.qword1.status_error_len &
0171           cpu_to_le64(stat_err_bits));
0172 }
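
/* Usage sketch (illustrative; the bit name is assumed to come from the
 * IAVF_RX_DESC_STATUS_* shifts in iavf_type.h):
 *
 *     if (iavf_test_staterr(rx_desc, BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)))
 *             ...descriptor has been written back by hardware...
 *
 * The real Rx clean loop lives in iavf_txrx.c.
 */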
0173 
0174 /* How many Rx Buffers do we bundle into one write to the hardware ? */
0175 #define IAVF_RX_INCREMENT(r, i) \
0176     do {                    \
0177         (i)++;              \
0178         if ((i) == (r)->count)      \
0179             i = 0;          \
0180         r->next_to_clean = i;       \
0181     } while (0)
0182 
0183 #define IAVF_RX_NEXT_DESC(r, i, n)      \
0184     do {                    \
0185         (i)++;              \
0186         if ((i) == (r)->count)      \
0187             i = 0;          \
0188         (n) = IAVF_RX_DESC((r), (i));   \
0189     } while (0)
0190 
0191 #define IAVF_RX_NEXT_DESC_PREFETCH(r, i, n)     \
0192     do {                        \
0193         IAVF_RX_NEXT_DESC((r), (i), (n));   \
0194         prefetch((n));              \
0195     } while (0)
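
/* Usage sketch (illustrative; variable names are assumptions): inside an Rx
 * clean loop one would typically advance with
 *
 *     IAVF_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
 *
 * which wraps 'i' back to 0 at the end of the ring, looks up the next
 * descriptor via IAVF_RX_DESC() and prefetches it.
 */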
0196 
0197 #define IAVF_MAX_BUFFER_TXD 8
0198 #define IAVF_MIN_TX_LEN     17
0199 
0200 /* The size limit for a transmit buffer in a descriptor is (16K - 1).
0201  * In order to align with the read requests we will align the value to
0202  * the nearest 4K which represents our maximum read request size.
0203  */
0204 #define IAVF_MAX_READ_REQ_SIZE      4096
0205 #define IAVF_MAX_DATA_PER_TXD       (16 * 1024 - 1)
0206 #define IAVF_MAX_DATA_PER_TXD_ALIGNED \
0207     (IAVF_MAX_DATA_PER_TXD & ~(IAVF_MAX_READ_REQ_SIZE - 1))
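
/* Worked arithmetic (illustrative only): IAVF_MAX_DATA_PER_TXD == 16383
 * (0x3fff) and ~(4096 - 1) == ~0xfff, so
 * IAVF_MAX_DATA_PER_TXD_ALIGNED == 0x3fff & ~0xfff == 0x3000 == 12288,
 * i.e. the 12K per-descriptor limit referenced in the comment below.
 */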
0208 
0209 /**
0210  * iavf_txd_use_count  - estimate the number of descriptors needed for Tx
0211  * @size: transmit request size in bytes
0212  *
0213  * Due to hardware alignment restrictions (4K alignment), we need to
0214  * assume that we can have no more than 12K of data per descriptor, even
0215  * though each descriptor can take up to 16K - 1 bytes of aligned memory.
0216  * Thus, we need to divide by 12K. But division is slow! Instead,
0217  * we decompose the operation into shifts and one relatively cheap
0218  * multiply operation.
0219  *
0220  * To divide by 12K, we first divide by 4K, then divide by 3:
0221  *     To divide by 4K, shift right by 12 bits
0222  *     To divide by 3, multiply by 85, then divide by 256
0223  *     (Divide by 256 is done by shifting right by 8 bits)
0224  * Finally, we add one to round up. Because 256 isn't an exact multiple of
0225  * 3, we'll underestimate near each multiple of 12K. This is actually more
0226  * accurate as we have 4K - 1 of wiggle room that we can fit into the last
0227  * segment.  For our purposes this is accurate out to 1M which is orders of
0228  * magnitude greater than our largest possible GSO size.
0229  *
0230  * This would then be implemented as:
0231  *     return (((size >> 12) * 85) >> 8) + 1;
0232  *
0233  * Since multiplication and division are commutative, we can reorder
0234  * operations into:
0235  *     return ((size * 85) >> 20) + 1;
0236  */
0237 static inline unsigned int iavf_txd_use_count(unsigned int size)
0238 {
0239     return ((size * 85) >> 20) + 1;
0240 }
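
/* Worked examples (illustrative only):
 *     iavf_txd_use_count(256)   == ((256   * 85) >> 20) + 1 == 0 + 1 == 1
 *     iavf_txd_use_count(16000) == ((16000 * 85) >> 20) + 1 == 1 + 1 == 2
 *     iavf_txd_use_count(65536) == ((65536 * 85) >> 20) + 1 == 5 + 1 == 6
 * which matches ceil(size / 12K) for these sizes (65536 / 12288 == 5.33).
 */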
0241 
0242 /* Tx Descriptors needed, worst case */
0243 #define DESC_NEEDED (MAX_SKB_FRAGS + 6)
0244 #define IAVF_MIN_DESC_PENDING   4
0245 
0246 #define IAVF_TX_FLAGS_HW_VLAN           BIT(1)
0247 #define IAVF_TX_FLAGS_SW_VLAN           BIT(2)
0248 #define IAVF_TX_FLAGS_TSO           BIT(3)
0249 #define IAVF_TX_FLAGS_IPV4          BIT(4)
0250 #define IAVF_TX_FLAGS_IPV6          BIT(5)
0251 #define IAVF_TX_FLAGS_FCCRC         BIT(6)
0252 #define IAVF_TX_FLAGS_FSO           BIT(7)
0253 #define IAVF_TX_FLAGS_FD_SB         BIT(9)
0254 #define IAVF_TX_FLAGS_VXLAN_TUNNEL      BIT(10)
0255 #define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN  BIT(11)
0256 #define IAVF_TX_FLAGS_VLAN_MASK         0xffff0000
0257 #define IAVF_TX_FLAGS_VLAN_PRIO_MASK        0xe0000000
0258 #define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT       29
0259 #define IAVF_TX_FLAGS_VLAN_SHIFT        16
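
/* Usage sketch (illustrative; variable names are assumptions): the VLAN tag
 * is carried in the upper 16 bits of tx_flags, e.g.
 *
 *     tx_flags |= (u32)vlan_tci << IAVF_TX_FLAGS_VLAN_SHIFT;
 *     prio      = (tx_flags & IAVF_TX_FLAGS_VLAN_PRIO_MASK) >>
 *                 IAVF_TX_FLAGS_VLAN_PRIO_SHIFT;
 *
 * so the 802.1p priority bits (TCI bits 15:13) land in bits 31:29, which is
 * what the PRIO mask/shift above select.
 */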
0260 
0261 struct iavf_tx_buffer {
0262     struct iavf_tx_desc *next_to_watch;
0263     union {
0264         struct sk_buff *skb;
0265         void *raw_buf;
0266     };
0267     unsigned int bytecount;
0268     unsigned short gso_segs;
0269 
0270     DEFINE_DMA_UNMAP_ADDR(dma);
0271     DEFINE_DMA_UNMAP_LEN(len);
0272     u32 tx_flags;
0273 };
0274 
0275 struct iavf_rx_buffer {
0276     dma_addr_t dma;
0277     struct page *page;
0278 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
0279     __u32 page_offset;
0280 #else
0281     __u16 page_offset;
0282 #endif
0283     __u16 pagecnt_bias;
0284 };
0285 
0286 struct iavf_queue_stats {
0287     u64 packets;
0288     u64 bytes;
0289 };
0290 
0291 struct iavf_tx_queue_stats {
0292     u64 restart_queue;
0293     u64 tx_busy;
0294     u64 tx_done_old;
0295     u64 tx_linearize;
0296     u64 tx_force_wb;
0297     int prev_pkt_ctr;
0298     u64 tx_lost_interrupt;
0299 };
0300 
0301 struct iavf_rx_queue_stats {
0302     u64 non_eop_descs;
0303     u64 alloc_page_failed;
0304     u64 alloc_buff_failed;
0305     u64 page_reuse_count;
0306     u64 realloc_count;
0307 };
0308 
0309 enum iavf_ring_state_t {
0310     __IAVF_TX_FDIR_INIT_DONE,
0311     __IAVF_TX_XPS_INIT_DONE,
0312     __IAVF_RING_STATE_NBITS /* must be last */
0313 };
0314 
0315 /* some useful defines for virtchannel interface, which
0316  * is the only remaining user of header split
0317  */
0318 #define IAVF_RX_DTYPE_NO_SPLIT      0
0319 #define IAVF_RX_DTYPE_HEADER_SPLIT  1
0320 #define IAVF_RX_DTYPE_SPLIT_ALWAYS  2
0321 #define IAVF_RX_SPLIT_L2      0x1
0322 #define IAVF_RX_SPLIT_IP      0x2
0323 #define IAVF_RX_SPLIT_TCP_UDP 0x4
0324 #define IAVF_RX_SPLIT_SCTP    0x8
0325 
0326 /* struct that defines a descriptor ring, associated with a VSI */
0327 struct iavf_ring {
0328     struct iavf_ring *next;     /* pointer to next ring in q_vector */
0329     void *desc;         /* Descriptor ring memory */
0330     struct device *dev;     /* Used for DMA mapping */
0331     struct net_device *netdev;  /* netdev ring maps to */
0332     union {
0333         struct iavf_tx_buffer *tx_bi;
0334         struct iavf_rx_buffer *rx_bi;
0335     };
0336     DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
0337     u16 queue_index;        /* Queue number of ring */
0338     u8 dcb_tc;          /* Traffic class of ring */
0339     u8 __iomem *tail;
0340 
0341     /* high bit set means dynamic, use accessor routines to read/write.
0342      * hardware only supports 2us resolution for the ITR registers.
0343      * these values always store the USER setting, and must be converted
0344      * before programming to a register.
0345      */
0346     u16 itr_setting;
0347 
0348     u16 count;          /* Number of descriptors */
0349     u16 reg_idx;            /* HW register index of the ring */
0350     u16 rx_buf_len;
0351 
0352     /* used in interrupt processing */
0353     u16 next_to_use;
0354     u16 next_to_clean;
0355 
0356     u8 atr_sample_rate;
0357     u8 atr_count;
0358 
0359     bool ring_active;       /* is ring online or not */
0360     bool arm_wb;        /* do something to arm write back */
0361     u8 packet_stride;
0362 
0363     u16 flags;
0364 #define IAVF_TXR_FLAGS_WB_ON_ITR        BIT(0)
0365 #define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED    BIT(1)
0366 #define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
0367 #define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2  BIT(4)
0368 #define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2    BIT(5)
0369 
0370     /* stats structs */
0371     struct iavf_queue_stats stats;
0372     struct u64_stats_sync syncp;
0373     union {
0374         struct iavf_tx_queue_stats tx_stats;
0375         struct iavf_rx_queue_stats rx_stats;
0376     };
0377 
0378     unsigned int size;      /* length of descriptor ring in bytes */
0379     dma_addr_t dma;         /* physical address of ring */
0380 
0381     struct iavf_vsi *vsi;       /* Backreference to associated VSI */
0382     struct iavf_q_vector *q_vector; /* Backreference to associated vector */
0383 
0384     struct rcu_head rcu;        /* to avoid race on free */
0385     u16 next_to_alloc;
0386     struct sk_buff *skb;        /* When iavf_clean_rx_ring_irq() must
0387                      * return before it sees the EOP for
0388                      * the current packet, we save that skb
0389                      * here and resume receiving this
0390                      * packet the next time
0391                      * iavf_clean_rx_ring_irq() is called
0392                      * for this ring.
0393                      */
0394 } ____cacheline_internodealigned_in_smp;
0395 
0396 static inline bool ring_uses_build_skb(struct iavf_ring *ring)
0397 {
0398     return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
0399 }
0400 
0401 static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
0402 {
0403     ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
0404 }
0405 
0406 static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
0407 {
0408     ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
0409 }
0410 
0411 #define IAVF_ITR_ADAPTIVE_MIN_INC   0x0002
0412 #define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002
0413 #define IAVF_ITR_ADAPTIVE_MAX_USECS 0x007e
0414 #define IAVF_ITR_ADAPTIVE_LATENCY   0x8000
0415 #define IAVF_ITR_ADAPTIVE_BULK      0x0000
0416 #define ITR_IS_BULK(x) (!((x) & IAVF_ITR_ADAPTIVE_LATENCY))
0417 
0418 struct iavf_ring_container {
0419     struct iavf_ring *ring;     /* pointer to linked list of ring(s) */
0420     unsigned long next_update;  /* jiffies value of next update */
0421     unsigned int total_bytes;   /* total bytes processed this int */
0422     unsigned int total_packets; /* total packets processed this int */
0423     u16 count;
0424     u16 target_itr;         /* target ITR setting for ring(s) */
0425     u16 current_itr;        /* current ITR setting for ring(s) */
0426 };
0427 
0428 /* iterator for handling rings in ring container */
0429 #define iavf_for_each_ring(pos, head) \
0430     for (pos = (head).ring; pos != NULL; pos = pos->next)
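
/* Usage sketch (illustrative; assumes the q_vector's containers are named
 * 'rx'/'tx' as elsewhere in the driver):
 *
 *     struct iavf_ring *ring;
 *
 *     iavf_for_each_ring(ring, q_vector->tx) {
 *             ...per-ring Tx cleanup work...
 *     }
 */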
0431 
0432 static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
0433 {
0434 #if (PAGE_SIZE < 8192)
0435     if (ring->rx_buf_len > (PAGE_SIZE / 2))
0436         return 1;
0437 #endif
0438     return 0;
0439 }
0440 
0441 #define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
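
/* Worked example (illustrative only): with 4K pages a 3072-byte Rx buffer
 * no longer fits in half a page, so iavf_rx_pg_order() returns 1 and
 * iavf_rx_pg_size() is 8192; a 2048-byte buffer gives order 0 and a
 * 4096-byte page.  With PAGE_SIZE >= 8192 the order is always 0.
 */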
0442 
0443 bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
0444 netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
0445 void iavf_clean_tx_ring(struct iavf_ring *tx_ring);
0446 void iavf_clean_rx_ring(struct iavf_ring *rx_ring);
0447 int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
0448 int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring);
0449 void iavf_free_tx_resources(struct iavf_ring *tx_ring);
0450 void iavf_free_rx_resources(struct iavf_ring *rx_ring);
0451 int iavf_napi_poll(struct napi_struct *napi, int budget);
0452 void iavf_force_wb(struct iavf_vsi *vsi, struct iavf_q_vector *q_vector);
0453 u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw);
0454 void iavf_detect_recover_hung(struct iavf_vsi *vsi);
0455 int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size);
0456 bool __iavf_chk_linearize(struct sk_buff *skb);
0457 
0458 /**
0459  * iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
0460  * @skb:     send buffer
0461  *
0462  * Returns the number of data descriptors needed for this skb: at least one
0463  * for the linear data plus one or more per fragment, since a single
0464  * descriptor carries at most IAVF_MAX_DATA_PER_TXD_ALIGNED bytes.
0465  **/
0466 static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
0467 {
0468     const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
0469     unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
0470     int count = 0, size = skb_headlen(skb);
0471 
0472     for (;;) {
0473         count += iavf_txd_use_count(size);
0474 
0475         if (!nr_frags--)
0476             break;
0477 
0478         size = skb_frag_size(frag++);
0479     }
0480 
0481     return count;
0482 }
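
/* Worked example (illustrative only): an skb with 1000 bytes of linear data
 * and two 2000-byte fragments needs
 * iavf_txd_use_count(1000) + 2 * iavf_txd_use_count(2000) == 1 + 1 + 1 == 3
 * data descriptors.
 */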
0483 
0484 /**
0485  * iavf_maybe_stop_tx - 1st level check for Tx stop conditions
0486  * @tx_ring: the ring to be checked
0487  * @size:    the number of descriptors we want to ensure are available
0488  *
0489  * Returns 0 if stop is not needed
0490  **/
0491 static inline int iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
0492 {
0493     if (likely(IAVF_DESC_UNUSED(tx_ring) >= size))
0494         return 0;
0495     return __iavf_maybe_stop_tx(tx_ring, size);
0496 }
0497 
0498 /**
0499  * iavf_chk_linearize - Check if there are more than 8 fragments per packet
0500  * @skb:      send buffer
0501  * @count:    number of buffers used
0502  *
0503  * Note: Our HW can't scatter-gather more than 8 fragments to build
0504  * a packet on the wire and so we need to figure out the cases where we
0505  * need to linearize the skb.
0506  **/
0507 static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
0508 {
0509     /* Both TSO and single send will work if count is less than 8 */
0510     if (likely(count < IAVF_MAX_BUFFER_TXD))
0511         return false;
0512 
0513     if (skb_is_gso(skb))
0514         return __iavf_chk_linearize(skb);
0515 
0516     /* we can support up to 8 data buffers for a single send */
0517     return count != IAVF_MAX_BUFFER_TXD;
0518 }
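
/* Illustrative sketch (guarded by "#if 0" so it is never compiled) of how
 * the helpers above typically combine at the start of a transmit routine.
 * The variable names, the out_drop label and the "+ 4 + 1" margin reserved
 * for context/offload descriptors are assumptions; the real logic lives in
 * iavf_xmit_frame() in iavf_txrx.c.
 */
#if 0
    int count = iavf_xmit_descriptor_count(skb);

    if (iavf_chk_linearize(skb, count)) {
        if (__skb_linearize(skb))
            goto out_drop;      /* hypothetical error path */
        count = iavf_txd_use_count(skb->len);
    }

    if (iavf_maybe_stop_tx(tx_ring, count + 4 + 1))
        return NETDEV_TX_BUSY;  /* ring too full, try again later */
#endif
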
0519 /**
0520  * txring_txq - helper to convert from a ring to a queue
0521  * @ring: Tx ring to find the netdev equivalent of
0522  **/
0523 static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
0524 {
0525     return netdev_get_tx_queue(ring->netdev, ring->queue_index);
0526 }
0527 #endif /* _IAVF_TXRX_H_ */