/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>

/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK      256

/* The datasheet for the X710 and XL710 indicates that the maximum value for
 * the ITR is 8160 usec, which is called out as 0xFF0 at the register's 2 usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is the usec value divided by 2, let's use the
 * actual usec values and avoid an excessive amount of translation.
 */
#define I40E_ITR_DYNAMIC    0x8000  /* use top bit as a flag */
#define I40E_ITR_MASK       0x1FFE  /* mask for ITR register value */
#define I40E_MIN_ITR             2  /* reg uses 2 usec resolution */
#define I40E_ITR_20K            50
#define I40E_ITR_8K        122
#define I40E_MAX_ITR          8160  /* maximum value as per datasheet */
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF     (I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF     (I40E_ITR_20K | I40E_ITR_DYNAMIC)
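
/*
 * Worked illustration (not part of the driver; the helper name below is
 * hypothetical): under the convention above, a default setting such as
 * I40E_ITR_RX_DEF stores the usec value (I40E_ITR_20K == 50) with the
 * dynamic flag in the top bit.  ITR_IS_DYNAMIC() tests that flag and
 * ITR_TO_REG() strips it, leaving the plain usec value for later
 * conversion at the register's 2 usec resolution.
 */
static inline u16 i40e_example_itr_usecs(u16 setting)
{
    /* for I40E_ITR_RX_DEF this returns 50 */
    return ITR_IS_DYNAMIC(setting) ? ITR_TO_REG(setting) : setting;
}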

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA                  BIT(6)
#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit, in usecs, to convert
 *
 * This function converts an interrupt rate limit in usecs to the appropriate
 * register format expected by the firmware when setting the interrupt rate
 * limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
    if (intrl >> 2)
        return ((intrl >> 2) | INTRL_ENA);
    else
        return 0;
}
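
/*
 * Worked example (illustrative only): with the 4 usec register resolution,
 * i40e_intrl_usec_to_reg(20) returns (20 >> 2) | INTRL_ENA == 0x45, and
 * INTRL_REG_TO_USEC(0x45) recovers (0x45 & ~INTRL_ENA) << 2 == 20.  Values
 * below 4 usecs return 0, i.e. rate limiting stays disabled, and the
 * largest programmable limit is I40E_MAX_INTRL * 4 == 236 usecs.
 */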

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere the manual
 * mentions ITR_INDX.  ITR_NONE cannot be used as an index 'n' into any
 * register; instead it is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx_t {
    I40E_IDX_ITR0 = 0,
    I40E_IDX_ITR1 = 1,
    I40E_IDX_ITR2 = 2,
    I40E_ITR_NONE = 3   /* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR    I40E_IDX_ITR0
#define I40E_TX_ITR    I40E_IDX_ITR1

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
    BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
    BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
    BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
    BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
    (((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
      I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
 * this adds up to roughly 512 bytes of extra data, meaning the smallest
 * allocation we could have is 1K.
 * e.g. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * e.g. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_16byte_rx_desc

#define I40E_RX_DMA_ATTR \
    (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *   up negative.  In these cases we should fall back to the legacy
 *   receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
    int page_size, pad_size;

    page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
    pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

    return pad_size;
}

static inline int i40e_skb_pad(void)
{
    int rx_buf_len;

    /* If a 2K buffer cannot handle a standard Ethernet frame then
     * optimize padding for a 3K buffer instead of a 1.5K buffer.
     *
     * For a 3K buffer we need to add enough padding to allow for
     * tailroom due to NET_IP_ALIGN possibly shifting us out of
     * cache-line alignment.
     */
    if (I40E_2K_TOO_SMALL_WITH_PADDING)
        rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
    else
        rx_buf_len = I40E_RXBUFFER_1536;

    /* if needed make room for NET_IP_ALIGN */
    rx_buf_len -= NET_IP_ALIGN;

    return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
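
/*
 * Worked example (illustrative only, assuming a typical 4K-page build where
 * NET_SKB_PAD == 64, NET_IP_ALIGN == 0 and a 2K buffer can hold a padded
 * 1536-byte frame): I40E_2K_TOO_SMALL_WITH_PADDING evaluates to false, so
 * i40e_skb_pad() reduces to i40e_compute_pad(1536), i.e.
 * SKB_WITH_OVERHEAD(2048) - 1536 -- whatever room is left in half a page
 * after the frame data and skb_shared_info becomes headroom.
 */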

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
                     const u64 stat_err_bits)
{
    return !!(rx_desc->wb.qword1.status_error_len &
          cpu_to_le64(stat_err_bits));
}
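
/*
 * Usage sketch (illustrative; the helper below is hypothetical and not part
 * of the driver): because status_error_len starts at bit 0, a caller can
 * test the descriptor-done (DD) status bit by passing its mask directly,
 * without byte-swapping the whole qword first.
 */
static inline bool i40e_example_rx_desc_done(union i40e_rx_desc *rx_desc)
{
    /* DD is bit 0 of the Rx descriptor status field */
    return i40e_test_staterr(rx_desc, BIT_ULL(0));
}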

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE    32  /* Must be power of 2 */

#define I40E_RX_NEXT_DESC(r, i, n)      \
    do {                    \
        (i)++;              \
        if ((i) == (r)->count)      \
            i = 0;          \
        (n) = I40E_RX_DESC((r), (i));   \
    } while (0)


#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN     17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE      4096
#define I40E_MAX_DATA_PER_TXD       (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
    (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))

/**
 * i40e_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment.  For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
    return ((size * 85) >> 20) + 1;
}
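
/*
 * Worked example of the approximation above (illustrative only):
 *   size = 65536: (65536 * 85) >> 20 == 5, plus 1 gives 6 descriptors,
 *   which matches DIV_ROUND_UP(65536, I40E_MAX_DATA_PER_TXD_ALIGNED)
 *   since 0x3FFF & ~0xFFF == 12288 and 65536 / 12288 is about 5.3.
 *   size = 12288: (12288 * 85) >> 20 == 0, plus 1 gives 1 descriptor;
 *   the slight underestimate near multiples of 12K is safe because one
 *   descriptor can actually hold up to 16K - 1 bytes.
 */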

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)

#define I40E_TX_FLAGS_HW_VLAN       BIT(1)
#define I40E_TX_FLAGS_SW_VLAN       BIT(2)
#define I40E_TX_FLAGS_TSO       BIT(3)
#define I40E_TX_FLAGS_IPV4      BIT(4)
#define I40E_TX_FLAGS_IPV6      BIT(5)
#define I40E_TX_FLAGS_TSYN      BIT(8)
#define I40E_TX_FLAGS_FD_SB     BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL    BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK     0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK    0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT   29
#define I40E_TX_FLAGS_VLAN_SHIFT    16

struct i40e_tx_buffer {
    struct i40e_tx_desc *next_to_watch;
    union {
        struct xdp_frame *xdpf;
        struct sk_buff *skb;
        void *raw_buf;
    };
    unsigned int bytecount;
    unsigned short gso_segs;

    DEFINE_DMA_UNMAP_ADDR(dma);
    DEFINE_DMA_UNMAP_LEN(len);
    u32 tx_flags;
};

struct i40e_rx_buffer {
    dma_addr_t dma;
    struct page *page;
    __u32 page_offset;
    __u16 pagecnt_bias;
};

struct i40e_queue_stats {
    u64 packets;
    u64 bytes;
};

struct i40e_tx_queue_stats {
    u64 restart_queue;
    u64 tx_busy;
    u64 tx_done_old;
    u64 tx_linearize;
    u64 tx_force_wb;
    u64 tx_stopped;
    int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
    u64 non_eop_descs;
    u64 alloc_page_failed;
    u64 alloc_buff_failed;
    u64 page_reuse_count;
    u64 page_alloc_count;
    u64 page_waive_count;
    u64 page_busy_count;
};

enum i40e_ring_state_t {
    __I40E_TX_FDIR_INIT_DONE,
    __I40E_TX_XPS_INIT_DONE,
    __I40E_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
    struct i40e_ring *next;     /* pointer to next ring in q_vector */
    void *desc;         /* Descriptor ring memory */
    struct device *dev;     /* Used for DMA mapping */
    struct net_device *netdev;  /* netdev ring maps to */
    struct bpf_prog *xdp_prog;
    union {
        struct i40e_tx_buffer *tx_bi;
        struct i40e_rx_buffer *rx_bi;
        struct xdp_buff **rx_bi_zc;
    };
    DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
    u16 queue_index;        /* Queue number of ring */
    u8 dcb_tc;          /* Traffic class of ring */
    u8 __iomem *tail;

    /* high bit set means dynamic, use accessor routines to read/write.
     * hardware only supports 2us resolution for the ITR registers.
     * these values always store the USER setting, and must be converted
     * before programming to a register.
     */
    u16 itr_setting;

    u16 count;          /* Number of descriptors */
    u16 reg_idx;            /* HW register index of the ring */
    u16 rx_buf_len;

    /* used in interrupt processing */
    u16 next_to_use;
    u16 next_to_clean;
    u16 xdp_tx_active;

    u8 atr_sample_rate;
    u8 atr_count;

    bool ring_active;       /* is ring online or not */
    bool arm_wb;        /* do something to arm write back */
    u8 packet_stride;

    u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR        BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED    BIT(1)
#define I40E_TXR_FLAGS_XDP          BIT(2)

    /* stats structs */
    struct i40e_queue_stats stats;
    struct u64_stats_sync syncp;
    union {
        struct i40e_tx_queue_stats tx_stats;
        struct i40e_rx_queue_stats rx_stats;
    };

    unsigned int size;      /* length of descriptor ring in bytes */
    dma_addr_t dma;         /* physical address of ring */

    struct i40e_vsi *vsi;       /* Backreference to associated VSI */
    struct i40e_q_vector *q_vector; /* Backreference to associated vector */

    struct rcu_head rcu;        /* to avoid race on free */
    u16 next_to_alloc;
    struct sk_buff *skb;        /* When i40e_clean_rx_ring_irq() must
                     * return before it sees the EOP for
                     * the current packet, we save that skb
                     * here and resume receiving this
                     * packet the next time
                     * i40e_clean_rx_ring_irq() is called
                     * for this ring.
                     */

    struct i40e_channel *ch;
    u16 rx_offset;
    struct xdp_rxq_info xdp_rxq;
    struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
    return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
    ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
    ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
    return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
    ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC   0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e
#define I40E_ITR_ADAPTIVE_LATENCY   0x8000
#define I40E_ITR_ADAPTIVE_BULK      0x0000

struct i40e_ring_container {
    struct i40e_ring *ring;     /* pointer to linked list of ring(s) */
    unsigned long next_update;  /* jiffies value of next update */
    unsigned int total_bytes;   /* total bytes processed this int */
    unsigned int total_packets; /* total packets processed this int */
    u16 count;
    u16 target_itr;         /* target ITR setting for ring(s) */
    u16 current_itr;        /* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
    for (pos = (head).ring; pos != NULL; pos = pos->next)
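
/*
 * Usage sketch (illustrative; the helper name is hypothetical): walk every
 * ring linked into a container, e.g. while summing queue statistics.  The
 * u64_stats_sync handling is omitted for brevity.
 */
static inline u64 i40e_example_container_packets(struct i40e_ring_container *rc)
{
    struct i40e_ring *ring;
    u64 packets = 0;

    i40e_for_each_ring(ring, *rc)
        packets += ring->stats.packets;

    return packets;
}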

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
    if (ring->rx_buf_len > (PAGE_SIZE / 2))
        return 1;
#endif
    return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
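
/*
 * Worked example (illustrative only): with 4K pages, a ring using
 * I40E_RXBUFFER_3072 exceeds PAGE_SIZE / 2, so i40e_rx_pg_order() returns 1
 * and i40e_rx_pg_size() is an order-1 (8K) allocation; a 2K buffer stays at
 * order 0.  With 8K or larger pages the order is always 0.
 */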

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
              struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
          u32 flags);
int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring:  tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
    void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

    return le32_to_cpu(*(volatile __le32 *)head);
}

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb:     send buffer
 *
 * Returns the number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since we
 * need at least one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
    const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
    unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
    int count = 0, size = skb_headlen(skb);

    for (;;) {
        count += i40e_txd_use_count(size);

        if (!nr_frags--)
            break;

        size = skb_frag_size(frag++);
    }

    return count;
}
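
/*
 * Worked example (illustrative only): an skb with a 1500-byte linear area
 * and two 32K fragments needs i40e_txd_use_count(1500) +
 * 2 * i40e_txd_use_count(32768) == 1 + 3 + 3 == 7 data descriptors, which
 * is below the I40E_MAX_BUFFER_TXD scatter-gather limit of 8.
 */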

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the number of descriptors we want to assure are available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
    if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
        return 0;
    return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
    /* Both TSO and single send will work if count is less than 8 */
    if (likely(count < I40E_MAX_BUFFER_TXD))
        return false;

    if (skb_is_gso(skb))
        return __i40e_chk_linearize(skb);

    /* we can support up to 8 data buffers for a single send */
    return count != I40E_MAX_BUFFER_TXD;
}

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
    return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */