#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <net/xdp.h>

/* Interrupt throttling and rate limiting definitions */
#define I40E_DEFAULT_IRQ_WORK 256

/* ITR (Interrupt Throttling Rate) values.  The hardware registers use a
 * 2 usec resolution, so these values are kept in usecs and only converted
 * to the register format when written out.  The top bit is reused as a
 * software flag marking the setting as dynamically adapted.
 */
#define I40E_ITR_DYNAMIC 0x8000
#define I40E_ITR_MASK 0x1FFE
#define I40E_MIN_ITR 2
#define I40E_ITR_20K 50
#define I40E_ITR_8K 122
#define I40E_MAX_ITR 8160
#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)

/* Bit 6 (0x40) enables interrupt rate limiting and must be set whenever the
 * rate limit value is non-zero; the register uses a 4 usec resolution.
 */
#define INTRL_ENA BIT(6)
#define I40E_MAX_INTRL 0x3B
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert an interrupt rate limit to register format
 * @intrl: interrupt rate limit in usecs
 *
 * Converts the usec value to the 4 usec register resolution and sets the
 * enable bit when the resulting limit is non-zero.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
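
/* Illustrative example: a 20 usec limit maps to (20 >> 2) | INTRL_ENA,
 * i.e. 0x5 | 0x40 = 0x45, while any limit below 4 usec truncates to zero
 * and leaves rate limiting disabled.
 */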

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* ITR index values used in the DYN_CTLN and QINT registers.  I40E_ITR_NONE
 * is not a valid index into the ITR registers; it is a special value that
 * tells the hardware not to update any ITR.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3
};

/* these are indexes into the ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_1536 1536	/* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072	/* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER 9728	/* largest size for single descriptor */

/* Size of the skb allocated when only the packet headers are pulled out of
 * the Rx page; the rest of the frame stays in the page buffer.
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc i40e_16byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames.  A 2K
 * buffer is used for receives, of which 1536 bytes are needed for the frame
 * itself; what is left must cover the shared info and the padding required
 * to IP align the frame.  If that does not work out, an enlarged (3K)
 * buffer is used instead.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
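
/* Worked example (illustrative, assuming 4K pages, NET_SKB_PAD == 64,
 * NET_IP_ALIGN == 0 and a 320-byte skb_shared_info): SKB_WITH_OVERHEAD(2048)
 * is 1728, so 64 + 1536 fits and the 2K buffer is kept; i40e_compute_pad(1536)
 * then aligns 1536 up to 2048 and returns 1728 - 1536 = 192 bytes of headroom.
 */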

/**
 * i40e_test_staterr - test bits in the Rx descriptor status/error field
 * @rx_desc: receive descriptor to inspect
 * @stat_err_bits: bits to test for
 *
 * Returns true if any of the requested status/error bits are set in the
 * descriptor's combined status_error_len qword.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
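
/* Typical use (illustrative): test the "descriptor done" status bit before
 * touching the rest of the descriptor, e.g.
 *
 *	if (!i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *		break;
 */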

/* How many Rx buffers do we bundle into one write to the hardware? */
#define I40E_RX_BUFFER_WRITE 32	/* Must be power of 2 */

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_MAX_BUFFER_TXD 8	/* max data buffers chained for one frame */
#define I40E_MIN_TX_LEN 17	/* hardware minimum transmit frame length */

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE 4096
#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
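
/* With the values above, I40E_MAX_DATA_PER_TXD_ALIGNED works out to
 * 16383 & ~4095 = 12288 bytes, i.e. each Tx descriptor is assumed to carry
 * at most 12K of data once read-request alignment is accounted for.
 */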

/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to assume
 * that we can have no more than 12K of data per descriptor, even though
 * each descriptor can take up to 16K - 1 bytes of aligned memory.  Thus we
 * need to divide by 12K, but division is slow, so the operation is
 * decomposed into shifts and one cheap multiply:
 *
 * To divide by 12K, first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, add one to round up.  Because 256 is not an exact multiple of 3
 * the quotient is slightly underestimated near each multiple of 12K, which
 * the added one compensates for.
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
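
/* Illustrative example: a 32K fragment gives ((32768 * 85) >> 20) + 1 = 3,
 * matching the three 12K-capable descriptors such a fragment needs.
 */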

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)

#define I40E_TX_FLAGS_HW_VLAN BIT(1)
#define I40E_TX_FLAGS_SW_VLAN BIT(2)
#define I40E_TX_FLAGS_TSO BIT(3)
#define I40E_TX_FLAGS_IPV4 BIT(4)
#define I40E_TX_FLAGS_IPV6 BIT(5)
#define I40E_TX_FLAGS_TSYN BIT(8)
#define I40E_TX_FLAGS_FD_SB BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define I40E_TX_FLAGS_VLAN_SHIFT 16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	__u32 page_offset;
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_stopped;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 page_alloc_count;
	u64 page_waive_count;
	u64 page_busy_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS
};

/* header split defines, used by the virtchannel (VF) interface */
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_SPLIT_L2 0x1
#define I40E_RX_SPLIT_IP 0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP 0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
		struct xdp_buff **rx_bi_zc;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 xdp_tx_active;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;

	/* When i40e_clean_rx_ring_irq() must return before it sees the EOP
	 * descriptor for the current packet, the partially built skb is
	 * saved here and receive resumes on the next call for this ring.
	 */
	struct sk_buff *skb;

	struct i40e_channel *ch;
	u16 rx_offset;
	struct xdp_rxq_info xdp_rxq;
	struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e
#define I40E_ITR_ADAPTIVE_LATENCY 0x8000
#define I40E_ITR_ADAPTIVE_BULK 0x0000

struct i40e_ring_container {
	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;			/* total number of rings in vector */
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
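
/* Usage sketch (illustrative): walk every Tx ring attached to a q_vector:
 *
 *	struct i40e_ring *ring;
 *
 *	i40e_for_each_ring(ring, q_vector->tx)
 *		do_per_ring_work(ring);
 *
 * do_per_ring_work() is a stand-in for the real per-ring processing.
 */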

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}
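
/* On 4K-page systems a 3072-byte Rx buffer exceeds half a page, so the ring
 * uses order-1 (8K) pages and i40e_rx_pg_size() below reports 8192; with 2K
 * buffers (or page sizes of 8K and up) order-0 pages are used.
 */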

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
			  struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);
int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}
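
/* The head write-back slot lives just past the last descriptor, which is why
 * the Tx ring allocation reserves room for one extra u32 beyond
 * count * sizeof(struct i40e_tx_desc) when head writeback is in use.
 */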

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors this skb will consume, counting
 * the linear head and every fragment.
 */
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
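
/* Illustrative example: an skb with a 256-byte linear head and three 32K
 * fragments needs 1 + 3 * 3 = 10 descriptors, since each 32K fragment
 * costs i40e_txd_use_count(32768) == 3.
 */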

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}
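
/* Typical call (illustrative): before mapping a frame, the transmit path
 * checks ring space with something like
 *
 *	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1))
 *		return NETDEV_TX_BUSY;
 *
 * reserving room for the data descriptors plus a context descriptor and the
 * gap the ring keeps between head and tail.
 */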

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}

/**
 * txring_txq - helper to convert from a ring to a netdev queue
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */