0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #ifndef EFX_RX_COMMON_H
0012 #define EFX_RX_COMMON_H
0013
0014
/* Preferred number of RX descriptors to refill in one batch */
#define EFX_RX_PREFERRED_BATCH 8U


/* Maximum number of page fragments that a maximum-length frame can
 * span, given the usable RX buffer size per fragment.
 */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
EFX_RX_USR_BUF_SIZE)




/* Number of entries in the RX page recycle ring for a 10G NIC.
 * NOTE(review): presumably sized to keep enough pages in flight at
 * 10G line rate — confirm against the recycle-ring setup code.
 */
#define EFX_RECYCLE_RING_SIZE_10G 256
0026
0027 static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
0028 {
0029 return page_address(buf->page) + buf->page_offset;
0030 }
0031
0032 static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
0033 {
0034 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
0035 return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
0036 #else
0037 const u8 *data = eh + efx->rx_packet_hash_offset;
0038
0039 return (u32)data[0] |
0040 (u32)data[1] << 8 |
0041 (u32)data[2] << 16 |
0042 (u32)data[3] << 24;
0043 #endif
0044 }
0045
/* Timer callback used to retry RX ring refill when it could not be
 * completed earlier.  NOTE(review): implementation not visible here —
 * confirm semantics in the corresponding .c file.
 */
void efx_siena_rx_slow_fill(struct timer_list *t);

/* Return the pages backing @n_frags RX buffers starting at @rx_buf to
 * the channel's recycle ring (name-based description; verify in .c).
 */
void efx_siena_recycle_rx_pages(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags);
/* Discard a received packet spanning @n_frags buffers starting at
 * @rx_buf, releasing its buffers.
 */
void efx_siena_discard_rx_packet(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags);

/* RX queue lifecycle: probe -> init -> fini -> remove */
int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue);
void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue);
0059
0060 static inline void efx_sync_rx_buffer(struct efx_nic *efx,
0061 struct efx_rx_buffer *rx_buf,
0062 unsigned int len)
0063 {
0064 dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
0065 DMA_FROM_DEVICE);
0066 }
0067
/* Release @num_bufs RX buffers starting at @rx_buf back to the queue */
void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
unsigned int num_bufs);

/* Recompute how RX pages are split into buffers for the current MTU */
void efx_siena_rx_config_page_split(struct efx_nic *efx);
/* Refill the RX ring with fresh descriptors; @atomic selects a
 * non-sleeping allocation path (name-based; verify in .c).
 */
void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
bool atomic);

/* Hand a received packet (possibly spanning @n_frags buffers) to GRO.
 * @eh points at the Ethernet header; @csum is the hardware checksum.
 */
void
efx_siena_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum);

/* RSS context table management: allocate / look up by id / free /
 * program the default indirection table.
 */
struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
u32 id);
void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx);
void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx);

/* Filter-spec helpers: multicast-recipient test, equality, and hashing */
bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec);
bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right);
u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec);

#ifdef CONFIG_RFS_ACCEL
/* Accelerated RFS (receive flow steering) support: rule validation,
 * ARFS hash-table lookup/removal, steering entry point, and expiry.
 */
bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
unsigned int filter_idx, bool *force);
struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
const struct efx_filter_spec *spec);
void efx_siena_rps_hash_del(struct efx_nic *efx,
const struct efx_filter_spec *spec);

/* ndo_rx_flow_steer implementation (name-based; verify in .c) */
int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
/* Expire up to @quota stale RFS filters on @channel; returns whether
 * more work remains (name-based; verify in .c).
 */
bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
unsigned int quota);
#endif

/* Filter table lifecycle */
int efx_siena_probe_filters(struct efx_nic *efx);
void efx_siena_remove_filters(struct efx_nic *efx);

#endif /* EFX_RX_COMMON_H */